diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..00a6291 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/.vscode +/build diff --git a/CMakeLists.txt b/CMakeLists.txt index b13fbcb..6361a4e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,3 +1,18 @@ +# == DO NOT EDIT THE FOLLOWING LINES for the Raspberry Pi Pico VS Code Extension to work == +if(WIN32) + set(USERHOME $ENV{USERPROFILE}) +else() + set(USERHOME $ENV{HOME}) +endif() +set(sdkVersion 2.1.0) +set(toolchainVersion 13_3_Rel1) +set(picotoolVersion 2.1.0) +set(picoVscode ${USERHOME}/.pico-sdk/cmake/pico-vscode.cmake) +if (EXISTS ${picoVscode}) + include(${picoVscode}) +endif() +# ==================================================================================== +set(PICO_BOARD pico2 CACHE STRING "Board type") cmake_minimum_required(VERSION 3.12) @@ -6,24 +21,22 @@ include(pico_sdk_import.cmake) project(pico-tflmicro C CXX ASM) set(CMAKE_C_STANDARD 11) -set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) pico_sdk_init() -add_library(pico-tflmicro "") +add_library(pico-tflmicro STATIC) target_include_directories(pico-tflmicro PUBLIC ${CMAKE_CURRENT_LIST_DIR}/src/ - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include ) target_compile_definitions( @@ -31,12 +44,16 @@ target_compile_definitions( PUBLIC COMPILE_DEFINITIONS TF_LITE_DISABLE_X86_NEON=1 COMPILE_DEFINITIONS TF_LITE_STATIC_MEMORY=1 + COMPILE_DEFINITIONS TF_LITE_USE_CTIME=1 COMPILE_DEFINITIONS CMSIS_NN=1 + 
COMPILE_DEFINITIONS ARDUINO=1 + COMPILE_DEFINITIONS TFLITE_USE_CTIME=1 ) set_target_properties( pico-tflmicro PROPERTIES + COMPILE_FLAGS -Os COMPILE_FLAGS -fno-rtti COMPILE_FLAGS -fno-exceptions COMPILE_FLAGS -fno-threadsafe-statics @@ -46,87 +63,113 @@ set_target_properties( target_link_libraries( pico-tflmicro pico_stdlib + pico_multicore ) target_sources(pico-tflmicro - PUBLIC - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/c/common.c - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/error_reporter.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/op_resolver.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/tensor_utils.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/quantization_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/kernel_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/all_ops_resolver.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/activations.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/arg_min_max.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/ceil.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/circular_buffer.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/add.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/conv.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/mul.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis-nn/svdf.cpp - 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/comparisons.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/concatenation.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/dequantize.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/elementwise.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/ethosu.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/floor.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/hard_swish.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_runner.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/l2norm.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logical.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logistic.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/neg.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pack.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pad.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/prelu.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/quantize.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reduce.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reshape.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/round.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/shape.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/split.cpp - 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/split_v.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/strided_slice.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/sub.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/tanh.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/unpack.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_helpers.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_allocator.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_error_reporter.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_profiler.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_string.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_utils.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_micro_allocator.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_simple_memory_allocator.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/rp2/debug_log.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/rp2/micro_time.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/simple_memory_allocator.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/test_helpers.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/test_conv_model.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/schema/schema_utils.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/LICENSE - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/core/public/version.h + PRIVATE + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/delay.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/delay_flexbuffers_generated_data.h + 
${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/energy.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/energy_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/fft_auto_scale_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/fft_auto_scale_kernel.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/fft_auto_scale_kernel.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/fft_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_log.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_spectral_subtraction.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_square_root.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_square_root.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/filter_bank_square_root_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/framer.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/framer_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/irfft.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/irfft.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/overlap_add.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/pcan.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/pcan_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/rfft.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/rfft.h + 
${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/stacker.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/stacker_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/window.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/micro/kernels/window_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/circular_buffer.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/circular_buffer.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/complex.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/energy.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/energy.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/fft_auto_scale.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/fft_auto_scale.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_log.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_log.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_spectral_subtraction.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_spectral_subtraction.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_square_root.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/filter_bank_square_root.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/irfft.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/irfft_float.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/irfft_int16.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/irfft_int32.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_common.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_float.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_float.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/log.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/log.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/max_abs.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/max_abs.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/msb.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/msb_32.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/msb_64.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/overlap_add.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/overlap_add.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/pcan_argc_fixed.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/pcan_argc_fixed.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/rfft.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/rfft_float.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/rfft_int16.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/rfft_int32.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/square_root.h + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/square_root_32.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/square_root_64.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/window.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/signal/src/window.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/core/c/tflite_types.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/schema/schema_generated.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/schema/schema_utils.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/compiler/mlir/lite/schema/schema_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/builtin_op_data.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/builtin_ops.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/c/builtin_op_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/c/c_api_types.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/c/common.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/context_util.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/error_reporter.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/flatbuffer_conversions.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/op_resolver.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/profiler.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/tensor_utils.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/api/tensor_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/c/builtin_op_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/c/c_api_types.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/c/common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/c/common.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/core/macros.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/common.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/common.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/compatibility.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/cppmath.h @@ -134,18 +177,35 @@ target_sources(pico-tflmicro ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/min.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/optimized/neon_check.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/portable_tensor.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/portable_tensor_utils.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/portable_tensor_utils.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/quantization_util.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/quantization_util.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/add.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/add_n.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/batch_matmul.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/binary_function.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/broadcast_args.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/broadcast_to.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/ceil.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/comparisons.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/comparisons.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/concatenation.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/conv.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/cumsum.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/depth_to_space.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/dequantize.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/div.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/elu.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/exp.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/fill.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/floor.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/floor_div.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/floor_mod.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/fully_connected.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/hard_swish.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/integer_ops/add.h @@ -158,196 +218,471 @@ target_sources(pico-tflmicro ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/l2normalization.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/leaky_relu.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/log_softmax.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/logistic.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/lstm_cell.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/maximum_minimum.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/mul.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/neg.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/pad.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/pooling.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/prelu.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/quantize.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/reduce.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/requantize.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/resize_bilinear.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/round.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/select.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/slice.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/softmax.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/space_to_depth.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/strided_slice.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/sub.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/tanh.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/transpose.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/reference/transpose_conv.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/runtime_shape.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/runtime_shape.h 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/strided_slice_logic.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/tensor_ctypes.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/tensor_ctypes.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/tensor_utils.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/internal/types.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/kernel_util.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/kernel_util.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/op_macros.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/kernels/padding.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/all_ops_resolver.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/benchmarks/micro_benchmark.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/compatibility.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/compression.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/debug_log.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/debug_log.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/fake_micro_context.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/fake_micro_context.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/flatbuffer_utils.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/flatbuffer_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/hexdump.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/hexdump.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/hexdump_test.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/activation_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/activations.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/activations.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/activations_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/add.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/add_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/add_n.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/arg_min_max.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/assign_variable.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/batch_matmul.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/broadcast_args.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/broadcast_to.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/call_once.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cast.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/ceil.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/circular_buffer.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/circular_buffer.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/circular_buffer_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/add.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/batch_matmul.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/conv.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/mul.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/pooling.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/comparisons.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/concatenation.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/conv.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/conv_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/conv_test.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/cumsum.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/decompress.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/decompress.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/decompress_common.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/depth_to_space.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/depthwise_conv.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/dequantize.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/dequantize.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/dequantize_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/div.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/elementwise.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/elu.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/embedding_lookup.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/ethosu.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/ethosu.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/exp.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/expand_dims.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/fill.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/floor.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/floor_div.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/floor_mod.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/fully_connected.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/fully_connected_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/gather.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/gather_nd.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/hard_swish.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/hard_swish.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/hard_swish_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/if.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_runner.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_runner.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_util.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/kernel_util.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/l2_pool_2d.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/l2norm.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/leaky_relu.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/leaky_relu.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/leaky_relu_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/log_softmax.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logical.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logical.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logical_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logistic.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logistic.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/logistic_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/lstm_eval.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/lstm_eval.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/lstm_eval_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/lstm_eval_test.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/lstm_shared.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/micro_ops.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/micro_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/micro_tensor_utils.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/micro_tensor_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/mirror_pad.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/mul.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/mul_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/neg.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pack.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pad.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pad.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pooling.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/pooling_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/prelu.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/prelu.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/prelu_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/quantize.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/quantize.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/quantize_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/read_variable.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reduce.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reduce.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reduce_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reshape.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reshape.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/reshape_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/resize_bilinear.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/round.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/select.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/shape.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/slice.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/softmax.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/softmax_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/space_to_depth.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/split.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/split_v.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/squared_difference.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/squeeze.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/strided_slice.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/strided_slice.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/strided_slice_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/sub.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/sub.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/sub_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/svdf.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/svdf_common.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/tanh.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/transpose.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/transpose_conv.h + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/unpack.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/var_handle.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/while.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/kernels/zeros_like.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_helpers.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_helpers.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/memory_planner.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/memory_plan_struct.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/micro_memory_planner.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_allocation_info.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_allocation_info.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_allocator.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_allocator.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_error_reporter.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_arena_constants.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_common.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_context.cpp + 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_context.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_graph.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter_context.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter_context.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter_graph.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_interpreter_graph.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_log.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_log.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_mutable_op_resolver.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_op_resolver.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_op_resolver.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_profiler.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_profiler.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_string.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_profiler_interface.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_resource_variable.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_resource_variable.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_time.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_time.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_utils.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/micro_utils.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/mock_micro_graph.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/mock_micro_graph.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_micro_allocator.cpp 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_micro_allocator.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_micro_interpreter.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/recording_simple_memory_allocator.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/simple_memory_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/span.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/static_vector.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/system_setup.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/system_setup.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/test_helper_custom_ops.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/test_helper_custom_ops.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/test_helpers.cpp ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/test_helpers.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/micro_test.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/portable_type_to_tflitetype.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/schema/schema_generated.h ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/schema/schema_utils.h - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/version.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c - 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c - 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c - 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c - 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_clang.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/arm_common_tables.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/arm_helium_utils.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/arm_math.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_memory.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_types.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/basic_math_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/bayes_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/complex_math_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/controller_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/distance_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/fast_math_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/filtering_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/interpolation_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/matrix_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/none.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/statistics_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/support_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_defines.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/transform_functions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include/dsp/utils.h - 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_tables.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_types.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include/arm_nnfunctions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include/arm_nnsupportfunctions.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/LICENSE.txt - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/LICENSE.txt + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_iccarm.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/cmsis_version.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_ca.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm0.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm0plus.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm1.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm23.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm3.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm33.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm35p.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm4.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm52.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm55.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm7.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_cm85.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_sc000.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_sc300.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/core_starmc1.h + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include/tz_context.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/arm_nn_math_types.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/arm_nn_tables.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/arm_nn_types.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/arm_nnfunctions.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include/arm_nnsupportfunctions.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_nn_activation_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu6_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q15.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q7.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_acc_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_batch_offset.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_maximum_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_minimum_s8.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_w.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_x.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_y.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_z.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4_fast.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_even_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4_opt.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_row_offset_s8_s16.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s4_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_wrapper_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_per_channel_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_wrapper_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8_s64.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s8_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_interleaved_t_even_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8_s32.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_transpose_conv_row_s8_s32.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s8_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_per_ch_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s4.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nntables.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_s8_to_s16_unordered_with_offset.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PadFunctions/arm_pad_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s8.c + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/ReshapeFunctions/arm_reshape_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_get_buffer_sizes_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_state_s16_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8_s16.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_u8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Source/TransposeFunctions/arm_transpose_s8.c + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/array.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/base.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/buffer.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/buffer_ref.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/code_generator.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/code_generators.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/default_allocator.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/detached_buffer.h + 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/file_manager.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/flatbuffers.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/flex_flat_util.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/flexbuffers.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/grpc.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/hash.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/idl.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/minireflect.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/reflection.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/reflection_generated.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/registry.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/stl_emulation.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/string.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/struct.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/table.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/util.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp/LICENSE + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/vector.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/vector_downward.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include/flatbuffers/verifier.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp/fixedpoint/fixedpoint.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp/fixedpoint/fixedpoint_neon.h 
${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp/fixedpoint/fixedpoint_sse.h ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp/internal/detect_platform.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/COPYING ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/_kiss_fft_guts.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/kiss_fft.c ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/kiss_fft.h + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/tools/kiss_fftr.c ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft/tools/kiss_fftr.h - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy/LICENSE ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy/ruy/profiler/instrumentation.h + ) -add_library(pico-tflmicro_test "") +add_library(pico-tflmicro_test STATIC) target_include_directories(pico-tflmicro_test PUBLIC ${CMAKE_CURRENT_LIST_DIR}/src/ - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/DSP/Include ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include - ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/NN/Include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include ) target_compile_definitions( @@ -370,74 +705,113 @@ set_target_properties( target_link_libraries( pico-tflmicro_test pico_stdlib + pico_multicore ) -target_sources(pico-tflmicro_test - PUBLIC - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/test_conv_model.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/util_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/micro_test.h - 
${CMAKE_CURRENT_LIST_DIR}/src/tensorflow/lite/micro/testing/test_conv_model.h -) - -add_subdirectory("examples/keyword_benchmark") add_subdirectory("examples/hello_world") add_subdirectory("examples/person_detection") -add_subdirectory("examples/magic_wand") -add_subdirectory("examples/micro_speech") -add_subdirectory("tests/greedy_memory_planner_test") -add_subdirectory("tests/kernel_activations_test") -add_subdirectory("tests/kernel_add_test") -add_subdirectory("tests/kernel_arg_min_max_test") -add_subdirectory("tests/kernel_ceil_test") -add_subdirectory("tests/kernel_circular_buffer_test") -add_subdirectory("tests/kernel_comparisons_test") -add_subdirectory("tests/kernel_concatenation_test") -add_subdirectory("tests/kernel_conv_test") -add_subdirectory("tests/kernel_depthwise_conv_test") -add_subdirectory("tests/kernel_dequantize_test") -add_subdirectory("tests/kernel_detection_postprocess_test") -add_subdirectory("tests/kernel_elementwise_test") -add_subdirectory("tests/kernel_floor_test") -add_subdirectory("tests/kernel_fully_connected_test") -add_subdirectory("tests/kernel_hard_swish_test") -add_subdirectory("tests/kernel_l2norm_test") -add_subdirectory("tests/kernel_logical_test") -add_subdirectory("tests/kernel_logistic_test") -add_subdirectory("tests/kernel_maximum_minimum_test") -add_subdirectory("tests/kernel_mul_test") -add_subdirectory("tests/kernel_neg_test") -add_subdirectory("tests/kernel_pack_test") -add_subdirectory("tests/kernel_pad_test") -add_subdirectory("tests/kernel_pooling_test") -add_subdirectory("tests/kernel_prelu_test") -add_subdirectory("tests/kernel_quantization_util_test") -add_subdirectory("tests/kernel_quantize_test") -add_subdirectory("tests/kernel_reduce_test") -add_subdirectory("tests/kernel_reshape_test") -add_subdirectory("tests/kernel_resize_nearest_neighbor_test") -add_subdirectory("tests/kernel_round_test") -add_subdirectory("tests/kernel_shape_test") -add_subdirectory("tests/kernel_softmax_test") 
-add_subdirectory("tests/kernel_split_test") -add_subdirectory("tests/kernel_split_v_test") -add_subdirectory("tests/kernel_strided_slice_test") -add_subdirectory("tests/kernel_sub_test") -add_subdirectory("tests/kernel_svdf_test") -add_subdirectory("tests/kernel_tanh_test") -add_subdirectory("tests/kernel_unpack_test") -add_subdirectory("tests/linear_memory_planner_test") + +add_subdirectory("tests/arena_allocator_non_persistent_arena_buffer_allocator_test") +add_subdirectory("tests/arena_allocator_persistent_arena_buffer_allocator_test") +add_subdirectory("tests/arena_allocator_recording_single_arena_buffer_allocator_test") +add_subdirectory("tests/arena_allocator_single_arena_buffer_allocator_test") +add_subdirectory("tests/fake_micro_context_test") +add_subdirectory("tests/flatbuffer_utils_test") +add_subdirectory("tests/hexdump_test") +add_subdirectory("tests/kernels_activations_test") +add_subdirectory("tests/kernels_add_n_test") +add_subdirectory("tests/kernels_add_test") +add_subdirectory("tests/kernels_arg_min_max_test") +add_subdirectory("tests/kernels_batch_matmul_test") +add_subdirectory("tests/kernels_batch_to_space_nd_test") +add_subdirectory("tests/kernels_broadcast_args_test") +add_subdirectory("tests/kernels_broadcast_to_test") +add_subdirectory("tests/kernels_call_once_test") +add_subdirectory("tests/kernels_cast_test") +add_subdirectory("tests/kernels_ceil_test") +add_subdirectory("tests/kernels_circular_buffer_test") +add_subdirectory("tests/kernels_comparisons_test") +add_subdirectory("tests/kernels_concatenation_test") +add_subdirectory("tests/kernels_conv_test") +add_subdirectory("tests/kernels_cumsum_test") +add_subdirectory("tests/kernels_decompress_test") +add_subdirectory("tests/kernels_depth_to_space_test") +add_subdirectory("tests/kernels_depthwise_conv_test") +add_subdirectory("tests/kernels_dequantize_test") +add_subdirectory("tests/kernels_detection_postprocess_test") +add_subdirectory("tests/kernels_div_test") 
+add_subdirectory("tests/kernels_elementwise_test") +add_subdirectory("tests/kernels_elu_test") +add_subdirectory("tests/kernels_embedding_lookup_test") +add_subdirectory("tests/kernels_exp_test") +add_subdirectory("tests/kernels_expand_dims_test") +add_subdirectory("tests/kernels_fill_test") +add_subdirectory("tests/kernels_floor_div_test") +add_subdirectory("tests/kernels_floor_mod_test") +add_subdirectory("tests/kernels_floor_test") +add_subdirectory("tests/kernels_fully_connected_test") +add_subdirectory("tests/kernels_gather_nd_test") +add_subdirectory("tests/kernels_gather_test") +add_subdirectory("tests/kernels_hard_swish_test") +add_subdirectory("tests/kernels_if_test") +add_subdirectory("tests/kernels_l2_pool_2d_test") +add_subdirectory("tests/kernels_l2norm_test") +add_subdirectory("tests/kernels_leaky_relu_test") +add_subdirectory("tests/kernels_log_softmax_test") +add_subdirectory("tests/kernels_logical_test") +add_subdirectory("tests/kernels_logistic_test") +add_subdirectory("tests/kernels_lstm_eval_test") +add_subdirectory("tests/kernels_maximum_minimum_test") +add_subdirectory("tests/kernels_mirror_pad_test") +add_subdirectory("tests/kernels_mul_test") +add_subdirectory("tests/kernels_neg_test") +add_subdirectory("tests/kernels_pack_test") +add_subdirectory("tests/kernels_pad_test") +add_subdirectory("tests/kernels_pooling_test") +add_subdirectory("tests/kernels_prelu_test") +add_subdirectory("tests/kernels_quantization_util_test") +add_subdirectory("tests/kernels_quantize_test") +add_subdirectory("tests/kernels_reduce_test") +add_subdirectory("tests/kernels_reshape_test") +add_subdirectory("tests/kernels_resize_bilinear_test") +add_subdirectory("tests/kernels_resize_nearest_neighbor_test") +add_subdirectory("tests/kernels_round_test") +add_subdirectory("tests/kernels_select_test") +add_subdirectory("tests/kernels_shape_test") +add_subdirectory("tests/kernels_slice_test") +add_subdirectory("tests/kernels_softmax_test") 
+add_subdirectory("tests/kernels_space_to_batch_nd_test") +add_subdirectory("tests/kernels_space_to_depth_test") +add_subdirectory("tests/kernels_split_test") +add_subdirectory("tests/kernels_split_v_test") +add_subdirectory("tests/kernels_squared_difference_test") +add_subdirectory("tests/kernels_squeeze_test") +add_subdirectory("tests/kernels_strided_slice_test") +add_subdirectory("tests/kernels_sub_test") +add_subdirectory("tests/kernels_svdf_test") +add_subdirectory("tests/kernels_tanh_test") +add_subdirectory("tests/kernels_transpose_conv_test") +add_subdirectory("tests/kernels_transpose_test") +add_subdirectory("tests/kernels_unidirectional_sequence_lstm_test") +add_subdirectory("tests/kernels_unpack_test") +add_subdirectory("tests/kernels_while_test") +add_subdirectory("tests/kernels_zeros_like_test") add_subdirectory("tests/memory_arena_threshold_test") add_subdirectory("tests/memory_helpers_test") +add_subdirectory("tests/memory_planner_greedy_memory_planner_test") +add_subdirectory("tests/memory_planner_linear_memory_planner_test") +add_subdirectory("tests/memory_planner_non_persistent_buffer_planner_shim_test") +add_subdirectory("tests/micro_allocation_info_test") add_subdirectory("tests/micro_allocator_test") -add_subdirectory("tests/micro_error_reporter_test") -add_subdirectory("tests/micro_interpreter_test") +add_subdirectory("tests/micro_interpreter_context_test") +add_subdirectory("tests/micro_log_test") add_subdirectory("tests/micro_mutable_op_resolver_test") -add_subdirectory("tests/micro_string_test") +add_subdirectory("tests/micro_resource_variable_test") add_subdirectory("tests/micro_time_test") add_subdirectory("tests/micro_utils_test") add_subdirectory("tests/recording_micro_allocator_test") -add_subdirectory("tests/recording_simple_memory_allocator_test") -add_subdirectory("tests/simple_memory_allocator_test") +add_subdirectory("tests/span_test") +add_subdirectory("tests/static_vector_test") add_subdirectory("tests/testing_helpers_test") 
+add_subdirectory("tests/testing_util_test") + diff --git a/CMakeLists_template.txt b/CMakeLists_template.txt new file mode 100644 index 0000000..de6f373 --- /dev/null +++ b/CMakeLists_template.txt @@ -0,0 +1,114 @@ +# == DO NOT EDIT THE FOLLOWING LINES for the Raspberry Pi Pico VS Code Extension to work == +if(WIN32) + set(USERHOME $ENV{USERPROFILE}) +else() + set(USERHOME $ENV{HOME}) +endif() +set(sdkVersion 2.1.0) +set(toolchainVersion 13_3_Rel1) +set(picotoolVersion 2.1.0) +set(picoVscode ${USERHOME}/.pico-sdk/cmake/pico-vscode.cmake) +if (EXISTS ${picoVscode}) + include(${picoVscode}) +endif() +# ==================================================================================== +set(PICO_BOARD pico CACHE STRING "Board type") + +cmake_minimum_required(VERSION 3.12) + +# Pull in PICO SDK (must be before project) +include(pico_sdk_import.cmake) + +project(pico-tflmicro C CXX ASM) +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) + +pico_sdk_init() + +add_library(pico-tflmicro STATIC) + +target_include_directories(pico-tflmicro + PUBLIC + ${CMAKE_CURRENT_LIST_DIR}/src/ + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include +) + +target_compile_definitions( + pico-tflmicro + PUBLIC + COMPILE_DEFINITIONS TF_LITE_DISABLE_X86_NEON=1 + COMPILE_DEFINITIONS TF_LITE_STATIC_MEMORY=1 + COMPILE_DEFINITIONS TF_LITE_USE_CTIME=1 + COMPILE_DEFINITIONS CMSIS_NN=1 + COMPILE_DEFINITIONS ARDUINO=1 + COMPILE_DEFINITIONS TFLITE_USE_CTIME=1 +) + +set_target_properties( + pico-tflmicro + PROPERTIES + COMPILE_FLAGS -Os + COMPILE_FLAGS -fno-rtti + COMPILE_FLAGS -fno-exceptions + COMPILE_FLAGS -fno-threadsafe-statics + COMPILE_FLAGS -nostdlib 
+) + +target_link_libraries( + pico-tflmicro + pico_stdlib + pico_multicore +) + +target_sources(pico-tflmicro + PRIVATE +{{LIBRARY_SOURCES}} +) + +add_library(pico-tflmicro_test STATIC) + +target_include_directories(pico-tflmicro_test + PUBLIC + ${CMAKE_CURRENT_LIST_DIR}/src/ + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/ruy + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/gemmlowp + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/kissfft + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis/CMSIS/Core/Include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/flatbuffers/include + ${CMAKE_CURRENT_LIST_DIR}/src/third_party/cmsis_nn/Include +) + +target_compile_definitions( + pico-tflmicro_test + PUBLIC + COMPILE_DEFINITIONS TF_LITE_DISABLE_X86_NEON=1 + COMPILE_DEFINITIONS TF_LITE_STATIC_MEMORY=1 + COMPILE_DEFINITIONS CMSIS_NN=1 +) + +set_target_properties( + pico-tflmicro_test + PROPERTIES + COMPILE_FLAGS -fno-rtti + COMPILE_FLAGS -fno-exceptions + COMPILE_FLAGS -fno-threadsafe-statics + COMPILE_FLAGS -nostdlib +) + +target_link_libraries( + pico-tflmicro_test + pico_stdlib + pico_multicore +) + +add_subdirectory("examples/hello_world") +add_subdirectory("examples/person_detection") + +{{TEST_FOLDERS}} diff --git a/README.md b/README.md index 43d8f9e..9155e87 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ - # TensorFlow Lite Micro An Open Source Machine Learning Framework for Everyone. @@ -6,49 +5,61 @@ An Open Source Machine Learning Framework for Everyone. ## Introduction This is a version of the [TensorFlow Lite Micro library](https://www.tensorflow.org/lite/microcontrollers) -for the Raspberry Pi Pico microcontroller. It allows you to run machine learning models to -do things like voice recognition, detect people in images, recognize gestures from an accelerometer, -and other sensor analysis tasks. +for the Raspberry Pi Pico microcontroller. 
It allows you to run machine +learning models to do things like voice recognition, detect people in images, +recognize gestures from an accelerometer, and other sensor analysis tasks. +This version has scripts to upstream changes from the Google codebase. It also +takes advantage of the RP2040's dual cores for increased speed on some +operations. ## Getting Started -First you'll need to follow the Pico setup instructions to initialize the development -environment on your machine. Once that is done, make sure that the PICO_SDK_PATH -environment variable has been set to the location of the Pico SDK, either in the shell -you're building in, or the CMake configure environment variable setting of the extension -if you're using VS Code. +First you'll need to follow the Pico setup instructions to initialize the +development environment on your machine. Once that is done, make sure that the +`PICO_SDK_PATH` environment variable has been set to the location of the Pico +SDK, either in the shell you're building in, or the CMake configure environment +variable setting of the extension if you're using VS Code. + +You should then be able to build the library, tests, and examples. The easiest +way to build is using VS Code's CMake integration, by loading the project and +choosing the build option at the bottom of the window. -You should then be able to build the library, tests, and examples. The easiest way to -build is using VS Code's CMake integration, by loading the project and choosing the -build option at the bottom of the window. +Alternatively you can build the entire project, including tests, by running the +following commands from a terminal once you're in this repo's directory: + +```bash +mkdir build +cd build +cmake .. +make +``` ## What's Included -There are several example applications included. The simplest one to begin with is the -hello_world project. 
This demonstrates the fundamentals of deploying an ML model on a -device, driving the Pico's LED in a learned sine-wave pattern. +There are several example applications included. The simplest one to begin with +is the hello_world project. This demonstrates the fundamentals of deploying an +ML model on a device, driving the Pico's LED in a learned sine-wave pattern. +Once you have built the project, a UF2 file you can copy to the Pico should be +present at `build/examples/hello_world/hello_world.uf2`. -Other examples include simple speech recognition, a magic wand gesture recognizer, -and spotting people in camera images, but because they require audio, accelerometer or -image inputs you'll need to write some code to hook up your own sensors, since these -are not included with the base microcontroller. +Another example is the person detector, but since the Pico doesn't come with +image inputs you'll need to write some code to hook up your own sensor. You can +find a fork of TFLM for the Arducam Pico4ML that does this at [arducam.com/pico4ml-an-rp2040-based-platform-for-tiny-machine-learning/](https://www.arducam.com/pico4ml-an-rp2040-based-platform-for-tiny-machine-learning/). ## Contributing -This repository (https://github.com/raspberrypi/pico-tflmicro) is read-only, because -it has been automatically generated from the master TensorFlow repository at -https://github.com/tensorflow/tensorflow. This means that all issues and pull requests -need to be filed there. You can generate a version of this generated project by -running the commands: +This repository (https://github.com/raspberrypi/pico-tflmicro) is read-only, +because it has been automatically generated from the master TensorFlow +repository at https://github.com/tensorflow/tensorflow. It's maintained by +@petewarden on a best effort basis, so bugs and PRs may not get addressed. 
You +can generate an updated version of this generated project by running the command: ``` -git clone https://github.com/tensorflow/tensorflow -cd tensorflow -tensorflow/lite/micro/tools/project/generate.py rp2 pico-tflmicro +sync/sync_with_upstream.sh ``` -This should create a Pico-compatible project from the latest version of the TensorFlow -repository. +This should create a Pico-compatible project from the latest version of the +TensorFlow repository. ## Learning More @@ -63,7 +74,6 @@ has more details on the design and implementation of the framework. ## Licensing -The TensorFlow source code is covered by the license described in src/tensorflow/LICENSE, -components from other libraries have the appropriate licenses included in their -third_party folders. - +The TensorFlow source code is covered by the Apache 2 license described in +src/tensorflow/LICENSE, components from other libraries have the appropriate +licenses included in their third_party folders. \ No newline at end of file diff --git a/benchmark_results.txt b/benchmark_results.txt new file mode 100644 index 0000000..2dcf80e --- /dev/null +++ b/benchmark_results.txt @@ -0,0 +1,74 @@ +Results of running person_detection_benchmark with and without multicore optimizations. + +To reproduce these, run `make person_detection_benchmark`, with and without the +TF_LITE_PICO_MULTICORE macro defined at the top of src/third_party/cmsis_nn/Source/NNSup +portFunctions/arm_nn_mat_mult_nt_t_s8.c + +Without multicore CONV2D optimizations: + +NoPersonDataIterations(1) took 823658 ticks (823 ms) +DEPTHWISE_CONV_2D took 34553 ticks (34 ms). +DEPTHWISE_CONV_2D took 60260 ticks (60 ms). +CONV_2D took 47509 ticks (47 ms). +DEPTHWISE_CONV_2D took 29581 ticks (29 ms). +CONV_2D took 32941 ticks (32 ms). +DEPTHWISE_CONV_2D took 57434 ticks (57 ms). +CONV_2D took 51301 ticks (51 ms). +DEPTHWISE_CONV_2D took 14411 ticks (14 ms). +CONV_2D took 26003 ticks (26 ms). +DEPTHWISE_CONV_2D took 27689 ticks (27 ms). 
+CONV_2D took 44571 ticks (44 ms). +DEPTHWISE_CONV_2D took 7025 ticks (7 ms). +CONV_2D took 23344 ticks (23 ms). +DEPTHWISE_CONV_2D took 13935 ticks (13 ms). +CONV_2D took 43007 ticks (43 ms). +DEPTHWISE_CONV_2D took 12996 ticks (12 ms). +CONV_2D took 42947 ticks (42 ms). +DEPTHWISE_CONV_2D took 12983 ticks (12 ms). +CONV_2D took 42953 ticks (42 ms). +DEPTHWISE_CONV_2D took 13023 ticks (13 ms). +CONV_2D took 42979 ticks (42 ms). +DEPTHWISE_CONV_2D took 13015 ticks (13 ms). +CONV_2D took 42951 ticks (42 ms). +DEPTHWISE_CONV_2D took 3522 ticks (3 ms). +CONV_2D took 25795 ticks (25 ms). +DEPTHWISE_CONV_2D took 6016 ticks (6 ms). +CONV_2D took 49461 ticks (49 ms). +AVERAGE_POOL_2D took 874 ticks (0 ms). +CONV_2D took 220 ticks (0 ms). +RESHAPE took 21 ticks (0 ms). +SOFTMAX took 338 ticks (0 ms). + +Multi-core CONV2D and Depthwise Conv: +NoPersonDataIterations(1) took 587400 ticks (587 ms) +DEPTHWISE_CONV_2D took 34550 ticks (34 ms). +DEPTHWISE_CONV_2D took 31942 ticks (31 ms). +CONV_2D took 29140 ticks (29 ms). +DEPTHWISE_CONV_2D took 15765 ticks (15 ms). +CONV_2D took 21402 ticks (21 ms). +DEPTHWISE_CONV_2D took 30346 ticks (30 ms). +CONV_2D took 35317 ticks (35 ms). +DEPTHWISE_CONV_2D took 7792 ticks (7 ms). +CONV_2D took 17922 ticks (17 ms). +DEPTHWISE_CONV_2D took 14706 ticks (14 ms). +CONV_2D took 32168 ticks (32 ms). +DEPTHWISE_CONV_2D took 4780 ticks (4 ms). +CONV_2D took 16981 ticks (16 ms). +DEPTHWISE_CONV_2D took 9800 ticks (9 ms). +CONV_2D took 36303 ticks (36 ms). +DEPTHWISE_CONV_2D took 7141 ticks (7 ms). +CONV_2D took 36236 ticks (36 ms). +DEPTHWISE_CONV_2D took 7137 ticks (7 ms). +CONV_2D took 36343 ticks (36 ms). +DEPTHWISE_CONV_2D took 7166 ticks (7 ms). +CONV_2D took 36217 ticks (36 ms). +DEPTHWISE_CONV_2D took 7148 ticks (7 ms). +CONV_2D took 36216 ticks (36 ms). +DEPTHWISE_CONV_2D took 3624 ticks (3 ms). +CONV_2D took 22197 ticks (22 ms). +DEPTHWISE_CONV_2D took 4526 ticks (4 ms). +CONV_2D took 43024 ticks (43 ms). 
+AVERAGE_POOL_2D took 876 ticks (0 ms). +CONV_2D took 275 ticks (0 ms). +RESHAPE took 20 ticks (0 ms). +SOFTMAX took 340 ticks (0 ms). \ No newline at end of file diff --git a/examples/hello_world/CMakeLists.txt b/examples/hello_world/CMakeLists.txt index 6531dfb..b008d45 100644 --- a/examples/hello_world/CMakeLists.txt +++ b/examples/hello_world/CMakeLists.txt @@ -24,9 +24,9 @@ set_target_properties( target_sources(hello_world_test PRIVATE + ${CMAKE_CURRENT_LIST_DIR}/hello_world_float_model_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/hello_world_int8_model_data.cpp ${CMAKE_CURRENT_LIST_DIR}/hello_world_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/model.cpp - ${CMAKE_CURRENT_LIST_DIR}/model.h ) target_link_libraries( @@ -36,6 +36,9 @@ target_link_libraries( pico-tflmicro_test ) +pico_enable_stdio_usb(hello_world_test 1) +pico_enable_stdio_uart(hello_world_test 0) + pico_add_extra_outputs(hello_world_test) @@ -58,13 +61,13 @@ set_target_properties( target_sources(hello_world PRIVATE ${CMAKE_CURRENT_LIST_DIR}/constants.cpp + ${CMAKE_CURRENT_LIST_DIR}/hello_world_float_model_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/hello_world_int8_model_data.cpp ${CMAKE_CURRENT_LIST_DIR}/main.cpp ${CMAKE_CURRENT_LIST_DIR}/main_functions.cpp - ${CMAKE_CURRENT_LIST_DIR}/model.cpp ${CMAKE_CURRENT_LIST_DIR}/rp2/output_handler.cpp ${CMAKE_CURRENT_LIST_DIR}/constants.h ${CMAKE_CURRENT_LIST_DIR}/main_functions.h - ${CMAKE_CURRENT_LIST_DIR}/model.h ${CMAKE_CURRENT_LIST_DIR}/output_handler.h ) @@ -74,6 +77,9 @@ target_link_libraries( hardware_pwm ) +pico_enable_stdio_usb(hello_world 1) +pico_enable_stdio_uart(hello_world 0) + pico_add_extra_outputs(hello_world) diff --git a/examples/hello_world/hello_world_float_model_data.cpp b/examples/hello_world/hello_world_float_model_data.cpp new file mode 100644 index 0000000..274186f --- /dev/null +++ b/examples/hello_world/hello_world_float_model_data.cpp @@ -0,0 +1,6 @@ +#include + +#include "hello_world_float_model_data.h" + +const unsigned int 
g_hello_world_float_model_data_size = 3164; +alignas(16) const unsigned char g_hello_world_float_model_data[] = {0x1c,0x0,0x0,0x0,0x54,0x46,0x4c,0x33,0x14,0x0,0x20,0x0,0x1c,0x0,0x18,0x0,0x14,0x0,0x10,0x0,0xc,0x0,0x0,0x0,0x8,0x0,0x4,0x0,0x14,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x90,0x0,0x0,0x0,0xe8,0x0,0x0,0x0,0x0,0x7,0x0,0x0,0x10,0x7,0x0,0x0,0x8,0xc,0x0,0x0,0x3,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0xa,0x0,0x10,0x0,0xc,0x0,0x8,0x0,0x4,0x0,0xa,0x0,0x0,0x0,0xc,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0xf,0x0,0x0,0x0,0x73,0x65,0x72,0x76,0x69,0x6e,0x67,0x5f,0x64,0x65,0x66,0x61,0x75,0x6c,0x74,0x0,0x1,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x98,0xff,0xff,0xff,0x9,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x0,0x1,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xca,0xf9,0xff,0xff,0x4,0x0,0x0,0x0,0xb,0x0,0x0,0x0,0x64,0x65,0x6e,0x73,0x65,0x5f,0x69,0x6e,0x70,0x75,0x74,0x0,0x2,0x0,0x0,0x0,0x34,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xdc,0xff,0xff,0xff,0xc,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x13,0x0,0x0,0x0,0x43,0x4f,0x4e,0x56,0x45,0x52,0x53,0x49,0x4f,0x4e,0x5f,0x4d,0x45,0x54,0x41,0x44,0x41,0x54,0x41,0x0,0x8,0x0,0xc,0x0,0x8,0x0,0x4,0x0,0x8,0x0,0x0,0x0,0xb,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x13,0x0,0x0,0x0,0x6d,0x69,0x6e,0x5f,0x72,0x75,0x6e,0x74,0x69,0x6d,0x65,0x5f,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x0,0xd,0x0,0x0,0x0,0x14,0x6,0x0,0x0,0xc,0x6,0x0,0x0,0xbc,0x5,0x0,0x0,0xa0,0x5,0x0,0x0,0x50,0x5,0x0,0x0,0x0,0x5,0x0,0x0,0xf0,0x0,0x0,0x0,0xa0,0x0,0x0,0x0,0x98,0x0,0x0,0x0,0x90,0x0,0x0,0x0,0x88,0x0,0x0,0x0,0x68,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x76,0xfa,0xff,0xff,0x4,0x0,0x0,0x0,0x54,0x0,0x0,0x0,0xc,0x0,0x0,0x0,0x8,0x0,0xe,0x0,0x8,0x0,0x4,0x0,0x8,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x24,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x8,0x0,0x4,0x0,0x6,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xa,0x0,0x10,0x0,0xc,0x0,0x8,0x0,0x4,0x0,0xa,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x32,0x2e,0x31,0x31,0x2e,0x30,0x0,0x0,0xd6,0xfa,0xff,0xff,0x4,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x31,0x2
e,0x35,0x2e,0x30,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc0,0xf5,0xff,0xff,0xc4,0xf5,0xff,0xff,0xc8,0xf5,0xff,0xff,0xfe,0xfa,0xff,0xff,0x4,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0xea,0x85,0xf1,0xbe,0xef,0x1c,0x9a,0x3f,0xa3,0x23,0xa1,0x3f,0x12,0x94,0xb7,0x3e,0x0,0x89,0xe2,0x3d,0xeb,0x7a,0x86,0xbe,0x18,0x6f,0xa9,0x3e,0xec,0x6d,0x61,0x3f,0x12,0x4a,0x3d,0xbe,0x42,0x6b,0x8a,0x3f,0xc1,0xc3,0x3c,0xbf,0xe8,0xc0,0x9f,0x3e,0xee,0xaf,0x59,0xbf,0xc1,0x51,0x6e,0xbf,0x98,0x8c,0xb,0x3e,0xe1,0xc3,0xe3,0x3d,0x4a,0xfb,0xff,0xff,0x4,0x0,0x0,0x0,0x0,0x4,0x0,0x0,0x80,0x6d,0x32,0x3b,0xa,0x64,0x42,0x3e,0xf5,0xa4,0xd3,0x3e,0x1a,0xc7,0x3a,0xbe,0x66,0x99,0x4d,0x3e,0xac,0xe9,0x49,0xbe,0x38,0x73,0x7b,0xbd,0x2,0xb3,0x4,0xbe,0x34,0xae,0xff,0xbd,0xb7,0xf5,0xdb,0xbe,0xaf,0xb3,0xb2,0xbe,0x4d,0x95,0xb1,0x3e,0x83,0xa6,0xbc,0x3e,0x8e,0x14,0x3b,0x3e,0xfa,0x7c,0x74,0x3e,0xe9,0xf9,0x6d,0xbe,0x90,0x31,0xd3,0xbc,0x56,0xf2,0x48,0x3e,0x4a,0x1,0xa,0x3d,0x5b,0x82,0x1b,0xbf,0xe5,0x26,0x92,0x3d,0xcd,0x5a,0xcd,0x3e,0x69,0x43,0xcc,0xbe,0x24,0xaa,0xff,0xbd,0xf3,0x2,0xc2,0xbe,0xe5,0x2a,0x87,0x3e,0x71,0x62,0xbd,0xbe,0x2f,0xa3,0xae,0x3e,0x2a,0x47,0x89,0xbe,0x64,0x3b,0x25,0x3e,0xee,0x6f,0x9,0xbe,0x65,0xe0,0xba,0x3e,0x16,0xff,0x11,0x3e,0x60,0x69,0x38,0xbe,0x5f,0x7c,0xb6,0x3e,0xd4,0xec,0x13,0xbf,0xbd,0xa3,0xc9,0xbe,0x3,0x41,0x4e,0x3f,0xa9,0xf2,0x86,0xbe,0x70,0x4f,0x85,0xbc,0x53,0xec,0x9a,0x3e,0x4f,0xc9,0xe9,0x3e,0xe2,0xfa,0x60,0x3e,0x9c,0x7f,0x60,0xbe,0x86,0xc7,0x21,0x3e,0x2a,0xc5,0x0,0xbf,0xd1,0xda,0xaa,0x3e,0xaa,0x8b,0x14,0xbe,0x51,0x60,0x54,0xbe,0x48,0xc1,0xb7,0xbe,0xd3,0x8,0x38,0x3e,0x58,0x2e,0xeb,0x3e,0x3,0x92,0x4f,0x3e,0x5a,0x49,0xcb,0xbe,0xf5,0x1e,0xbf,0x3e,0x80,0xdc,0x9c,0xbe,0xcf,0x99,0xa2,0x3e,0x59,0x82,0x3d,0xbe,0x87,0x6f,0x98,0x3e,0x86,0xa5,0x8a,0xbe,0xe,0x9b,0x63,0xbe,0xfb,0x7a,0x33,0xbe,0x6,0x10,0x71,0xbe,0xa8,0xfc,0x10,0xbd,0x9c,0x46,0x91,0x3d,0x88,0x3c,0x92,0xbe,0xd4,0xbc,0xf4,0xbd,0x4d,0x74,0xbf,0x3e,0x88,0x6c,0x3,0xbd,0x7b,0xe9,0xdb,0xbe,0x9f,0xdf,0xc1,0x3e,0x6c,0xe4,0x82,0x3d,0x44,0x78,0xd5,0x3d,0x80,0x8a,
0xfc,0xbb,0x6,0x7b,0x14,0x3e,0xb0,0x24,0x21,0x3d,0x18,0xce,0x9,0x3d,0xc2,0x28,0xb3,0xbe,0xd6,0xb0,0x8,0xbe,0x1c,0x63,0xc3,0xbe,0x80,0x98,0x5e,0x3b,0xac,0xd8,0xe7,0x3d,0x31,0x12,0xa4,0x3c,0x22,0x2,0xed,0x3d,0x47,0xf3,0xa6,0xbb,0x82,0xab,0xd8,0x3e,0x0,0xfc,0x1,0xbb,0xa7,0xc9,0x8a,0x3e,0x80,0x5d,0x97,0xbd,0x5f,0xe8,0x3b,0xbe,0x44,0x62,0xc2,0xbd,0x8,0x96,0x78,0xbd,0xda,0xd8,0x72,0x3e,0xa0,0xf3,0x8f,0x3e,0x68,0xd8,0x33,0xbd,0x26,0x14,0x17,0x3e,0xac,0xc0,0xc9,0xbe,0x8e,0x8a,0x94,0xbe,0x50,0x3d,0xb5,0xbc,0xcf,0xbb,0x82,0x3e,0x9a,0x88,0xb3,0xbe,0x11,0x8a,0xda,0xbe,0xe9,0xd9,0x95,0x3e,0xa0,0x13,0x4b,0x3d,0xf9,0xb6,0x83,0x3e,0xf4,0x14,0xbc,0xbe,0x1c,0x89,0xc1,0x3d,0xeb,0xee,0xca,0x3e,0xfc,0x30,0xab,0xbe,0xfc,0x62,0x9b,0xbd,0x50,0x74,0xaf,0xbe,0x37,0x16,0xd6,0x3e,0x30,0x3e,0xb5,0xbc,0x0,0xd1,0xf6,0x3a,0x66,0xbd,0xf9,0x3d,0x94,0x2a,0x6,0x3f,0xf7,0xc8,0xcb,0xbe,0x4a,0xa5,0xdc,0xbe,0xb5,0xd0,0xa2,0xbe,0x99,0xf0,0x7a,0xbe,0x42,0x1b,0x53,0xbe,0xdf,0x90,0x6e,0xbe,0xee,0xfe,0xbf,0x3e,0x80,0xd3,0x53,0x3c,0x20,0x0,0x45,0x3c,0x2c,0xd0,0x4f,0xbe,0xf0,0x67,0xdf,0xbd,0xce,0xb1,0x5,0x3e,0xc,0x4a,0xf3,0x3d,0x3a,0xf1,0x50,0x3e,0xa0,0xb2,0x29,0xbe,0x78,0x69,0x14,0x3e,0x44,0x93,0xf8,0x3d,0x24,0x67,0xa3,0x3d,0x7a,0x9b,0x96,0xbe,0x48,0x69,0xca,0xbd,0x7c,0xea,0xab,0x3d,0x32,0xd6,0x8b,0x3e,0xa3,0xca,0x47,0xbd,0xbe,0x1a,0xcd,0xbe,0xc1,0x54,0xce,0x3e,0xd8,0xbb,0xc3,0x3e,0x5c,0xfc,0xdb,0xbe,0x50,0xf0,0xa0,0x3c,0x80,0x30,0xd0,0x3c,0x65,0x29,0xb3,0xbe,0xf1,0x1f,0xaf,0xbc,0x40,0xfa,0xcd,0x3e,0xf3,0x33,0x46,0xbe,0xe8,0x9a,0xe7,0xbe,0x10,0xa1,0x9d,0xbe,0xce,0xc4,0x43,0x3e,0x16,0xaf,0x5c,0xbe,0x5,0xf8,0xf,0xbf,0x9a,0x81,0xd0,0x3e,0x80,0x94,0xc6,0x3b,0x2b,0xc,0x5f,0xbe,0x3d,0xc6,0xbf,0x3e,0x28,0xf6,0x7d,0xbd,0xca,0x61,0x8e,0xbe,0x40,0x24,0xe7,0x3c,0xc3,0xf1,0x83,0x3e,0x27,0x30,0x3f,0x3e,0x35,0x15,0xda,0xbe,0x60,0xa3,0xa7,0xbb,0xfc,0x0,0x65,0x3f,0x7,0x96,0xbd,0x3e,0x37,0x93,0x83,0x3e,0x88,0x45,0x3b,0x3d,0xf5,0xff,0xc7,0xbc,0x30,0x5d,0x8b,0xbd,0xc5,0x79,0x91,0x3e,0x78,0x14,0x15,0xbe,0xef,0xe5,0x23,0xbf,0x11,0x
2a,0xa7,0x3e,0x41,0x7e,0xda,0x3e,0xb7,0x32,0x7b,0xbe,0x8d,0x63,0xc4,0x3e,0x3e,0x29,0x26,0x3e,0xbc,0x5b,0xe9,0xbd,0x90,0x49,0x59,0x3d,0xe0,0x87,0x7d,0xbc,0xb1,0xef,0xac,0x3e,0xb8,0x30,0x16,0xbd,0xac,0x56,0x8e,0xbd,0x18,0x58,0xbb,0xbe,0x90,0x6f,0xab,0xbd,0xe3,0x61,0x84,0x3e,0x48,0x41,0x6d,0x3d,0xfb,0x22,0xcd,0x3e,0x80,0x9b,0x2,0x3c,0x8d,0xc3,0xb1,0xbe,0xca,0xb2,0xcc,0xbe,0x62,0xab,0x65,0xbe,0xaf,0x17,0x53,0x3e,0x97,0xdf,0x7,0xbe,0x98,0x21,0x7f,0x3e,0x63,0x10,0x51,0x3f,0x4e,0x1e,0x3,0x3e,0x38,0xa3,0x99,0xbe,0x78,0x1f,0x20,0xbe,0xd,0xda,0xf2,0x3e,0x86,0xac,0x43,0xbe,0x39,0xcb,0xa9,0x3e,0x20,0x72,0x52,0x3d,0x2,0x97,0xca,0xbe,0x5c,0xe8,0xd8,0xbd,0x5f,0x38,0xb2,0x3e,0x83,0x15,0xbc,0x3e,0xa7,0xfb,0xa2,0x3e,0xae,0x3c,0x77,0xbe,0x0,0xe4,0x7e,0xbe,0xb,0xc4,0x7c,0xbe,0x13,0x4c,0x4b,0x3f,0x73,0x84,0xd0,0x3e,0xe0,0x67,0x55,0x3c,0xa4,0x27,0xa7,0xbe,0x6f,0x6f,0xed,0xbd,0xc5,0xb8,0xe,0x3f,0x50,0x5d,0x80,0x3c,0x6e,0x37,0x9,0x3e,0x91,0x74,0x2f,0xbf,0xec,0x2b,0xb1,0x3d,0xff,0xad,0x83,0x3e,0xc,0x4,0xbb,0xbd,0x88,0xdc,0xb7,0xbd,0xb5,0x1b,0x22,0xbe,0x88,0x9b,0x86,0x3e,0xef,0x1a,0x40,0x3e,0x7a,0x62,0xd0,0xbe,0xfc,0x4d,0xef,0x3d,0x14,0xe9,0xdb,0xbe,0x81,0x7c,0x89,0xbe,0xf,0xd7,0x7c,0x3e,0x91,0xcd,0x16,0xbe,0x6b,0xfb,0x87,0x3e,0xc2,0xbf,0x8f,0xbe,0x64,0x69,0x84,0x3e,0x8f,0x1c,0xd6,0xbe,0x3c,0x63,0xb7,0xbd,0x6a,0x68,0x60,0x3e,0xcd,0x69,0x93,0x3e,0xcb,0x23,0x87,0xbe,0xf,0xe1,0xa8,0xbe,0x30,0x40,0x2d,0x3e,0xb6,0xc9,0x2c,0x3d,0xb4,0x1e,0x52,0xbe,0x49,0x94,0xc1,0xbe,0x0,0x2b,0x9e,0xbb,0x44,0x9e,0xaa,0x3e,0xb,0xa2,0x9e,0x3e,0x4a,0x26,0x37,0xbe,0x8,0x8e,0x30,0xbe,0x54,0xbf,0x69,0x3d,0x50,0x33,0xa1,0xbe,0xdf,0x29,0xcb,0xbe,0x56,0xff,0xff,0xff,0x4,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0x30,0x77,0xf9,0xbd,0xbb,0x30,0xc9,0xbe,0x45,0xf5,0x48,0x3e,0x52,0x14,0x32,0x3f,0x64,0xcc,0x12,0x3e,0xe0,0xe1,0x83,0xbd,0xec,0x89,0x38,0xbe,0x10,0xd0,0x5e,0xbd,0x36,0x7f,0xec,0xbe,0xd3,0xa6,0xcf,0x3e,0x42,0x2b,0x8f,0x3e,0xff,0x9e,0x3,0xbf,0xf0,0x88,0xd7,0xbe,0xbc,0x27,0x20,0x3f,0x52,0xa5,0xbf,0xbe,0x30,0xa3,0xa9,0xbe,0xa2,0xff,0
xff,0xff,0x4,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x99,0xde,0xc1,0x3e,0xf8,0xdf,0x38,0xbf,0x9d,0x42,0xba,0x3d,0x32,0x8f,0x5b,0x3f,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x9d,0xa1,0x97,0x3f,0x16,0xce,0x55,0xbe,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x49,0xbe,0x41,0xbc,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xee,0xff,0xff,0xff,0x4,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xbc,0xf9,0x23,0xbe,0x0,0x0,0x6,0x0,0x8,0x0,0x4,0x0,0x6,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc5,0xb3,0x0,0x3f,0xa4,0xba,0xd0,0x3e,0x52,0xce,0x82,0xbe,0x0,0x0,0x0,0x0,0x4f,0x1b,0x33,0x3e,0x0,0x0,0x0,0x0,0x21,0x72,0x77,0xbe,0xcc,0xcd,0x8f,0x3d,0xfd,0xa3,0xdc,0xbe,0x88,0xe3,0x18,0x3f,0x0,0x0,0x0,0x0,0x91,0x8c,0x61,0x3e,0xe,0x76,0xb,0x3f,0xb0,0x55,0x47,0xbe,0x14,0x9,0x92,0xbd,0x20,0xfb,0xff,0xff,0x24,0xfb,0xff,0xff,0xf,0x0,0x0,0x0,0x4d,0x4c,0x49,0x52,0x20,0x43,0x6f,0x6e,0x76,0x65,0x72,0x74,0x65,0x64,0x2e,0x0,0x1,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x0,0x0,0xe,0x0,0x18,0x0,0x14,0x0,0x10,0x0,0xc,0x0,0x8,0x0,0x4,0x0,0xe,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0xd8,0x0,0x0,0x0,0xdc,0x0,0x0,0x0,0xe0,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x6d,0x61,0x69,0x6e,0x0,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x80,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x9a,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0xc,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x9c,0xfb,0xff,0xff,0x1,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xca,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x10,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0xba,0xff,0xff,0xff,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x5,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0xe,0x0,0x16,0x0,0x0,0x0,0x10,0x0,0xc,0x0,0xb,0x0,0x4,0x0,0xe,0x0,0x0,0x0,0x18,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x18,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x8,0x0,0x7,0x0,0x6,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x1,0
x0,0x0,0x0,0x0,0x0,0x0,0x0,0xa,0x0,0x0,0x0,0x8c,0x3,0x0,0x0,0x1c,0x3,0x0,0x0,0xac,0x2,0x0,0x0,0x58,0x2,0x0,0x0,0x10,0x2,0x0,0x0,0xc4,0x1,0x0,0x0,0x78,0x1,0x0,0x0,0xf0,0x0,0x0,0x0,0x60,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xb2,0xfc,0xff,0xff,0x0,0x0,0x0,0x1,0x14,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0xa,0x0,0x0,0x0,0x34,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0x9c,0xfc,0xff,0xff,0x19,0x0,0x0,0x0,0x53,0x74,0x61,0x74,0x65,0x66,0x75,0x6c,0x50,0x61,0x72,0x74,0x69,0x74,0x69,0x6f,0x6e,0x65,0x64,0x43,0x61,0x6c,0x6c,0x3a,0x30,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0xa,0xfd,0xff,0xff,0x0,0x0,0x0,0x1,0x14,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x68,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0xf4,0xfc,0xff,0xff,0x4c,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x52,0x65,0x6c,0x75,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x0,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x96,0xfd,0xff,0xff,0x0,0x0,0x0,0x1,0x14,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x60,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0x80,0xfd,0xff,0xff,0x46,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x52,0x65,0x6c,0x75,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x86,0xfe,0xff,0xff,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x28,0x0,0x0,0x0,0xf4,0xfd,0xff,0xff,
0x19,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xce,0xfe,0xff,0xff,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x28,0x0,0x0,0x0,0x3c,0xfe,0xff,0xff,0x19,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x16,0xff,0xff,0xff,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x5,0x0,0x0,0x0,0x24,0x0,0x0,0x0,0x84,0xfe,0xff,0xff,0x17,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x2,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x5a,0xff,0xff,0xff,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x34,0x0,0x0,0x0,0xc8,0xfe,0xff,0xff,0x27,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61,0x72,0x69,0x61,0x62,0x6c,0x65,0x4f,0x70,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xaa,0xff,0xff,0xff,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x18,0xff,0xff,0xff,0x29,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61,0x72,0x69,0x61,0x62,0x6c,0x65,0x4f,0x70,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x16,0x0,0x18,0x0,0x14,0x0,0x0,0x0,0x10,0x0,0xc,0x0,0x8,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x7,0x0,0x16,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x84,0xff,0xff,0xff,0x29,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61
,0x72,0x69,0x61,0x62,0x6c,0x65,0x4f,0x70,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0x16,0x0,0x1c,0x0,0x18,0x0,0x0,0x0,0x14,0x0,0x10,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x8,0x0,0x7,0x0,0x16,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x14,0x0,0x0,0x0,0x20,0x0,0x0,0x0,0x20,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x3c,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0x4,0x0,0x4,0x0,0x4,0x0,0x0,0x0,0x1d,0x0,0x0,0x0,0x73,0x65,0x72,0x76,0x69,0x6e,0x67,0x5f,0x64,0x65,0x66,0x61,0x75,0x6c,0x74,0x5f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x69,0x6e,0x70,0x75,0x74,0x3a,0x30,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xc,0x0,0xc,0x0,0xb,0x0,0x0,0x0,0x0,0x0,0x4,0x0,0xc,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x0,0x0,0x0,0x9}; diff --git a/examples/hello_world/hello_world_float_model_data.h b/examples/hello_world/hello_world_float_model_data.h new file mode 100644 index 0000000..63e0abb --- /dev/null +++ b/examples/hello_world/hello_world_float_model_data.h @@ -0,0 +1,4 @@ +#include + +extern const unsigned int g_hello_world_float_model_data_size; +extern const unsigned char g_hello_world_float_model_data[]; diff --git a/examples/hello_world/hello_world_int8_model_data.cpp b/examples/hello_world/hello_world_int8_model_data.cpp new file mode 100644 index 0000000..0f4a582 --- /dev/null +++ b/examples/hello_world/hello_world_int8_model_data.cpp @@ -0,0 +1,6 @@ +#include + +#include "hello_world_int8_model_data.h" + +const unsigned int g_hello_world_int8_model_data_size = 2704; +alignas(16) const unsigned char g_hello_world_int8_model_data[] = 
{0x28,0x0,0x0,0x0,0x54,0x46,0x4c,0x33,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x14,0x0,0x20,0x0,0x4,0x0,0x8,0x0,0xc,0x0,0x10,0x0,0x14,0x0,0x0,0x0,0x18,0x0,0x1c,0x0,0x14,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x3c,0xa,0x0,0x0,0xf0,0x3,0x0,0x0,0xd8,0x3,0x0,0x0,0xe0,0x0,0x0,0x0,0x80,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0xa,0x0,0x10,0x0,0x4,0x0,0x8,0x0,0xc,0x0,0xa,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0x1c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xf,0x0,0x0,0x0,0x73,0x65,0x72,0x76,0x69,0x6e,0x67,0x5f,0x64,0x65,0x66,0x61,0x75,0x6c,0x74,0x0,0x1,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x98,0xff,0xff,0xff,0x8,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x0,0x1,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xba,0xfc,0xff,0xff,0x4,0x0,0x0,0x0,0xb,0x0,0x0,0x0,0x64,0x65,0x6e,0x73,0x65,0x5f,0x69,0x6e,0x70,0x75,0x74,0x0,0x2,0x0,0x0,0x0,0x34,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xdc,0xff,0xff,0xff,0x8,0x0,0x0,0x0,0xc,0x0,0x0,0x0,0x13,0x0,0x0,0x0,0x43,0x4f,0x4e,0x56,0x45,0x52,0x53,0x49,0x4f,0x4e,0x5f,0x4d,0x45,0x54,0x41,0x44,0x41,0x54,0x41,0x0,0x8,0x0,0xc,0x0,0x4,0x0,0x8,0x0,0x8,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0xb,0x0,0x0,0x0,0x13,0x0,0x0,0x0,0x6d,0x69,0x6e,0x5f,0x72,0x75,0x6e,0x74,0x69,0x6d,0x65,0x5f,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x0,0xd,0x0,0x0,0x0,0xec,0x2,0x0,0x0,0xe4,0x2,0x0,0x0,0xcc,0x2,0x0,0x0,0x98,0x2,0x0,0x0,0x44,0x2,0x0,0x0,0x30,0x1,0x0,0x0,0xdc,0x0,0x0,0x0,0xb8,0x0,0x0,0x0,0xb0,0x0,0x0,0x0,0xa8,0x0,0x0,0x0,0xa0,0x0,0x0,0x0,0x78,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x66,0xfd,0xff,0xff,0x4,0x0,0x0,0x0,0x58,0x0,0x0,0x0,0xc,0x0,0x0,0x0,0x8,0x0,0xe,0x0,0x8,0x0,0x4,0x0,0x8,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x28,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x8,0x0,0x4,0x0,0x6,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0xeb,0x3,0x0,0x0,0x0,0x0,0xa,0x0,0x10,0x0,0xc,0x0,0x8,0x0,0x4,0x0,0xa,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x32,0x2e,0x31,0x31,0x2e,0x30,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd6,0xfd,0xff,0xff,0x4,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x31
,0x2e,0x31,0x34,0x2e,0x30,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x64,0xfd,0xff,0xff,0x68,0xfd,0xff,0xff,0x6c,0xfd,0xff,0xff,0x6,0xfe,0xff,0xff,0x4,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xf7,0xca,0x39,0x47,0x68,0x73,0x62,0x63,0x40,0xe6,0x7f,0x19,0xae,0x44,0x5f,0x56,0x0,0x0,0x0,0x0,0x26,0xfe,0xff,0xff,0x4,0x0,0x0,0x0,0x40,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc2,0xea,0xff,0xff,0x75,0xea,0xff,0xff,0xb8,0xfa,0xff,0xff,0x24,0xfa,0xff,0xff,0xc8,0xef,0xff,0xff,0xac,0xff,0xff,0xff,0x44,0xd,0x0,0x0,0x0,0x0,0x0,0x0,0xbd,0x7,0x0,0x0,0x33,0xea,0xff,0xff,0x0,0x0,0x0,0x0,0xcc,0xe4,0xff,0xff,0x4f,0xd,0x0,0x0,0xcf,0xe3,0xff,0xff,0x0,0x0,0x0,0x0,0x76,0xfe,0xff,0xff,0x4,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0xf4,0x1a,0xed,0x9,0x19,0x21,0xf4,0x24,0xe0,0x21,0xef,0xbc,0xf7,0xf5,0xfa,0x19,0x3,0xdc,0xd2,0x2,0x6,0xf9,0xf4,0x2,0xff,0xfa,0xef,0xf1,0xef,0xd3,0x27,0xe1,0xfb,0x27,0xdd,0xeb,0xdb,0xe4,0x5,0x1a,0x17,0xfc,0x24,0x12,0x15,0xef,0x1e,0xe4,0x10,0xfe,0x14,0xda,0x1c,0xf8,0xf3,0xf1,0xef,0xe2,0xf3,0x9,0xe3,0xe9,0xed,0xe3,0xe4,0x15,0x7,0xb,0x4,0x1b,0x1a,0xfe,0xeb,0x1,0xde,0x21,0xe6,0xb,0xec,0x3,0x23,0xa,0x22,0x24,0x1e,0x27,0x3,0xe6,0x3,0x24,0xff,0xc0,0x11,0xf8,0xfc,0xf1,0x11,0xc,0xf5,0xe0,0xf3,0x7,0x17,0xe5,0xe8,0xed,0xfa,0xdc,0xe8,0x23,0xfb,0x7,0xdd,0xfb,0xfd,0x0,0x14,0x26,0x11,0x17,0xe7,0xf1,0x11,0xea,0x2,0x26,0x4,0x4,0x25,0x21,0x1d,0xa,0xdb,0x1d,0xdc,0x20,0x1,0xfa,0xe3,0x37,0xb,0xf1,0x1a,0x16,0xef,0x1c,0xe7,0x3,0xe0,0x16,0x2,0x3,0x21,0x18,0x9,0x2e,0xd9,0xe5,0x14,0xb,0xea,0x1a,0xfc,0xd8,0x13,0x0,0xc4,0xd8,0xec,0xd9,0xfe,0xd,0x19,0x20,0xd8,0xd6,0xe2,0x1f,0xe9,0xd7,0xca,0xe2,0xdd,0xc6,0x13,0xe7,0x4,0x3e,0x0,0x1,0x14,0xc7,0xdb,0xe7,0x15,0x15,0xf5,0x6,0xd6,0x1a,0xdc,0x9,0x22,0xfe,0x8,0x2,0x13,0xef,0x19,0x1e,0xe2,0x9,0xfd,0xf3,0x14,0xdd,0xda,0x20,0xd9,0xf,0xe3,0xf9,0xf7,0xee,0xe9,0x24,0xe6,0x29,0x0,0x7,0x16,0xe2,0x1e,0xd,0x23,0xd3,0xdd,0xf7,0x14,0xfa,0x8,0x22,0x26,0x21,0x9,0x8,0xf,0xb,0xe0,0x12,0xf4,0x7f,0xdc,0x58,0xe5,0x26,0x0,0x0,0x0,0x0,0x86,0xff,0xff,0xff,0x4,0x0,0x0,
0x0,0x40,0x0,0x0,0x0,0x27,0xfd,0xff,0xff,0xa2,0x7,0x0,0x0,0x62,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0xf1,0x0,0x0,0x0,0x29,0xfe,0xff,0xff,0xdd,0xff,0xff,0xff,0x9d,0xfc,0xff,0xff,0x3b,0x2,0x0,0x0,0x45,0x2,0x0,0x0,0xa4,0x10,0x0,0x0,0x67,0xf,0x0,0x0,0x4f,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x87,0xfc,0xff,0xff,0x11,0xec,0xff,0xff,0x0,0x0,0x0,0x0,0xd6,0xff,0xff,0xff,0x4,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xd9,0x3b,0x27,0x15,0x1c,0xe0,0xde,0xdd,0xf,0x1b,0xc5,0xd7,0x12,0xdd,0xf9,0x7f,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x8,0x0,0x4,0x0,0x6,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xad,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x84,0xff,0xff,0xff,0x88,0xff,0xff,0xff,0xf,0x0,0x0,0x0,0x4d,0x4c,0x49,0x52,0x20,0x43,0x6f,0x6e,0x76,0x65,0x72,0x74,0x65,0x64,0x2e,0x0,0x1,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x0,0x0,0xe,0x0,0x18,0x0,0x4,0x0,0x8,0x0,0xc,0x0,0x10,0x0,0x14,0x0,0xe,0x0,0x0,0x0,0x4,0x1,0x0,0x0,0xf8,0x0,0x0,0x0,0xec,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x6d,0x61,0x69,0x6e,0x0,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x94,0x0,0x0,0x0,0x4c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xca,0xff,0xff,0xff,0x0,0x0,0x0,0x8,0x1c,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x4,0x0,0x4,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0xe,0x0,0x14,0x0,0x0,0x0,0x8,0x0,0xc,0x0,0x7,0x0,0x10,0x0,0xe,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x1c,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xba,0xff,0xff,0xff,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x0,0x0,0xe,0x0,0x16,0x0,0x0,0x0,0x8,0x0,0xc,0x0,0x7,0x0,0x10,0x0,0xe,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x24,0x0,0x0,0x0,0x18,0x0,0x0,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x8,0x0,0x7,0x0,0x6,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x5,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xa,0x0,0x0,0x0,0x9c,0x4,0x0,0x0,0xc,0x4,0x0,0x0,0x88,0x3,0x0,0
x0,0x14,0x3,0x0,0x0,0xa8,0x2,0x0,0x0,0x34,0x2,0x0,0x0,0xd0,0x1,0x0,0x0,0x2c,0x1,0x0,0x0,0x80,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xa2,0xfb,0xff,0xff,0x0,0x0,0x9,0x1,0x64,0x0,0x0,0x0,0xa,0x0,0x0,0x0,0x3c,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0x8c,0xfb,0xff,0xff,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x5,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0xcb,0xd6,0x7,0x3c,0x19,0x0,0x0,0x0,0x53,0x74,0x61,0x74,0x65,0x66,0x75,0x6c,0x50,0x61,0x72,0x74,0x69,0x74,0x69,0x6f,0x6e,0x65,0x64,0x43,0x61,0x6c,0x6c,0x3a,0x30,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1a,0xfc,0xff,0xff,0x0,0x0,0x9,0x1,0x94,0x0,0x0,0x0,0x9,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0x4,0xfc,0xff,0xff,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0x5d,0x4f,0x51,0x3c,0x4c,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x52,0x65,0x6c,0x75,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x0,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xc2,0xfc,0xff,0xff,0x0,0x0,0x9,0x1,0x8c,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x10,0x0,0x0,0x0,0xac,0xfc,0xff,0xff,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0x9f,0x51,0x5a,0x3c,0x46,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x3b,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x52,0x65,0x6c,0x75,0x3b,0x73,0x65,0x
71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xee,0xfd,0xff,0xff,0x0,0x0,0x9,0x1,0x4c,0x0,0x0,0x0,0x7,0x0,0x0,0x0,0x28,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x3c,0xfd,0xff,0xff,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0xaa,0x59,0x84,0x3b,0x17,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x2,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x4e,0xfe,0xff,0xff,0x0,0x0,0x2,0x1,0x60,0x0,0x0,0x0,0x6,0x0,0x0,0x0,0x2c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x9c,0xfd,0xff,0xff,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x55,0x5b,0xcf,0x38,0x27,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61,0x72,0x69,0x61,0x62,0x6c,0x65,0x4f,0x70,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xbe,0xfe,0xff,0xff,0x0,0x0,0x9,0x1,0x54,0x0,0x0,0x0,0x5,0x0,0x0,0x0,0x2c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xc,0xfe,0xff,0xff,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x7f,0x7f,0x32,0x3c,0x19,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x26,0xff,0xff,0xff,0x0,0x0,0x2,0x1,0x60,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x28,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x74,0xfe,0xff,0xff,0x14,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x7b,0x39,0x18,0x39,0x29,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x31,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61,0x72,0x69,0x61,0x62,0x6c,0x6
5,0x4f,0x70,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x96,0xff,0xff,0xff,0x0,0x0,0x9,0x1,0x54,0x0,0x0,0x0,0x3,0x0,0x0,0x0,0x2c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0xe4,0xfe,0xff,0xff,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x17,0x44,0x7c,0x3c,0x19,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x2f,0x4d,0x61,0x74,0x4d,0x75,0x6c,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0x16,0x0,0x18,0x0,0x8,0x0,0x6,0x0,0xc,0x0,0x10,0x0,0x14,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x7,0x0,0x16,0x0,0x0,0x0,0x0,0x0,0x2,0x1,0x64,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x2c,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x64,0xff,0xff,0xff,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0xcb,0x41,0x4e,0x39,0x29,0x0,0x0,0x0,0x73,0x65,0x71,0x75,0x65,0x6e,0x74,0x69,0x61,0x6c,0x2f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x32,0x2f,0x42,0x69,0x61,0x73,0x41,0x64,0x64,0x2f,0x52,0x65,0x61,0x64,0x56,0x61,0x72,0x69,0x61,0x62,0x6c,0x65,0x4f,0x70,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x0,0x0,0x16,0x0,0x1c,0x0,0x8,0x0,0x6,0x0,0xc,0x0,0x10,0x0,0x14,0x0,0x0,0x0,0x0,0x0,0x18,0x0,0x7,0x0,0x16,0x0,0x0,0x0,0x0,0x0,0x9,0x1,0x74,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x48,0x0,0x0,0x0,0x20,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0xff,0xff,0xff,0xff,0x1,0x0,0x0,0x0,0xc,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x4,0x0,0x8,0x0,0xc,0x0,0x0,0x0,0x18,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x0,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x86,0x8a,0xc8,0x3c,0x1d,0x0,0x0,0x0,0x73,0x65,0x72,0x76,0x69,0x6e,0x67,0x5f,0x64,0x65,0x66,0x61,0x75,0x6c,0x74,0x5f,0x64,0x65,0x6e,0x73,0x65,0x5f,0x69,0x6e,0x70,0x75,0x74,0x3a,0x30,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x1,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0xc,0x0,0x10,0x0,0x7,0x0,0x0,0x0,0x8,0x0,0xc,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x0,0x9,0x4,0x0,0x0,0x0,0x9,0x0,0x0,0x0}; diff --git 
a/examples/hello_world/hello_world_int8_model_data.h b/examples/hello_world/hello_world_int8_model_data.h new file mode 100644 index 0000000..c0c87a5 --- /dev/null +++ b/examples/hello_world/hello_world_int8_model_data.h @@ -0,0 +1,4 @@ +#include + +extern const unsigned int g_hello_world_int8_model_data_size; +extern const unsigned char g_hello_world_int8_model_data[]; diff --git a/examples/hello_world/hello_world_test.cpp b/examples/hello_world/hello_world_test.cpp index 3326b1c..6218c4c 100644 --- a/examples/hello_world/hello_world_test.cpp +++ b/examples/hello_world/hello_world_test.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,118 +15,145 @@ limitations under the License. #include -#include "tensorflow/lite/micro/all_ops_resolver.h" -#include "model.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/core/c/common.h" +#include "hello_world_float_model_data.h" +#include "hello_world_int8_model_data.h" #include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "tensorflow/lite/micro/micro_profiler.h" +#include "tensorflow/lite/micro/recording_micro_interpreter.h" +#include "tensorflow/lite/micro/system_setup.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" -TF_LITE_MICRO_TESTS_BEGIN +namespace { +using HelloWorldOpResolver = tflite::MicroMutableOpResolver<1>; -TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { - // Define the input and the expected output - float x = 0.0f; - float y_true = sin(x); +TfLiteStatus RegisterOps(HelloWorldOpResolver& op_resolver) { + 
TF_LITE_ENSURE_STATUS(op_resolver.AddFullyConnected()); + return kTfLiteOk; +} +} // namespace + +TfLiteStatus ProfileMemoryAndLatency() { + tflite::MicroProfiler profiler; + HelloWorldOpResolver op_resolver; + TF_LITE_ENSURE_STATUS(RegisterOps(op_resolver)); + + // Arena size just a round number. The exact arena usage can be determined + // using the RecordingMicroInterpreter. + constexpr int kTensorArenaSize = 3000; + uint8_t tensor_arena[kTensorArenaSize]; + constexpr int kNumResourceVariables = 24; + + tflite::RecordingMicroAllocator* allocator( + tflite::RecordingMicroAllocator::Create(tensor_arena, kTensorArenaSize)); + tflite::RecordingMicroInterpreter interpreter( + tflite::GetModel(g_hello_world_float_model_data), op_resolver, allocator, + tflite::MicroResourceVariables::Create(allocator, kNumResourceVariables), + &profiler); + + TF_LITE_ENSURE_STATUS(interpreter.AllocateTensors()); + TFLITE_CHECK_EQ(interpreter.inputs_size(), 1); + interpreter.input(0)->data.f[0] = 1.f; + TF_LITE_ENSURE_STATUS(interpreter.Invoke()); + + MicroPrintf(""); // Print an empty new line + profiler.LogTicksPerTagCsv(); + + MicroPrintf(""); // Print an empty new line + interpreter.GetMicroAllocator().PrintAllocations(); + return kTfLiteOk; +} + +TfLiteStatus LoadFloatModelAndPerformInference() { + const tflite::Model* model = + ::tflite::GetModel(g_hello_world_float_model_data); + TFLITE_CHECK_EQ(model->version(), TFLITE_SCHEMA_VERSION); + + HelloWorldOpResolver op_resolver; + TF_LITE_ENSURE_STATUS(RegisterOps(op_resolver)); + + // Arena size just a round number. The exact arena usage can be determined + // using the RecordingMicroInterpreter. 
+ constexpr int kTensorArenaSize = 3000; + uint8_t tensor_arena[kTensorArenaSize]; + + tflite::MicroInterpreter interpreter(model, op_resolver, tensor_arena, + kTensorArenaSize); + TF_LITE_ENSURE_STATUS(interpreter.AllocateTensors()); + + // Check if the predicted output is within a small range of the + // expected output + float epsilon = 0.05f; + constexpr int kNumTestValues = 4; + float golden_inputs[kNumTestValues] = {0.f, 1.f, 3.f, 5.f}; + + for (int i = 0; i < kNumTestValues; ++i) { + interpreter.input(0)->data.f[0] = golden_inputs[i]; + TF_LITE_ENSURE_STATUS(interpreter.Invoke()); + float y_pred = interpreter.output(0)->data.f[0]; + TFLITE_CHECK_LE(abs(sin(golden_inputs[i]) - y_pred), epsilon); + } - // Set up logging - tflite::MicroErrorReporter micro_error_reporter; + return kTfLiteOk; +} +TfLiteStatus LoadQuantModelAndPerformInference() { // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. - const tflite::Model* model = ::tflite::GetModel(g_model); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.\n", - model->version(), TFLITE_SCHEMA_VERSION); - } + const tflite::Model* model = + ::tflite::GetModel(g_hello_world_int8_model_data); + TFLITE_CHECK_EQ(model->version(), TFLITE_SCHEMA_VERSION); - // This pulls in all the operation implementations we need - tflite::AllOpsResolver resolver; + HelloWorldOpResolver op_resolver; + TF_LITE_ENSURE_STATUS(RegisterOps(op_resolver)); - constexpr int kTensorArenaSize = 2000; + // Arena size just a round number. The exact arena usage can be determined + // using the RecordingMicroInterpreter. 
+ constexpr int kTensorArenaSize = 3000; uint8_t tensor_arena[kTensorArenaSize]; - // Build an interpreter to run the model with - tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, - kTensorArenaSize, µ_error_reporter); - // Allocate memory from the tensor_arena for the model's tensors - TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + tflite::MicroInterpreter interpreter(model, op_resolver, tensor_arena, + kTensorArenaSize); + + TF_LITE_ENSURE_STATUS(interpreter.AllocateTensors()); - // Obtain a pointer to the model's input tensor TfLiteTensor* input = interpreter.input(0); + TFLITE_CHECK_NE(input, nullptr); - // Make sure the input has the properties we expect - TF_LITE_MICRO_EXPECT_NE(nullptr, input); - // The property "dims" tells us the tensor's shape. It has one element for - // each dimension. Our input is a 2D tensor containing 1 element, so "dims" - // should have size 2. - TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size); - // The value of each element gives the length of the corresponding tensor. - // We should expect two single element tensors (one is contained within the - // other). - TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[1]); - // The input is an 8 bit integer value - TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type); - - // Get the input quantization parameters - float input_scale = input->params.scale; - int input_zero_point = input->params.zero_point; - - // Quantize the input from floating-point to integer - int8_t x_quantized = x / input_scale + input_zero_point; - // Place the quantized input in the model's input tensor - input->data.int8[0] = x_quantized; - - // Run the model and check that it succeeds - TfLiteStatus invoke_status = interpreter.Invoke(); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); - - // Obtain a pointer to the output tensor and make sure it has the - // properties we expect. It should be the same as the input tensor. 
TfLiteTensor* output = interpreter.output(0); - TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type); + TFLITE_CHECK_NE(output, nullptr); - // Get the output quantization parameters float output_scale = output->params.scale; int output_zero_point = output->params.zero_point; - // Obtain the quantized output from model's output tensor - int8_t y_pred_quantized = output->data.int8[0]; - // Dequantize the output from integer to floating-point - float y_pred = (y_pred_quantized - output_zero_point) * output_scale; + // Check if the predicted output is within a small range of the + // expected output + float epsilon = 0.05; - // Check if the output is within a small range of the expected output - float epsilon = 0.05f; - TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon); - - // Run inference on several more values and confirm the expected outputs - x = 1.f; - y_true = sin(x); - input->data.int8[0] = x / input_scale + input_zero_point; - interpreter.Invoke(); - y_pred = (output->data.int8[0] - output_zero_point) * output_scale; - TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon); - - x = 3.f; - y_true = sin(x); - input->data.int8[0] = x / input_scale + input_zero_point; - interpreter.Invoke(); - y_pred = (output->data.int8[0] - output_zero_point) * output_scale; - TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon); - - x = 5.f; - y_true = sin(x); - input->data.int8[0] = x / input_scale + input_zero_point; - interpreter.Invoke(); - y_pred = (output->data.int8[0] - output_zero_point) * output_scale; - TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon); + constexpr int kNumTestValues = 4; + float golden_inputs_float[kNumTestValues] = {0.77, 1.57, 2.3, 3.14}; + + // The int8 values are calculated using the following formula + // (golden_inputs_float[i] / input->params.scale + input->params.scale) + int8_t 
golden_inputs_int8[kNumTestValues] = {-96, -63, -34, 0}; + + for (int i = 0; i < kNumTestValues; ++i) { + input->data.int8[0] = golden_inputs_int8[i]; + TF_LITE_ENSURE_STATUS(interpreter.Invoke()); + float y_pred = (output->data.int8[0] - output_zero_point) * output_scale; + TFLITE_CHECK_LE(abs(sin(golden_inputs_float[i]) - y_pred), epsilon); + } + + return kTfLiteOk; } -TF_LITE_MICRO_TESTS_END +int main(int argc, char* argv[]) { + tflite::InitializeTarget(); + TF_LITE_ENSURE_STATUS(ProfileMemoryAndLatency()); + TF_LITE_ENSURE_STATUS(LoadFloatModelAndPerformInference()); + TF_LITE_ENSURE_STATUS(LoadQuantModelAndPerformInference()); + MicroPrintf("~~~ALL TESTS PASSED~~~\n"); + return kTfLiteOk; +} diff --git a/examples/hello_world/main_functions.cpp b/examples/hello_world/main_functions.cpp index 22eb294..ef16104 100644 --- a/examples/hello_world/main_functions.cpp +++ b/examples/hello_world/main_functions.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,20 +13,18 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#include "main_functions.h" - -#include "tensorflow/lite/micro/all_ops_resolver.h" #include "constants.h" -#include "model.h" +#include "hello_world_float_model_data.h" +#include "main_functions.h" #include "output_handler.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "tensorflow/lite/micro/system_setup.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" // Globals, used for compatibility with Arduino-style sketches. namespace { -tflite::ErrorReporter* error_reporter = nullptr; const tflite::Model* model = nullptr; tflite::MicroInterpreter* interpreter = nullptr; TfLiteTensor* input = nullptr; @@ -39,36 +37,37 @@ uint8_t tensor_arena[kTensorArenaSize]; // The name of this function is important for Arduino compatibility. void setup() { - // Set up logging. Google style is to avoid globals or statics because of - // lifetime uncertainty, but since this has a trivial destructor it's okay. - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroErrorReporter micro_error_reporter; - error_reporter = µ_error_reporter; + tflite::InitializeTarget(); // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. 
- model = tflite::GetModel(g_model); + model = tflite::GetModel(g_hello_world_float_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.", - model->version(), TFLITE_SCHEMA_VERSION); + MicroPrintf( + "Model provided is schema version %d not equal " + "to supported version %d.", + model->version(), TFLITE_SCHEMA_VERSION); return; } // This pulls in all the operation implementations we need. // NOLINTNEXTLINE(runtime-global-variables) - static tflite::AllOpsResolver resolver; + static tflite::MicroMutableOpResolver<1> resolver; + TfLiteStatus resolve_status = resolver.AddFullyConnected(); + if (resolve_status != kTfLiteOk) { + MicroPrintf("Op resolution failed"); + return; + } // Build an interpreter to run the model with. static tflite::MicroInterpreter static_interpreter( - model, resolver, tensor_arena, kTensorArenaSize, error_reporter); + model, resolver, tensor_arena, kTensorArenaSize); interpreter = &static_interpreter; // Allocate memory from the tensor_arena for the model's tensors. TfLiteStatus allocate_status = interpreter->AllocateTensors(); if (allocate_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed"); + MicroPrintf("AllocateTensors() failed"); return; } @@ -98,8 +97,7 @@ void loop() { // Run inference, and report any error TfLiteStatus invoke_status = interpreter->Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on x: %f\n", - static_cast(x)); + MicroPrintf("Invoke failed on x: %f\n", static_cast(x)); return; } @@ -110,7 +108,7 @@ void loop() { // Output the results. A custom HandleOutput function can be implemented // for each supported hardware target. 
- HandleOutput(error_reporter, x, y); + HandleOutput(x, y); // Increment the inference_counter, and reset it if we have reached // the total number per cycle diff --git a/examples/hello_world/model.cpp b/examples/hello_world/model.cpp deleted file mode 100644 index f04a9f6..0000000 --- a/examples/hello_world/model.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Automatically created from a TensorFlow Lite flatbuffer using the command: -// xxd -i model.tflite > model.cc - -// This is a standard TensorFlow Lite model file that has been converted into a -// C data array, so it can be easily compiled into a binary for devices that -// don't have a file system. - -// See train/README.md for a full description of the creation process. - -#include "model.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. 
-alignas(8) const unsigned char g_model[] = { - 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x14, 0x00, 0x20, 0x00, - 0x1c, 0x00, 0x18, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x98, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x1c, 0x03, 0x00, 0x00, - 0x2c, 0x03, 0x00, 0x00, 0x30, 0x09, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x60, 0xf7, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, - 0x44, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xbc, 0xff, 0xff, 0xff, - 0x09, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x64, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x34, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x76, 0xfd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x64, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x32, 0x5f, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x13, 0x00, 0x00, 0x00, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x50, 0x02, 0x00, 0x00, 0x48, 0x02, 0x00, 0x00, - 0x34, 0x02, 0x00, 0x00, 0xdc, 0x01, 0x00, 0x00, 0x8c, 0x01, 0x00, 0x00, - 0x6c, 0x01, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xfa, 0xfd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x31, 0x2e, 0x35, 0x2e, 0x30, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x84, 0xfd, 0xff, 0xff, - 0x88, 0xfd, 0xff, 0xff, 0x8c, 0xfd, 0xff, 0xff, 0x22, 0xfe, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x21, 0xa5, 0x8b, 0xca, - 0x5e, 0x1d, 0xce, 0x42, 0x9d, 0xce, 0x1f, 0xb0, 0xdf, 0x54, 0x2f, 0x81, - 0x3e, 0xfe, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0xee, 0xfc, 0x00, 0xec, 0x05, 0x17, 0xef, 0xec, 0xe6, 0xf8, 0x03, 0x01, - 0x00, 0xfa, 0xf8, 0xf5, 0xdc, 0xeb, 0x27, 0x14, 0xf1, 0xde, 0xe2, 0xdb, - 0xf0, 0xde, 0x31, 0x06, 0x02, 0xe6, 0xee, 0xf9, 0x00, 0x16, 0x07, 0xe0, - 0xfe, 0xff, 0xe9, 0x06, 0xe7, 0xef, 0x81, 0x1b, 0x18, 0xea, 0xc9, 0x01, - 0x0f, 0x00, 0xda, 0xf7, 0x0e, 0xec, 0x13, 0x1f, 0x04, 0x13, 0xb4, 0xe6, - 0xfd, 0x06, 0xb9, 0xe0, 0x0d, 0xec, 0xf0, 0xde, 0xeb, 0xf7, 0x05, 0x26, - 0x1a, 0xe4, 0x6f, 0x1a, 0xea, 0x1e, 0x35, 0xdf, 0x1a, 0xf3, 0xf1, 0x19, - 0x0f, 0x03, 0x1b, 0xe1, 0xde, 0x13, 0xf6, 0x19, 0xff, 0xf6, 0x1b, 0x18, - 0xf0, 0x1c, 0xda, 0x1b, 0x1b, 0x20, 0xe5, 0x1a, 0xf5, 0xff, 0x96, 0x0b, - 0x00, 0x01, 0xcd, 0xde, 0x0d, 0xf6, 0x16, 0xe3, 0xed, 0xfc, 0x0e, 0xe9, - 0xfa, 0xeb, 0x5c, 0xfc, 0x1d, 0x02, 0x5b, 0xe2, 0xe1, 0xf5, 0x15, 0xec, - 0xf4, 0x00, 0x13, 0x05, 0xec, 0x0c, 0x1d, 0x14, 0x0e, 0xe7, 0x0b, 0xf4, - 0x19, 0x00, 0xd7, 0x05, 0x27, 0x02, 0x15, 0xea, 0xea, 0x02, 0x9b, 0x00, - 0x0c, 0xfa, 0xe8, 0xea, 0xfd, 0x00, 0x14, 0xfd, 0x0b, 0x02, 0xef, 0xee, - 0x06, 0xee, 0x01, 0x0d, 0x06, 0xe6, 0xf7, 0x11, 0xf7, 0x09, 0xf8, 0xf1, - 0x21, 0xff, 0x0e, 0xf3, 0xec, 0x12, 0x26, 0x1d, 0xf2, 0xe9, 0x28, 0x18, - 0xe0, 0xfb, 0xf3, 0xf4, 0x05, 0x1d, 0x1d, 0xfb, 0xfd, 0x1e, 0xfc, 0x11, - 0xe8, 0x07, 0x09, 0x03, 0x12, 0xf2, 0x36, 0xfb, 0xdc, 0x1c, 0xf9, 0xef, - 0xf3, 0xe7, 0x6f, 0x0c, 0x1d, 0x00, 0x45, 0xfd, 0x0e, 0xf0, 0x0b, 0x19, - 0x1a, 0xfa, 0xe0, 0x19, 0x1f, 0x13, 0x36, 0x1c, 0x12, 0xeb, 0x3b, 0x0c, - 0xb4, 0xcb, 0xe6, 0x13, 0xfa, 0xeb, 0xf1, 0x06, 0x1c, 0xfa, 0x18, 0xe5, - 0xeb, 0xcb, 0x0c, 0xf4, 0x4a, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 
0x75, 0x1c, 0x11, 0xe1, 0x0c, 0x81, 0xa5, 0x42, - 0xfe, 0xd5, 0xd4, 0xb2, 0x61, 0x78, 0x19, 0xdf, 0x66, 0xff, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x77, 0x0b, 0x00, 0x00, 0x53, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x77, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xd3, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x72, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x07, 0x00, 0x00, - 0x67, 0xf5, 0xff, 0xff, 0x34, 0xf0, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0xb2, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xb5, 0x04, 0x00, 0x00, 0x78, 0x0a, 0x00, 0x00, - 0x2d, 0x06, 0x00, 0x00, 0x71, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x9a, 0x0a, 0x00, 0x00, 0xfe, 0xf7, 0xff, 0xff, 0x0e, 0x05, 0x00, 0x00, - 0xd4, 0x09, 0x00, 0x00, 0x47, 0xfe, 0xff, 0xff, 0xb6, 0x04, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xac, 0xf7, 0xff, 0xff, 0x4b, 0xf9, 0xff, 0xff, - 0x4a, 0x05, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x8c, 0xef, 0xff, 0xff, 0x84, 0xff, 0xff, 0xff, 0x88, 0xff, 0xff, 0xff, - 0x0f, 0x00, 0x00, 0x00, 0x4d, 0x4c, 0x49, 0x52, 0x20, 0x43, 0x6f, 0x6e, - 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, - 0xe0, 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x84, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x96, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, - 0x04, 0x00, 0x00, 0x00, 
0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xca, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0xba, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x4c, 0x04, 0x00, 0x00, - 0xd0, 0x03, 0x00, 0x00, 0x68, 0x03, 0x00, 0x00, 0x0c, 0x03, 0x00, 0x00, - 0x98, 0x02, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0xb0, 0x01, 0x00, 0x00, - 0x24, 0x01, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xf0, 0xfb, 0xff, 0xff, 0x18, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x54, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x6c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xdc, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x4a, 0xce, 0x0a, 0x3c, 0x01, 0x00, 0x00, 0x00, - 0x34, 0x84, 0x85, 0x3f, 0x01, 0x00, 0x00, 0x00, 0xc5, 0x02, 0x8f, 0xbf, - 0x1e, 0x00, 0x00, 0x00, 0x53, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, - 0x50, 0x61, 0x72, 0x74, 
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x43, - 0x61, 0x6c, 0x6c, 0x3a, 0x30, 0x5f, 0x69, 0x6e, 0x74, 0x38, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xfc, 0xff, 0xff, 0x18, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x54, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x64, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x6c, 0xfc, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x93, 0xd0, 0xc0, 0x3b, 0x01, 0x00, 0x00, 0x00, - 0xc2, 0x0f, 0xc0, 0x3f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x74, 0x66, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, - 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x31, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x08, 0xfd, 0xff, 0xff, 0x18, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x64, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0xf4, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xe0, 0xdb, 0x47, 0x3c, 0x01, 0x00, 0x00, 0x00, 0x04, 0x14, 0x47, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x74, 0x66, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 
0x50, 0x00, 0x00, 0x00, 0x6c, 0xfd, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xfb, 0x4b, 0x0b, 0x3c, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x84, 0x4b, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x63, 0x35, 0x8a, 0xbf, 0x0d, 0x00, 0x00, 0x00, 0x73, 0x74, 0x64, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x32, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x72, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x50, 0x00, 0x00, 0x00, - 0xdc, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x60, 0x01, 0x4f, 0x3c, 0x01, 0x00, 0x00, 0x00, 0x47, 0x6d, 0xb3, 0x3f, - 0x01, 0x00, 0x00, 0x00, 0x5d, 0x63, 0xcd, 0xbf, 0x0d, 0x00, 0x00, 0x00, - 0x73, 0x74, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, - 0x31, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xe2, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x50, 0x00, 0x00, 0x00, 0x4c, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xd5, 0x6b, 0x8a, 0x3b, 0x01, 0x00, 0x00, 0x00, - 0xab, 0x49, 0x01, 0x3f, 0x01, 0x00, 0x00, 0x00, 0xfd, 0x56, 0x09, 0xbf, - 0x0c, 0x00, 0x00, 0x00, 0x73, 0x74, 0x64, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x52, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 
0x34, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x3c, 0x00, 0x00, 0x00, 0x44, 0xff, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x28, 0xb3, 0xd9, 0x38, 0x0c, 0x00, 0x00, 0x00, - 0x64, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x32, 0x2f, 0x62, 0x69, 0x61, 0x73, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xaa, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x38, 0x00, 0x00, 0x00, - 0x9c, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xdd, 0x9b, 0x21, 0x39, 0x0c, 0x00, 0x00, 0x00, - 0x64, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x33, 0x2f, 0x62, 0x69, 0x61, 0x73, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x14, 0x00, 0x13, 0x00, 0x0c, 0x00, - 0x08, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x48, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf4, 0xd4, 0x51, 0x38, 0x0c, 0x00, 0x00, 0x00, 0x64, 0x65, 0x6e, 0x73, - 0x65, 0x5f, 0x34, 0x2f, 0x62, 0x69, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x1c, 0x00, - 0x18, 0x00, 0x17, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 
0x84, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x5d, 0x4f, 0xc9, 0x3c, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x86, 0xc8, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x64, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x32, 0x5f, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3a, 0x30, 0x5f, 0x69, 0x6e, 0x74, 0x38, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xd8, 0xff, 0xff, 0xff, - 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x0c, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, - 0x0c, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09}; -const int g_model_len = 2488; diff --git a/examples/hello_world/model.h b/examples/hello_world/model.h deleted file mode 100644 index 488f47b..0000000 --- a/examples/hello_world/model.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Automatically created from a TensorFlow Lite flatbuffer using the command: -// xxd -i model.tflite > model.cc - -// This is a standard TensorFlow Lite model file that has been converted into a -// C data array, so it can be easily compiled into a binary for devices that -// don't have a file system. - -// See train/README.md for a full description of the creation process. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_MODEL_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_MODEL_H_ - -extern const unsigned char g_model[]; -extern const int g_model_len; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_MODEL_H_ diff --git a/examples/hello_world/output_handler.h b/examples/hello_world/output_handler.h index 14e9d70..8459d38 100644 --- a/examples/hello_world/output_handler.h +++ b/examples/hello_world/output_handler.h @@ -17,10 +17,9 @@ limitations under the License. 
#define TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_OUTPUT_HANDLER_H_ #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/micro_log.h" // Called by the main loop to produce some output based on the x and y values -void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, - float y_value); +void HandleOutput(float x_value, float y_value); #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_OUTPUT_HANDLER_H_ diff --git a/examples/hello_world/output_handler_test.cpp b/examples/hello_world/output_handler_test.cpp index 1ddcdad..01ac571 100644 --- a/examples/hello_world/output_handler_test.cpp +++ b/examples/hello_world/output_handler_test.cpp @@ -20,12 +20,10 @@ limitations under the License. TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { - tflite::MicroErrorReporter micro_error_reporter; - // This will have external side-effects (like printing to the debug console // or lighting an LED) that are hard to observe, so the most we can do is // make sure the call doesn't crash. - HandleOutput(µ_error_reporter, 0, 0); + HandleOutput(0, 0); } TF_LITE_MICRO_TESTS_END diff --git a/examples/hello_world/rp2/output_handler.cpp b/examples/hello_world/rp2/output_handler.cpp index c78ec4e..76a5560 100644 --- a/examples/hello_world/rp2/output_handler.cpp +++ b/examples/hello_world/rp2/output_handler.cpp @@ -61,8 +61,7 @@ void init_pwm_fade() { } // namespace -void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, - float y_value) { +void HandleOutput(float x_value, float y_value) { // Do this only once static bool is_initialized = false; if (!is_initialized) { @@ -75,7 +74,7 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, g_led_brightness = (int)(127.5f * (y_value + 1)); // Log the current brightness value for display in the console. 
- TF_LITE_REPORT_ERROR(error_reporter, "%d\n", g_led_brightness); + MicroPrintf("%d\n", g_led_brightness); // By default the sine wave is too fast to see in the LED, so slow // down the whole program deliberately so it's more visible. diff --git a/examples/keyword_benchmark/CMakeLists.txt b/examples/keyword_benchmark/CMakeLists.txt deleted file mode 100644 index 95929c0..0000000 --- a/examples/keyword_benchmark/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ - -cmake_minimum_required(VERSION 3.12) - -project(keyword_benchmark C CXX ASM) -set(CMAKE_C_STANDARD 11) -set(CMAKE_CXX_STANDARD 11) - - -add_executable(keyword_benchmark "") - -target_include_directories(keyword_benchmark - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - keyword_benchmark - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(keyword_benchmark - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/benchmarks/keyword_benchmark.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h -) - -target_link_libraries( - keyword_benchmark - pico-tflmicro - hardware_pwm -) - -pico_add_extra_outputs(keyword_benchmark) - diff --git a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_benchmark.cpp b/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_benchmark.cpp deleted file mode 100644 index 815be07..0000000 --- a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_benchmark.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" -#include "tensorflow/lite/micro/benchmarks/micro_benchmark.h" -#include "tensorflow/lite/micro/kernels/fully_connected.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" - -/* - * Keyword Spotting Benchmark for performance optimizations. The model used in - * this benchmark only serves as a reference. The values assigned to the model - * weights and parameters are not representative of the original model. - */ - -namespace { - -using KeywordBenchmarkRunner = MicroBenchmarkRunner; -using KeywordOpResolver = tflite::MicroMutableOpResolver<6>; - -constexpr int kRandomSeed = 42; - -// Create an area of memory to use for input, output, and intermediate arrays. -// Align arena to 16 bytes to avoid alignment warnings on certain platforms. -constexpr int kTensorArenaSize = 21 * 1024; -alignas(16) uint8_t tensor_arena[kTensorArenaSize]; - -uint8_t benchmark_runner_buffer[sizeof(KeywordBenchmarkRunner)]; -uint8_t op_resolver_buffer[sizeof(KeywordOpResolver)]; -KeywordBenchmarkRunner* benchmark_runner = nullptr; - -// Initialize benchmark runner instance explicitly to avoid global init order -// issues on Sparkfun. Use new since static variables within a method -// are automatically surrounded by locking, which breaks bluepill and stm32f4. 
-void CreateBenchmarkRunner() { - // We allocate the KeywordOpResolver from a global buffer because the object's - // lifetime must exceed that of the KeywordBenchmarkRunner object. - KeywordOpResolver* op_resolver = new (op_resolver_buffer) KeywordOpResolver(); - op_resolver->AddDequantize(); - op_resolver->AddFullyConnected(tflite::Register_FULLY_CONNECTED_INT8()); - op_resolver->AddQuantize(); - op_resolver->AddSoftmax(); - op_resolver->AddSvdf(); - - benchmark_runner = new (benchmark_runner_buffer) - KeywordBenchmarkRunner(g_keyword_scrambled_model_data, op_resolver, - tensor_arena, kTensorArenaSize); -} - -// Initializes keyword runner and sets random inputs. -void InitializeKeywordRunner() { - CreateBenchmarkRunner(); - benchmark_runner->SetRandomInput(kRandomSeed); -} - -// This method assumes InitializeKeywordRunner has already been run. -void KeywordRunNIerations(int iterations) { - for (int i = 0; i < iterations; i++) { - benchmark_runner->RunSingleIteration(); - } -} - -} // namespace - -TF_LITE_MICRO_BENCHMARKS_BEGIN - -TF_LITE_MICRO_BENCHMARK(InitializeKeywordRunner()); - -TF_LITE_MICRO_BENCHMARK(KeywordRunNIerations(1)); - -TF_LITE_MICRO_BENCHMARK(KeywordRunNIerations(10)); - -TF_LITE_MICRO_BENCHMARKS_END diff --git a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp b/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp deleted file mode 100644 index 254e194..0000000 --- a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp +++ /dev/null @@ -1,2845 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. -alignas(8) const unsigned char g_keyword_scrambled_model_data[] = { - 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xd0, 0x6e, 0x00, 0x00, 0x60, 0x83, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xbc, 0x6e, 0x00, 0x00, 0xac, 0x56, 0x00, 0x00, - 0x9c, 0x52, 0x00, 0x00, 0x8c, 0x51, 0x00, 0x00, 0x7c, 0x4d, 0x00, 0x00, - 0x2c, 0x4d, 0x00, 0x00, 0x1c, 0x49, 0x00, 0x00, 0x0c, 0x45, 0x00, 0x00, - 0xfc, 0x43, 0x00, 0x00, 0xec, 0x3f, 0x00, 0x00, 0x9c, 0x3f, 0x00, 0x00, - 0x8c, 0x3b, 0x00, 0x00, 0x7c, 0x37, 0x00, 0x00, 0x6c, 0x36, 0x00, 0x00, - 0x5c, 0x32, 0x00, 0x00, 0x0c, 0x32, 0x00, 0x00, 0xfc, 0x2d, 0x00, 0x00, - 0xec, 0x29, 0x00, 0x00, 0xdc, 0x28, 0x00, 0x00, 0xcc, 0x24, 0x00, 0x00, - 0x7c, 0x24, 0x00, 0x00, 0x6c, 0x22, 0x00, 0x00, 0x5c, 0x1a, 0x00, 0x00, - 0xcc, 0x19, 0x00, 0x00, 0xbc, 0x15, 0x00, 0x00, 0xac, 0x0d, 0x00, 0x00, - 0x1c, 0x0d, 0x00, 0x00, 0x0c, 0x09, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, - 0x6c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x2a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x34, 0xe1, 0x4f, 0xa1, 0x63, 0xa4, 0x62, 0xbf, 0x3e, 0x91, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 
0x00, 0x00, 0xa3, 0xb2, 0x8f, 0xee, - 0x35, 0xe6, 0xf2, 0xcc, 0x68, 0xa0, 0x33, 0xc4, 0x7d, 0x4e, 0xbb, 0xa9, - 0x10, 0x32, 0x8e, 0x3d, 0x76, 0x14, 0x1c, 0x33, 0x0e, 0x77, 0xf7, 0xc8, - 0x7b, 0x45, 0xc7, 0xdb, 0xcf, 0x87, 0xc7, 0x70, 0xa9, 0x29, 0xfd, 0x70, - 0x32, 0x96, 0x35, 0x7d, 0xe9, 0xac, 0x6d, 0x9b, 0xfd, 0xe4, 0xbc, 0x4a, - 0x57, 0xcd, 0x43, 0xcc, 0x73, 0x72, 0xdf, 0x07, 0x68, 0xc5, 0x67, 0xbd, - 0x8a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - 0xb0, 0xfb, 0x5f, 0xdf, 0x0e, 0xb9, 0xa2, 0xfd, 0x66, 0x86, 0x13, 0x1b, - 0x6d, 0x1d, 0x53, 0xdb, 0x83, 0xbf, 0x44, 0x29, 0x3f, 0x93, 0xee, 0x42, - 0x9a, 0xf4, 0x31, 0x6e, 0xc3, 0x15, 0x7e, 0x48, 0x72, 0x50, 0xc3, 0x53, - 0xef, 0x35, 0x1f, 0xc2, 0x29, 0x42, 0xb4, 0xd7, 0x4b, 0xd7, 0x98, 0x60, - 0xb9, 0x3e, 0xbb, 0x31, 0x35, 0xc3, 0xf6, 0x15, 0x7a, 0x9a, 0x2c, 0xfd, - 0xff, 0x04, 0xd9, 0x04, 0x57, 0x52, 0xae, 0x99, 0xa3, 0x95, 0xae, 0x6a, - 0x66, 0x52, 0x5f, 0x91, 0x17, 0x83, 0x0d, 0x27, 0x16, 0x02, 0x06, 0x64, - 0x80, 0x05, 0x99, 0x1c, 0x6c, 0xab, 0xb1, 0xa1, 0x0e, 0x44, 0x1f, 0x63, - 0xe9, 0xc1, 0xab, 0x8d, 0x08, 0x79, 0x56, 0xe0, 0x90, 0xa5, 0xb8, 0x3b, - 0xc4, 0x1e, 0xa5, 0x1f, 0x64, 0xe4, 0x0b, 0x72, 0x62, 0x19, 0x5f, 0x66, - 0xc0, 0x9b, 0x7b, 0xc4, 0xe5, 0x9f, 0x82, 0xa7, 0x16, 0x92, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x3e, 0x3d, 0xf4, 0x61, - 0x45, 0x2a, 0x48, 0x53, 0x1f, 0x22, 0x74, 0x65, 0xea, 0x5a, 0x00, 0x83, - 0x68, 0xf9, 0xbb, 0xa3, 0xc2, 0x1a, 0x8f, 0xe1, 0xfb, 0x76, 0x6a, 0xe9, - 0x1a, 0x0e, 0x4d, 0x32, 0xc6, 0xf3, 0x8d, 0x85, 0x54, 0xa1, 0xe9, 0xb8, - 0x35, 0xee, 0xba, 0x53, 0x40, 0xa2, 0xea, 0x7f, 0xc3, 0x99, 0x71, 0x17, - 0xdd, 0xd5, 0xfe, 0xdf, 0x5e, 0x15, 0xa0, 0x73, 0xf8, 0x78, 0x49, 0x73, - 0xcc, 0xf0, 0x18, 0x12, 0x06, 0x81, 0xd6, 0x19, 0x2c, 0xa8, 0xd7, 0x80, - 0x19, 0x19, 0xbf, 0x1e, 0x50, 0xb1, 0xfb, 0xb3, 0xa6, 0x56, 0x6f, 0x52, - 0xa6, 0xc0, 0xdd, 0x3f, 0xbb, 0x13, 0x6e, 0x04, 0xdf, 0x79, 0xca, 0x8b, - 0xa5, 0x9c, 0xa1, 0x78, 0x49, 0xca, 
0xe5, 0x29, 0xbb, 0x29, 0x7c, 0x96, - 0xc6, 0x29, 0x06, 0x99, 0xec, 0x50, 0xd1, 0xe8, 0x9b, 0xb7, 0x53, 0xd2, - 0x36, 0x89, 0xb1, 0x5c, 0x38, 0xf4, 0x2f, 0xa1, 0xda, 0x6f, 0xd8, 0xd1, - 0x62, 0xd2, 0xd4, 0x97, 0xce, 0xf1, 0xbd, 0x73, 0x2d, 0x92, 0xdb, 0x62, - 0x0c, 0xb0, 0x77, 0xed, 0x32, 0x3a, 0xfc, 0x59, 0x94, 0xef, 0x2b, 0x48, - 0x60, 0xb2, 0x82, 0xa2, 0xb6, 0x51, 0xdb, 0x51, 0x47, 0x99, 0x4c, 0x50, - 0x93, 0x53, 0x9d, 0xa9, 0x3c, 0x94, 0x34, 0x9f, 0xa6, 0x3e, 0x4f, 0x87, - 0xd4, 0xa0, 0x40, 0xeb, 0x7b, 0xfa, 0x1b, 0x7d, 0x03, 0xa8, 0xf8, 0x8b, - 0xa5, 0x32, 0x3a, 0xaf, 0x7e, 0x6b, 0x25, 0x08, 0x97, 0x71, 0x8d, 0x0c, - 0x30, 0xc9, 0xa7, 0x23, 0xe3, 0x51, 0xb3, 0xf2, 0x86, 0xad, 0x12, 0xe2, - 0x79, 0x94, 0x7f, 0xf3, 0xf7, 0x88, 0x67, 0x3e, 0x8e, 0x8e, 0x04, 0x5e, - 0x4f, 0x01, 0x6f, 0x1d, 0x78, 0x42, 0x9e, 0x47, 0x81, 0xdf, 0x03, 0x39, - 0x3d, 0x9b, 0xbd, 0xb6, 0x06, 0x21, 0x82, 0xfe, 0xf2, 0x50, 0xe1, 0x14, - 0xbc, 0xe3, 0x5e, 0xe1, 0xbd, 0x8f, 0xfa, 0x35, 0x31, 0x4e, 0x66, 0xeb, - 0x67, 0x49, 0x1c, 0x07, 0x88, 0xb6, 0x22, 0x0c, 0xeb, 0xd9, 0x9f, 0x9b, - 0x8b, 0xe0, 0x9c, 0x3c, 0xf7, 0x91, 0xab, 0x98, 0x5b, 0x0e, 0x09, 0xdd, - 0xe3, 0x0b, 0x14, 0x55, 0xe9, 0xe4, 0x42, 0xd8, 0xce, 0xd7, 0xfd, 0x4c, - 0x20, 0x9f, 0x44, 0x93, 0xa6, 0x17, 0x8a, 0x68, 0x8f, 0xec, 0x62, 0xd1, - 0x97, 0x9c, 0xcc, 0xc4, 0xd9, 0x42, 0xda, 0xf1, 0x34, 0x04, 0xc6, 0xb6, - 0x0f, 0xc7, 0xe6, 0x2d, 0x26, 0x6e, 0x6f, 0x92, 0x7e, 0xd9, 0xd4, 0x40, - 0xc6, 0x70, 0xfa, 0x12, 0x2a, 0x1b, 0xbc, 0x50, 0xeb, 0x3b, 0x24, 0x96, - 0x8d, 0x7c, 0xae, 0xbe, 0xc3, 0x27, 0xce, 0x97, 0xcf, 0xcd, 0x10, 0x13, - 0x01, 0xc6, 0x48, 0x6a, 0x99, 0x38, 0x79, 0xb9, 0x1c, 0xc9, 0x09, 0xac, - 0x96, 0x8c, 0xf7, 0x82, 0x8f, 0xb8, 0x17, 0x94, 0x2c, 0x5f, 0x40, 0xcc, - 0x80, 0xf4, 0x9f, 0xaa, 0xcb, 0x83, 0x13, 0x7b, 0x3a, 0x78, 0x0a, 0x9f, - 0x79, 0x9e, 0xfc, 0x0e, 0x8f, 0x98, 0x60, 0x39, 0x86, 0x44, 0x8e, 0x4b, - 0xc4, 0xad, 0xe6, 0x98, 0x92, 0x08, 0x84, 0x48, 0x8f, 0x1d, 0x78, 0x10, - 0x9e, 0xf7, 0xb8, 0x61, 0x65, 0x46, 
0xdb, 0x4a, 0xcf, 0xc5, 0x37, 0xe3, - 0x77, 0x76, 0xcf, 0x0a, 0x7e, 0x72, 0x3f, 0xe4, 0x51, 0x30, 0x28, 0x57, - 0x13, 0xfd, 0xdb, 0x7e, 0xd6, 0xa3, 0xdd, 0x64, 0xdd, 0x00, 0xd0, 0x7f, - 0xbc, 0x48, 0x1d, 0xaf, 0xde, 0x0e, 0x45, 0xc4, 0xc9, 0xfa, 0xf6, 0xb2, - 0xb7, 0x9a, 0x42, 0x8b, 0x18, 0x08, 0xed, 0xdb, 0xa9, 0xc3, 0x32, 0xf1, - 0x9c, 0xcf, 0x16, 0x74, 0x57, 0xce, 0xe9, 0x44, 0x21, 0xdb, 0x8a, 0x45, - 0x89, 0x70, 0x41, 0x5c, 0xbf, 0x10, 0xdf, 0x83, 0x4a, 0xe4, 0x4c, 0xd8, - 0xc9, 0x2e, 0x5b, 0xa3, 0x05, 0xed, 0x73, 0xb1, 0xb0, 0xb7, 0xc4, 0xd7, - 0x0d, 0xea, 0xf6, 0xb4, 0xc1, 0x5e, 0x12, 0x54, 0x30, 0x73, 0x5c, 0x93, - 0xd9, 0xf7, 0xc9, 0x24, 0x43, 0x8f, 0x4f, 0x8e, 0x94, 0x95, 0xb6, 0xfd, - 0xa3, 0x14, 0x42, 0x50, 0xb8, 0x66, 0xfb, 0xc4, 0xed, 0x72, 0xcf, 0x7b, - 0xa9, 0x73, 0xeb, 0xc4, 0x4a, 0x05, 0xea, 0xb4, 0x47, 0xca, 0x21, 0x56, - 0x28, 0xa8, 0x87, 0xb8, 0x87, 0x0b, 0xe3, 0x8d, 0xfd, 0x70, 0xf7, 0x33, - 0x76, 0xf0, 0x3d, 0xa4, 0x3b, 0x83, 0xab, 0x14, 0x01, 0xe1, 0xb0, 0xa9, - 0x44, 0xe8, 0xd7, 0x50, 0x26, 0x0b, 0xbb, 0x2d, 0x57, 0x39, 0x82, 0x7c, - 0x71, 0xd8, 0x12, 0xaf, 0xf3, 0x9f, 0x46, 0xbd, 0x62, 0xd6, 0x61, 0xf5, - 0xb7, 0x04, 0x94, 0xbf, 0x87, 0xea, 0xc4, 0xc4, 0x33, 0xcf, 0x36, 0x3b, - 0x4f, 0xc7, 0x71, 0xf1, 0x98, 0xe6, 0xb0, 0x96, 0x25, 0xd7, 0xac, 0x75, - 0xfc, 0x92, 0xe0, 0x69, 0x72, 0x37, 0x8d, 0x40, 0x31, 0xaa, 0x2c, 0x86, - 0xfb, 0x95, 0x3f, 0x9c, 0x23, 0xd4, 0x39, 0x99, 0xff, 0xea, 0x95, 0x79, - 0xb9, 0x2e, 0xb0, 0x33, 0xf1, 0xe8, 0xd0, 0x42, 0xb5, 0x70, 0x5c, 0xca, - 0x69, 0x48, 0x28, 0x23, 0x58, 0xb4, 0x07, 0xfc, 0x3e, 0x15, 0x29, 0x00, - 0xa9, 0x22, 0x44, 0x70, 0xd0, 0xc7, 0x01, 0x0d, 0x3e, 0xfc, 0x57, 0xb7, - 0x54, 0x3a, 0xc3, 0x43, 0xd6, 0x2f, 0x55, 0x09, 0x52, 0x4a, 0x6b, 0x8e, - 0x4c, 0x82, 0xbb, 0x4e, 0x3e, 0x38, 0xe1, 0x9e, 0x72, 0x83, 0xec, 0x40, - 0xf5, 0xf7, 0x0e, 0x3c, 0x24, 0xed, 0xda, 0xf2, 0x39, 0x6c, 0xad, 0xeb, - 0xff, 0xfb, 0x4a, 0x38, 0x50, 0x49, 0x28, 0x3d, 0x05, 0xb2, 0x98, 0x44, - 0x2b, 0x61, 0xa2, 0x9b, 0x3a, 0x3c, 
0xad, 0xd9, 0x8c, 0xef, 0x3c, 0x72, - 0x50, 0x74, 0x13, 0x80, 0xc4, 0x7e, 0x6e, 0xf3, 0xc9, 0xdf, 0x63, 0xf6, - 0x41, 0xb2, 0x08, 0x78, 0x9b, 0x7c, 0xa9, 0x13, 0xd1, 0x21, 0xe7, 0x5e, - 0x6a, 0x0d, 0x64, 0xf7, 0x52, 0x75, 0xf2, 0x80, 0x69, 0xbe, 0x43, 0xf8, - 0xd4, 0xad, 0x49, 0xfc, 0x97, 0x76, 0x1c, 0xb6, 0x43, 0x9e, 0xcb, 0x45, - 0x4d, 0x75, 0x07, 0xae, 0xdb, 0xbf, 0xf5, 0x8a, 0xeb, 0xb9, 0x6b, 0x12, - 0x06, 0xbf, 0x94, 0xad, 0x77, 0x29, 0xb1, 0xae, 0x24, 0x9b, 0x4d, 0xdc, - 0xe1, 0x5e, 0xd7, 0x57, 0xec, 0xd1, 0xd8, 0xad, 0xf0, 0x06, 0x08, 0x43, - 0x33, 0x99, 0xd2, 0x04, 0xfc, 0xc8, 0xf6, 0x53, 0x3d, 0x73, 0xd4, 0x36, - 0xd3, 0x8e, 0x4a, 0xcd, 0xb1, 0xe9, 0xcb, 0x3a, 0x5f, 0x54, 0xbc, 0xde, - 0x16, 0xa2, 0x85, 0xde, 0x35, 0x27, 0x99, 0x32, 0x4f, 0xb9, 0x2c, 0x16, - 0xa2, 0x6e, 0xae, 0x75, 0x60, 0x77, 0xe9, 0x08, 0x0f, 0x08, 0xc4, 0xd0, - 0x62, 0xc7, 0xd2, 0x1f, 0x3b, 0x29, 0xdd, 0xb7, 0xea, 0xa3, 0x58, 0xaf, - 0x4c, 0x05, 0xd2, 0x82, 0x6a, 0xe0, 0xc4, 0xe9, 0x70, 0x7e, 0xf2, 0xca, - 0x82, 0x6a, 0xae, 0xc1, 0x9a, 0x42, 0x5d, 0x46, 0x4a, 0xb7, 0x8f, 0x4d, - 0x33, 0xfe, 0x6f, 0x47, 0xb5, 0x49, 0xb3, 0x89, 0x51, 0x31, 0x74, 0x68, - 0x14, 0xda, 0x0a, 0x41, 0x3d, 0x1f, 0x8e, 0x30, 0x8c, 0x77, 0xd1, 0xa9, - 0x36, 0x41, 0x78, 0x34, 0xb7, 0x7e, 0x4e, 0x7a, 0x77, 0x12, 0x43, 0x97, - 0x43, 0xba, 0xd6, 0x28, 0x14, 0x2a, 0x9f, 0x98, 0xb4, 0x39, 0x08, 0x5c, - 0xb7, 0xb8, 0x03, 0x63, 0x62, 0x68, 0xc6, 0x9a, 0x4d, 0xf5, 0xdc, 0x7c, - 0x0f, 0x7e, 0x77, 0xdc, 0x85, 0x53, 0x31, 0x8c, 0x53, 0x8b, 0x27, 0xc4, - 0xb7, 0x3d, 0xd0, 0x94, 0x9b, 0x7e, 0x59, 0x59, 0x03, 0x09, 0x8c, 0x30, - 0x70, 0x7d, 0x9c, 0x73, 0x89, 0x6c, 0x5f, 0xbf, 0xf9, 0xc7, 0x72, 0x76, - 0x12, 0x98, 0xe3, 0xbe, 0xc3, 0x67, 0xdf, 0xa1, 0x76, 0xa3, 0xec, 0x44, - 0x30, 0x70, 0x2f, 0x6a, 0x86, 0x28, 0xb9, 0x9d, 0x7f, 0x93, 0xf2, 0x4a, - 0x34, 0x48, 0x1f, 0x2e, 0x2e, 0x95, 0x88, 0xdb, 0x1f, 0x2c, 0x19, 0x46, - 0x2e, 0x91, 0x5f, 0x81, 0x0d, 0x08, 0x9d, 0x03, 0x0b, 0xaf, 0x59, 0x0a, - 0x41, 0xad, 0x4d, 0x6c, 0x09, 0x0e, 
0x9f, 0xd1, 0xc4, 0xdb, 0xac, 0x59, - 0x27, 0x04, 0x1c, 0x73, 0xe9, 0xf3, 0xe8, 0x54, 0xd9, 0x11, 0x31, 0xb2, - 0xed, 0x2d, 0x8c, 0xeb, 0x99, 0x26, 0x48, 0x9e, 0xac, 0x88, 0x96, 0xcb, - 0x19, 0x49, 0xfa, 0x4a, 0x82, 0xd5, 0x5d, 0xb8, 0x0f, 0x22, 0x3f, 0xb6, - 0x5c, 0x02, 0x2a, 0xb9, 0xd9, 0xfe, 0x4d, 0x9d, 0xdb, 0x85, 0x90, 0x19, - 0x7f, 0x1a, 0x44, 0xa3, 0x74, 0x68, 0xbf, 0xa2, 0x3b, 0xb4, 0x3b, 0xeb, - 0xab, 0x99, 0xc2, 0x46, 0x50, 0x7e, 0xec, 0xa9, 0xb4, 0x86, 0xfa, 0x50, - 0xcb, 0x71, 0x7e, 0x75, 0xa5, 0xca, 0xa6, 0x2f, 0x40, 0x1d, 0xa1, 0x4a, - 0x5c, 0x91, 0xd7, 0x2a, 0xa6, 0x17, 0x11, 0x4d, 0x19, 0x2b, 0xb3, 0x0f, - 0xf0, 0xb3, 0x06, 0x70, 0x51, 0x5c, 0x52, 0x8c, 0xdf, 0xe3, 0x19, 0x92, - 0x08, 0x40, 0xa2, 0xb4, 0xc0, 0xf2, 0xe8, 0x44, 0xcc, 0x36, 0xaa, 0xf9, - 0xf8, 0xfc, 0x2d, 0x83, 0x79, 0xc6, 0x58, 0xc1, 0xdf, 0x32, 0xb7, 0xde, - 0x0f, 0x3e, 0xc0, 0xa8, 0x7e, 0xeb, 0xf2, 0x30, 0x16, 0xdf, 0x38, 0xcb, - 0x69, 0xd9, 0x44, 0x0d, 0x44, 0xf4, 0x45, 0x9c, 0x81, 0xc8, 0xe7, 0x06, - 0xae, 0x95, 0xaf, 0xff, 0x17, 0x3b, 0x1c, 0x3f, 0xda, 0xa5, 0xf8, 0xfd, - 0x9c, 0xf1, 0x0a, 0xca, 0xda, 0xc0, 0xfa, 0x02, 0xc4, 0xce, 0x78, 0xfb, - 0x35, 0x8c, 0xfe, 0x55, 0xad, 0x0d, 0x9b, 0xeb, 0x10, 0xf1, 0x7b, 0xb1, - 0x09, 0xf8, 0xef, 0xfc, 0xde, 0x7a, 0x69, 0x74, 0x76, 0xef, 0x91, 0x64, - 0x33, 0xc4, 0x08, 0x15, 0x73, 0x85, 0x56, 0xae, 0x9c, 0xf6, 0xdd, 0x55, - 0x19, 0x96, 0xe6, 0x41, 0x12, 0xc9, 0x87, 0x91, 0x9e, 0xc6, 0x18, 0xe8, - 0xbf, 0xa0, 0x59, 0xfd, 0x20, 0xab, 0xb5, 0xcf, 0x0f, 0x6e, 0x30, 0xd3, - 0xc5, 0x70, 0xf2, 0x50, 0xa4, 0x2a, 0xdf, 0xb0, 0x45, 0xfc, 0x82, 0x1a, - 0x3b, 0xfe, 0x0c, 0xad, 0x41, 0x95, 0xf1, 0xd6, 0x85, 0xa2, 0xc9, 0xff, - 0xbe, 0x3a, 0x64, 0x70, 0x43, 0xc0, 0xc5, 0xc8, 0x80, 0x11, 0x0d, 0x20, - 0xcd, 0xf2, 0xa2, 0xbb, 0x43, 0x68, 0x0e, 0xf4, 0x01, 0xb3, 0x73, 0x79, - 0x9f, 0x68, 0x41, 0x63, 0x3e, 0xda, 0xf9, 0xf4, 0x23, 0x57, 0x97, 0x84, - 0x99, 0xe8, 0x5e, 0xdb, 0xaa, 0x24, 0xab, 0x9c, 0x40, 0x83, 0xf9, 0x3f, - 0x4f, 0x5a, 0x53, 0xa6, 0xf1, 0xe8, 
0x95, 0xcf, 0xcb, 0x50, 0x13, 0x51, - 0xa7, 0x8c, 0x71, 0x1d, 0xff, 0xcc, 0x66, 0xab, 0xff, 0xca, 0xc5, 0xc3, - 0x73, 0x45, 0xb7, 0x21, 0x1d, 0x65, 0x7a, 0xe5, 0x1f, 0x3f, 0x1a, 0x58, - 0x23, 0x28, 0xc8, 0xf3, 0xbf, 0x98, 0x25, 0xc0, 0x83, 0x68, 0xf0, 0x62, - 0x63, 0x90, 0xcf, 0x1f, 0x20, 0xb8, 0x04, 0x5c, 0xc4, 0x80, 0x5b, 0xf4, - 0x6d, 0xdc, 0xe9, 0xac, 0xd8, 0x13, 0x3b, 0x42, 0xf8, 0x4e, 0xa2, 0x1c, - 0xce, 0x3f, 0x8d, 0x15, 0xd3, 0x87, 0x1b, 0x44, 0x79, 0x52, 0x34, 0x4b, - 0x63, 0x4d, 0xbf, 0x95, 0xec, 0xae, 0xf9, 0xc6, 0x7b, 0x7b, 0x85, 0x8c, - 0x4f, 0x20, 0x58, 0x9d, 0x48, 0x03, 0x2f, 0x77, 0x2e, 0x8b, 0x6f, 0x66, - 0x76, 0xb9, 0xb8, 0xb7, 0x34, 0x5a, 0x63, 0x06, 0x85, 0x82, 0x5f, 0x23, - 0x8f, 0x8d, 0x0c, 0x92, 0x3b, 0xd2, 0x8a, 0x1b, 0x39, 0xee, 0x6a, 0xbc, - 0xf6, 0x94, 0x2a, 0xc6, 0x73, 0xa6, 0x99, 0x98, 0xdc, 0x96, 0xd7, 0xc1, - 0xfe, 0x9b, 0xc8, 0xfb, 0x86, 0x5a, 0xad, 0xce, 0xf8, 0xd5, 0x32, 0x62, - 0x96, 0x63, 0xaf, 0x4c, 0x4a, 0xae, 0xec, 0x26, 0x3d, 0x84, 0x69, 0x50, - 0x5f, 0x37, 0x9b, 0x29, 0xac, 0x15, 0x76, 0x3d, 0x33, 0x96, 0x06, 0xde, - 0xc1, 0x6d, 0xa2, 0xc7, 0xc3, 0x8a, 0x20, 0x2e, 0xf7, 0x08, 0x55, 0x83, - 0x23, 0x9c, 0x23, 0x2d, 0x3a, 0xa1, 0x32, 0xbc, 0x47, 0x48, 0xd5, 0x6a, - 0x71, 0xb9, 0xcc, 0x2d, 0x99, 0xa0, 0x37, 0x07, 0x46, 0x45, 0xbe, 0xf0, - 0x27, 0x5a, 0x25, 0x72, 0x58, 0x47, 0x6d, 0xbf, 0x23, 0xdc, 0x48, 0x44, - 0x45, 0x95, 0xb1, 0x62, 0xf1, 0x7e, 0x4c, 0x95, 0x1c, 0xb4, 0x17, 0x8b, - 0x59, 0x2e, 0xf3, 0x4f, 0x45, 0x3b, 0x5d, 0x67, 0x92, 0x52, 0xd8, 0xc1, - 0x91, 0xfa, 0x53, 0xaa, 0x87, 0xc0, 0xa7, 0xb0, 0x9f, 0x10, 0xe8, 0xac, - 0x45, 0x52, 0xbb, 0x17, 0xee, 0xf6, 0x18, 0xbe, 0x02, 0x70, 0xce, 0x79, - 0x66, 0x72, 0xf9, 0xf6, 0xca, 0x66, 0xff, 0xa4, 0x9a, 0xd9, 0xb7, 0x07, - 0xa9, 0xc1, 0x23, 0x7e, 0x7b, 0x9c, 0xe3, 0x02, 0x7a, 0xcc, 0xa3, 0x67, - 0xb7, 0xb0, 0x37, 0xba, 0xae, 0x12, 0xda, 0x48, 0x6e, 0x7f, 0xde, 0x5f, - 0x75, 0x15, 0xca, 0xd2, 0x46, 0xdd, 0xb0, 0x82, 0xbf, 0x6d, 0xe9, 0x51, - 0x66, 0xa5, 0x9e, 0x0c, 0xd5, 0x03, 
0xbd, 0x97, 0x0e, 0x1b, 0x88, 0xf6, - 0x61, 0x5a, 0x8b, 0xe0, 0xdd, 0x3e, 0x59, 0x4c, 0x35, 0xfd, 0xb0, 0x3b, - 0x79, 0x8c, 0x1c, 0x96, 0x97, 0x35, 0x62, 0x36, 0x62, 0x4c, 0x4b, 0x46, - 0xb1, 0x21, 0xf7, 0xf0, 0x34, 0xdc, 0xd9, 0x9f, 0xf8, 0x53, 0x7d, 0xca, - 0xbc, 0x4d, 0xaf, 0xf4, 0xb7, 0x2f, 0xa7, 0x5d, 0x18, 0xf9, 0x3b, 0xa9, - 0xb0, 0xbb, 0xdf, 0xfa, 0x28, 0x2b, 0x58, 0xce, 0x46, 0x01, 0x3f, 0x76, - 0xf2, 0x39, 0x45, 0x8b, 0x3c, 0xda, 0x62, 0x2b, 0x6b, 0xe1, 0x5f, 0x14, - 0xfc, 0x79, 0x17, 0x2d, 0xe2, 0xe5, 0x8c, 0xc5, 0xde, 0x91, 0xfd, 0xf5, - 0x6d, 0x9b, 0x6b, 0xbb, 0xb0, 0x13, 0xae, 0xbe, 0x1e, 0xa8, 0x8f, 0x3c, - 0xfd, 0x24, 0xbe, 0xb8, 0x39, 0x80, 0x03, 0x06, 0x8b, 0xff, 0xca, 0x90, - 0x88, 0x0f, 0x45, 0xc4, 0xeb, 0x50, 0x52, 0xf5, 0x00, 0x8c, 0x16, 0x9d, - 0x26, 0xaa, 0xec, 0xb1, 0x44, 0xd6, 0xfe, 0x67, 0xa3, 0xc1, 0xec, 0x4a, - 0x12, 0xa6, 0x7c, 0x7c, 0xc3, 0x46, 0x1c, 0x64, 0x61, 0x67, 0xec, 0xce, - 0x1e, 0xa2, 0xb4, 0xdd, 0x6e, 0x7f, 0x02, 0x14, 0xf4, 0x1c, 0x17, 0xa7, - 0x31, 0x9f, 0xc2, 0xc6, 0xc0, 0x21, 0x41, 0x88, 0x61, 0xd8, 0xca, 0x06, - 0xa5, 0xe4, 0xef, 0xa4, 0xaa, 0x4d, 0xa3, 0xad, 0x5f, 0xd4, 0x0c, 0x6b, - 0x14, 0x38, 0x2e, 0xe8, 0x87, 0x5a, 0x68, 0x10, 0x51, 0xd8, 0xbb, 0xa6, - 0xd9, 0xdc, 0xd3, 0x7f, 0x1f, 0xea, 0xa8, 0xcc, 0x3f, 0x43, 0xa4, 0x04, - 0x95, 0xb4, 0xde, 0x2f, 0x07, 0x5d, 0x91, 0x1c, 0x8e, 0xc3, 0xbc, 0xaa, - 0x46, 0x8a, 0xa8, 0x42, 0xa7, 0x2c, 0x0f, 0x1f, 0xb3, 0xe2, 0x8a, 0x0b, - 0xa0, 0x3f, 0xfb, 0x87, 0x9e, 0x42, 0xa5, 0x60, 0xce, 0x5a, 0x54, 0x91, - 0x26, 0x51, 0xea, 0x81, 0x6f, 0xf1, 0x54, 0x93, 0xe7, 0xa0, 0xf8, 0x64, - 0xab, 0x1d, 0x0d, 0x9d, 0x64, 0x6a, 0xd5, 0x19, 0x03, 0xbb, 0x94, 0x7f, - 0x0a, 0xb8, 0x6b, 0x87, 0xc3, 0x1a, 0x38, 0xe5, 0xe8, 0xba, 0x13, 0x17, - 0xeb, 0x13, 0xcc, 0xac, 0xcb, 0x1f, 0x96, 0x4c, 0x3b, 0x18, 0xfb, 0xe8, - 0x5c, 0x54, 0xce, 0x1a, 0x91, 0x44, 0xf5, 0x49, 0x6c, 0x38, 0x2a, 0x92, - 0x8a, 0x0d, 0x3d, 0x08, 0xc2, 0x5f, 0x6c, 0xac, 0x48, 0xb3, 0xdc, 0x2e, - 0xa6, 0x5a, 0xa8, 0xee, 0x22, 0x9a, 
0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x96, 0xc5, 0x3a, 0x4e, 0x42, 0x7d, 0x27, 0xce, - 0x44, 0x84, 0xf1, 0x67, 0x8c, 0xc5, 0xdd, 0x75, 0x3b, 0x8a, 0xed, 0x2e, - 0x29, 0x62, 0x7b, 0xb0, 0xe6, 0xa3, 0xb4, 0x61, 0x73, 0x10, 0xff, 0x0e, - 0x0c, 0x98, 0x74, 0xef, 0xbb, 0xc4, 0xca, 0x03, 0x88, 0xa4, 0x96, 0x61, - 0xef, 0x36, 0x6d, 0xa2, 0xb1, 0xc8, 0xf0, 0xac, 0xf1, 0xb2, 0x08, 0x56, - 0xc7, 0x99, 0xcf, 0xae, 0x0a, 0x37, 0x85, 0x60, 0x78, 0x2d, 0x14, 0xda, - 0xb1, 0xa7, 0x00, 0xb6, 0x00, 0x04, 0x76, 0x80, 0x0e, 0x9f, 0x2a, 0x30, - 0x8b, 0x85, 0xd9, 0xc1, 0xaf, 0xee, 0x27, 0x80, 0x20, 0xed, 0xef, 0x25, - 0x5c, 0x98, 0x6b, 0xcc, 0xf8, 0x72, 0xfb, 0x3f, 0x13, 0xe6, 0x9b, 0x47, - 0xee, 0xa1, 0x18, 0x55, 0xa0, 0x68, 0xbe, 0xd4, 0x21, 0x59, 0x72, 0xa8, - 0xa4, 0xd2, 0x33, 0x57, 0x50, 0xfc, 0x6b, 0xa8, 0x49, 0x1b, 0x74, 0xdb, - 0x5a, 0x16, 0xb8, 0x52, 0x0c, 0xda, 0xa0, 0xa3, 0xff, 0x33, 0x56, 0x82, - 0x0f, 0x0a, 0x90, 0x82, 0xee, 0xf1, 0x1b, 0xb3, 0x05, 0x44, 0x39, 0x01, - 0xf7, 0x1e, 0xff, 0xcb, 0xea, 0xd0, 0xb6, 0x20, 0xbc, 0x84, 0xb1, 0xf9, - 0xa2, 0xc1, 0x56, 0xe6, 0xfa, 0x47, 0xc9, 0xfd, 0x45, 0x77, 0x51, 0x8e, - 0x01, 0xe4, 0x17, 0x20, 0x6f, 0x99, 0xe3, 0x90, 0x2f, 0xcc, 0xaf, 0xd9, - 0x61, 0x32, 0x91, 0x62, 0x58, 0xf4, 0x98, 0xf5, 0xf4, 0xeb, 0x13, 0xeb, - 0xdc, 0x8a, 0xac, 0xb2, 0x9e, 0xcf, 0xe7, 0xa7, 0xd4, 0x97, 0x22, 0x12, - 0x08, 0x10, 0x6d, 0x40, 0xea, 0x26, 0xea, 0x42, 0x29, 0x6e, 0x75, 0x62, - 0x47, 0x08, 0x17, 0xa8, 0x69, 0x0f, 0xf7, 0x35, 0x59, 0x23, 0x86, 0x83, - 0xfd, 0xb5, 0x61, 0x98, 0x9c, 0x4d, 0x37, 0xda, 0x9f, 0xfc, 0xfb, 0x16, - 0xb7, 0x6c, 0x52, 0xee, 0xa8, 0x9c, 0x3e, 0x93, 0x43, 0xc5, 0x2b, 0xd4, - 0xd0, 0x9f, 0x69, 0x2c, 0xc9, 0x1f, 0x2e, 0xdf, 0x5b, 0xe6, 0xc6, 0x5f, - 0x71, 0xd1, 0xd7, 0xb2, 0x8f, 0x3a, 0xba, 0x60, 0x75, 0x3d, 0x34, 0x41, - 0x43, 0x9b, 0x13, 0xc0, 0x3b, 0x30, 0xc5, 0xe9, 0x84, 0x81, 0xde, 0x85, - 0x4e, 0x65, 0x7b, 0x21, 0x37, 0xb8, 0xef, 0x24, 0x19, 0xaa, 0x26, 0x0c, - 0x27, 0xa7, 0xd9, 0x29, 0x47, 0x1a, 
0x15, 0x42, 0x1e, 0x30, 0x79, 0x79, - 0x96, 0x09, 0x62, 0x26, 0xad, 0x98, 0x8b, 0xcb, 0x3d, 0xeb, 0x66, 0x83, - 0x77, 0xd9, 0x79, 0x4d, 0x05, 0x81, 0x72, 0xe9, 0xe0, 0x6f, 0x13, 0x00, - 0x7e, 0xa3, 0x92, 0x82, 0x1c, 0x90, 0x83, 0x4b, 0x15, 0x97, 0x0f, 0x92, - 0xe2, 0xd3, 0x3d, 0xd7, 0x6c, 0xb9, 0x60, 0x9a, 0x23, 0x52, 0xbe, 0x59, - 0xc9, 0x36, 0x9e, 0xf7, 0x77, 0x09, 0x79, 0x01, 0xcc, 0xec, 0x17, 0xd1, - 0x74, 0xbc, 0x58, 0x65, 0x45, 0x3c, 0x86, 0xf1, 0xbc, 0xbd, 0x95, 0x54, - 0x46, 0x45, 0x7b, 0x4c, 0xa2, 0xea, 0x2a, 0x6e, 0xa8, 0xd1, 0x66, 0x03, - 0xb2, 0x6a, 0xe0, 0xd3, 0x07, 0x8d, 0xe0, 0x09, 0x81, 0x42, 0xe3, 0x97, - 0xc4, 0xe7, 0x37, 0xc5, 0x82, 0xcf, 0xb1, 0xec, 0xba, 0xbd, 0xf4, 0xb6, - 0x41, 0xb2, 0xb8, 0xa6, 0x3a, 0x85, 0x4b, 0x4f, 0x46, 0x48, 0xe9, 0x9b, - 0x72, 0xf5, 0xb0, 0x64, 0x66, 0x75, 0x42, 0xb4, 0x00, 0xbe, 0x11, 0x6d, - 0x86, 0x93, 0x07, 0x50, 0xa7, 0xef, 0x55, 0x42, 0xcf, 0xe8, 0x61, 0xd0, - 0x9b, 0x11, 0x84, 0x8c, 0x74, 0xe4, 0xb8, 0x3f, 0x48, 0xb3, 0x61, 0xe3, - 0xea, 0x66, 0x86, 0x94, 0x95, 0x12, 0x77, 0x26, 0x75, 0x30, 0xb5, 0xd3, - 0x7a, 0xad, 0x2d, 0x58, 0x46, 0x1b, 0x4b, 0xd9, 0x2d, 0x1e, 0x0b, 0xff, - 0xd7, 0x03, 0x56, 0x3b, 0xbd, 0x65, 0xb0, 0xf9, 0xfe, 0x43, 0x1c, 0x9c, - 0x18, 0x82, 0x78, 0x5e, 0x06, 0x02, 0x21, 0x70, 0xb2, 0x7f, 0xb5, 0x63, - 0x71, 0x85, 0x95, 0x79, 0xae, 0x1e, 0xc6, 0x62, 0x7a, 0x7c, 0x63, 0x46, - 0x70, 0x1c, 0x58, 0x72, 0x1d, 0xde, 0xca, 0xb4, 0xfc, 0xc8, 0x56, 0x38, - 0x32, 0xf4, 0x0b, 0x56, 0x87, 0x6b, 0x5b, 0x53, 0xd2, 0x2c, 0x35, 0xef, - 0x5b, 0x33, 0x59, 0x13, 0x76, 0x82, 0x30, 0x80, 0x23, 0x10, 0x07, 0x4c, - 0x3f, 0xac, 0x9c, 0x58, 0x2d, 0x04, 0xe6, 0x6a, 0xd3, 0x5c, 0xf9, 0xb6, - 0x59, 0x4e, 0x85, 0xfe, 0x01, 0x71, 0xf0, 0xf7, 0xf2, 0x1f, 0x46, 0xd5, - 0x20, 0x3c, 0x9b, 0xc2, 0x1e, 0x73, 0x1c, 0x56, 0x9c, 0x76, 0x8c, 0x12, - 0x95, 0x51, 0xd4, 0x6f, 0x5b, 0x3a, 0xa7, 0x5f, 0xa7, 0xe4, 0xfa, 0xb7, - 0x1a, 0xdd, 0xb6, 0x4c, 0x01, 0x02, 0xae, 0x9c, 0x02, 0x0d, 0x66, 0x2f, - 0x40, 0x87, 0xa1, 0xbc, 0xf3, 0xde, 
0xf4, 0xdb, 0x65, 0xee, 0xcc, 0xca, - 0xe1, 0x7a, 0xa2, 0xf4, 0xf7, 0xf5, 0x7c, 0x2a, 0x3f, 0xa4, 0x67, 0xbb, - 0x07, 0x50, 0x7a, 0x29, 0x8a, 0xcf, 0x2c, 0x7a, 0x0e, 0x0d, 0xc7, 0x95, - 0x8b, 0xf4, 0xe2, 0x50, 0xe1, 0xc1, 0x40, 0x16, 0x99, 0x5c, 0x72, 0xe7, - 0xe4, 0x01, 0xeb, 0x29, 0x6a, 0x99, 0xf2, 0x67, 0x23, 0x46, 0x1f, 0xaa, - 0xea, 0xc1, 0x51, 0x30, 0xeb, 0x7d, 0x34, 0x52, 0x91, 0x37, 0x2d, 0xc6, - 0x5c, 0x3a, 0x7c, 0x54, 0xc0, 0x79, 0xdc, 0xf9, 0xbf, 0x08, 0x2a, 0xf6, - 0xe1, 0x1e, 0xee, 0xc6, 0xd2, 0xe9, 0x30, 0x27, 0x60, 0x0c, 0xa2, 0x63, - 0x16, 0x06, 0x3d, 0xe2, 0xf5, 0x6f, 0xea, 0xe4, 0x4d, 0x9f, 0x2d, 0x36, - 0x62, 0x95, 0x47, 0x5d, 0x00, 0x22, 0x9f, 0x0c, 0xbb, 0x71, 0xad, 0xea, - 0xe7, 0x62, 0x59, 0x21, 0xd1, 0xaf, 0x04, 0x5a, 0xfc, 0x1f, 0x28, 0x6b, - 0x6f, 0x71, 0xec, 0xd4, 0xbd, 0x9c, 0x88, 0xfb, 0x3f, 0x04, 0xea, 0xd6, - 0xb2, 0x24, 0xe5, 0x28, 0xfe, 0xc5, 0x3e, 0x15, 0x00, 0x8c, 0xa2, 0xdf, - 0x18, 0x3d, 0x10, 0x9a, 0xb1, 0xcd, 0x64, 0xda, 0x87, 0x41, 0xc8, 0xa1, - 0x1c, 0x97, 0xd5, 0x44, 0xd9, 0x51, 0xd2, 0x96, 0xed, 0xad, 0x28, 0x1f, - 0x03, 0x89, 0x21, 0xbd, 0x79, 0x91, 0x48, 0x9c, 0x8e, 0x17, 0xfd, 0x36, - 0x72, 0xf6, 0x69, 0x4f, 0x3f, 0x02, 0x57, 0xcc, 0x3f, 0x1c, 0x49, 0x82, - 0x00, 0x45, 0x9e, 0x29, 0x83, 0x14, 0x12, 0xbb, 0xd2, 0xd0, 0x1a, 0x66, - 0x0f, 0x57, 0x24, 0xd4, 0x9f, 0x46, 0x0c, 0xf4, 0xb8, 0x28, 0x85, 0x52, - 0xe2, 0xa1, 0xc2, 0x3a, 0x8c, 0x34, 0x4a, 0x81, 0xe3, 0xbc, 0xa2, 0x67, - 0x67, 0x12, 0x13, 0xc4, 0xe7, 0xd7, 0x2c, 0x4e, 0xa9, 0xf5, 0xed, 0x63, - 0xf2, 0x18, 0x9c, 0x0c, 0xe2, 0x4d, 0x25, 0x23, 0x30, 0x3e, 0x49, 0x29, - 0xa6, 0x37, 0xdf, 0xc2, 0xdc, 0xf6, 0x5e, 0xae, 0x45, 0xd7, 0x8d, 0x56, - 0xba, 0x29, 0x4f, 0xee, 0xc9, 0x26, 0xd7, 0xbf, 0x10, 0x4d, 0x0a, 0x3b, - 0x3d, 0x1f, 0xd5, 0x72, 0xe1, 0xe6, 0xf5, 0x23, 0x4a, 0x17, 0x2d, 0xe4, - 0x40, 0x55, 0x9b, 0x39, 0x66, 0x36, 0xe4, 0x6d, 0x6d, 0xb6, 0x8d, 0x2a, - 0x7e, 0x76, 0x73, 0xa5, 0x86, 0x20, 0x3d, 0x18, 0xa0, 0x6c, 0x35, 0x59, - 0xc8, 0x1c, 0xef, 0x0f, 0x36, 0x1d, 
0x6f, 0xba, 0x89, 0xb9, 0x9e, 0x7a, - 0x58, 0x1d, 0x43, 0xad, 0x85, 0x8b, 0x6b, 0xcc, 0x25, 0xb8, 0xe4, 0xdd, - 0xa1, 0x35, 0xd9, 0xef, 0xc4, 0xb1, 0xf6, 0x99, 0x27, 0x17, 0xb7, 0xbe, - 0xd1, 0x4f, 0xa1, 0x81, 0x4e, 0xb6, 0x19, 0xcd, 0xa0, 0x92, 0xeb, 0x56, - 0x41, 0x4f, 0x37, 0xca, 0x3b, 0x43, 0x85, 0x86, 0xdf, 0x5d, 0x5a, 0x8c, - 0xd4, 0x5b, 0xc4, 0x28, 0xdb, 0x16, 0xea, 0x3a, 0x2e, 0x9e, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xea, 0x59, 0x40, 0xc4, - 0x40, 0x8b, 0x6a, 0x8a, 0xb8, 0x7f, 0x1e, 0x0b, 0xfe, 0xab, 0xa4, 0xac, - 0x42, 0x91, 0xc5, 0xfa, 0x2c, 0x7e, 0xb4, 0xf9, 0x5c, 0xd5, 0x4c, 0x6a, - 0x74, 0x82, 0x90, 0x81, 0x96, 0xb0, 0xf4, 0xd4, 0xba, 0xc9, 0xa3, 0x2e, - 0x26, 0x0a, 0xc9, 0x55, 0x65, 0xac, 0xde, 0x83, 0x37, 0xec, 0x0e, 0xf6, - 0xdc, 0x8c, 0x34, 0xe6, 0x57, 0xde, 0x32, 0x0a, 0x02, 0x62, 0x4f, 0x6a, - 0x92, 0xa5, 0xb4, 0x40, 0xde, 0x57, 0xf4, 0xd1, 0xa3, 0x1c, 0xd3, 0xf7, - 0x4a, 0x15, 0xcc, 0x27, 0x26, 0x00, 0xba, 0xf3, 0xfa, 0x4e, 0xc6, 0xe9, - 0xc3, 0x05, 0x3d, 0x3a, 0x89, 0x96, 0x7d, 0x41, 0xac, 0xca, 0x28, 0x7f, - 0x69, 0x02, 0x40, 0x03, 0x93, 0x86, 0x85, 0x85, 0x73, 0x00, 0x09, 0x5a, - 0xcf, 0x5f, 0x1d, 0xaa, 0x46, 0x41, 0x9d, 0x08, 0xbf, 0xea, 0x45, 0x9b, - 0x93, 0xda, 0x9e, 0x81, 0xba, 0x9e, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x08, 0x00, 0x00, 0x6a, 0x1f, 0x9b, 0x03, 0xdd, 0xe4, 0x16, 0x07, - 0x7f, 0x5b, 0xb0, 0xee, 0xac, 0x55, 0xc4, 0x50, 0xe6, 0x2b, 0x17, 0xed, - 0x7f, 0x50, 0x4d, 0x71, 0x73, 0xae, 0xe0, 0x4d, 0xce, 0x08, 0xd9, 0x8b, - 0x83, 0x2c, 0x01, 0x48, 0x02, 0xd3, 0xbb, 0xca, 0x86, 0xd7, 0xca, 0x5f, - 0xc7, 0xce, 0x59, 0xdf, 0xc1, 0xcc, 0xf7, 0x7b, 0x54, 0xf8, 0x0d, 0x4f, - 0x81, 0x9e, 0x50, 0x6a, 0x65, 0x66, 0x4a, 0xec, 0x7a, 0x1b, 0x92, 0xb2, - 0x39, 0x8f, 0x5d, 0x41, 0x33, 0xcf, 0xe6, 0x1b, 0x34, 0x5d, 0xe1, 0xf6, - 0xef, 0xcb, 0xa0, 0x55, 0x7e, 0x1f, 0x45, 0x38, 0xb9, 0x56, 0x15, 0x3b, - 0x70, 0xab, 0xc8, 0x2f, 0x1c, 0xb9, 0x7d, 0x37, 0xe1, 0xb4, 0x03, 0x44, - 0x5a, 0xf6, 0x57, 0x97, 0x03, 0x54, 
0x4c, 0x22, 0x88, 0xc3, 0x82, 0xfd, - 0x91, 0xc1, 0xf1, 0x63, 0xb4, 0x50, 0x46, 0x11, 0x64, 0x07, 0xfd, 0x85, - 0xe5, 0x78, 0x57, 0xdd, 0x19, 0x2a, 0x6b, 0x64, 0x3e, 0xec, 0xb8, 0xf3, - 0xb5, 0x95, 0x29, 0x72, 0xf1, 0x9d, 0xdd, 0xb9, 0xad, 0xd0, 0x78, 0x26, - 0x86, 0x10, 0x10, 0x19, 0xe4, 0x79, 0xae, 0xdc, 0x56, 0xb7, 0x54, 0x4f, - 0x94, 0xc6, 0x26, 0x9a, 0x93, 0xa8, 0x2e, 0x1b, 0x1c, 0xda, 0x87, 0x3a, - 0xa2, 0x44, 0xb9, 0x0b, 0x0f, 0xab, 0x70, 0x3b, 0xb7, 0x6c, 0xbf, 0x58, - 0x67, 0x32, 0x7d, 0xa3, 0x2a, 0xcb, 0x4e, 0x02, 0x92, 0xa1, 0x26, 0x0e, - 0x20, 0x5e, 0xb3, 0xec, 0xc4, 0x04, 0x5b, 0x7f, 0xe5, 0xbd, 0x30, 0xeb, - 0xc8, 0xdd, 0xf1, 0x72, 0x5a, 0x7e, 0xcb, 0x93, 0x22, 0xa0, 0x01, 0x9f, - 0xbb, 0x24, 0x9f, 0x50, 0x01, 0x1f, 0x24, 0x02, 0x85, 0x6d, 0xe6, 0x4d, - 0x55, 0xc4, 0x07, 0xe9, 0x87, 0x38, 0xbf, 0x1a, 0x3b, 0x05, 0x82, 0xc4, - 0x73, 0x4b, 0x87, 0x3c, 0xb4, 0x0a, 0x48, 0x8c, 0x06, 0x67, 0xe7, 0xbf, - 0xcc, 0xe7, 0xe5, 0xc3, 0xb2, 0x81, 0x60, 0xe2, 0xd1, 0xb1, 0x8f, 0x98, - 0xbd, 0x7d, 0xbd, 0x4e, 0x9a, 0xca, 0xbe, 0xcb, 0x81, 0x47, 0x25, 0xaa, - 0xfa, 0x91, 0xcf, 0x78, 0xce, 0xcb, 0x1a, 0x11, 0x79, 0xcf, 0x97, 0xa3, - 0x95, 0x95, 0x6f, 0xd7, 0xae, 0x80, 0xc9, 0xd5, 0x95, 0xb7, 0xcf, 0xe2, - 0x9d, 0x98, 0x65, 0x80, 0xfd, 0x2e, 0xee, 0x46, 0x5e, 0x46, 0x8c, 0xde, - 0x52, 0xb4, 0xdc, 0xce, 0xa8, 0xab, 0x4e, 0x0c, 0x12, 0x9f, 0x89, 0x9c, - 0x84, 0x80, 0xfe, 0x08, 0x64, 0x12, 0x12, 0x95, 0x62, 0xea, 0x65, 0xcc, - 0x34, 0x80, 0xcf, 0x92, 0x5f, 0xc2, 0xae, 0x76, 0xe7, 0x2f, 0xbb, 0xa8, - 0xdb, 0x6a, 0x66, 0x60, 0xaf, 0x88, 0xba, 0x65, 0x32, 0xcf, 0xf7, 0x6e, - 0xd8, 0xd0, 0x69, 0xb0, 0x12, 0x23, 0xd6, 0xc2, 0x32, 0xe5, 0x8e, 0x51, - 0xc5, 0x61, 0x28, 0x45, 0xf7, 0xf9, 0xea, 0x73, 0xce, 0x04, 0x2d, 0x56, - 0x43, 0x10, 0x8b, 0x4f, 0x6b, 0xfa, 0x32, 0xa8, 0x92, 0x8f, 0xd9, 0xb4, - 0xfd, 0xa4, 0x74, 0xa8, 0xea, 0xca, 0xd3, 0x84, 0xbb, 0x5a, 0x34, 0x57, - 0xf9, 0xda, 0x25, 0x40, 0x1f, 0x5e, 0xc2, 0x66, 0x43, 0x05, 0xdd, 0x13, - 0x88, 0x91, 0x60, 0xa1, 0x75, 0xd3, 
0xc4, 0x27, 0xff, 0xda, 0x24, 0x3d, - 0xd9, 0xd7, 0x47, 0x46, 0x30, 0xd0, 0x76, 0xc4, 0x9e, 0x97, 0xe3, 0x43, - 0xd7, 0x45, 0xaf, 0x49, 0x36, 0xf2, 0x18, 0xdd, 0x3f, 0x86, 0x9a, 0xec, - 0x9a, 0x70, 0xeb, 0x5a, 0xe2, 0xa0, 0x4b, 0x45, 0x21, 0xb3, 0x32, 0x3d, - 0x0c, 0x8c, 0x03, 0x13, 0xae, 0x46, 0xb5, 0x1a, 0x0a, 0x03, 0x36, 0xfe, - 0xfe, 0xfa, 0xc9, 0x4d, 0x46, 0xf8, 0xfe, 0x6f, 0x99, 0x8c, 0xe4, 0x77, - 0x0c, 0x27, 0x59, 0xf7, 0xc3, 0xfc, 0x32, 0xb3, 0xa5, 0xae, 0xdc, 0x49, - 0xac, 0x31, 0x27, 0xa6, 0x14, 0x92, 0xfb, 0xe3, 0x69, 0x35, 0x8d, 0xa0, - 0x50, 0x55, 0x09, 0x90, 0xdf, 0x67, 0x08, 0x4c, 0x0e, 0xaf, 0x71, 0xc2, - 0xe8, 0xb8, 0xdc, 0x45, 0xe3, 0x6d, 0x58, 0x3f, 0x19, 0x8d, 0xcd, 0xeb, - 0xe3, 0x02, 0x49, 0xd8, 0xc8, 0x8b, 0x29, 0xb3, 0xef, 0x2b, 0xf0, 0x39, - 0x5c, 0x11, 0xaa, 0x52, 0x44, 0x0d, 0x1a, 0x3a, 0x7a, 0x62, 0xda, 0x6d, - 0xe3, 0xdd, 0x03, 0x30, 0x6d, 0x3e, 0x18, 0x30, 0x1d, 0xc0, 0xd0, 0x05, - 0x67, 0x98, 0xf5, 0x2a, 0xc7, 0xa1, 0x58, 0xd7, 0xf8, 0x6f, 0x7d, 0x07, - 0x59, 0x27, 0x95, 0xb9, 0x8d, 0x4d, 0xd7, 0xc8, 0x5e, 0x8b, 0x89, 0x14, - 0xb7, 0x1b, 0x35, 0xaa, 0x72, 0x02, 0x39, 0x3c, 0x41, 0x7c, 0x91, 0x93, - 0x81, 0xe1, 0xad, 0xbe, 0x77, 0x28, 0x80, 0xa2, 0x9c, 0xa8, 0x00, 0x18, - 0xa5, 0x70, 0xec, 0xec, 0x96, 0x95, 0x37, 0xa3, 0xee, 0x15, 0xa0, 0x69, - 0x0e, 0x05, 0xb5, 0xb4, 0xb6, 0xa7, 0x8b, 0xb9, 0x41, 0x88, 0x4f, 0x56, - 0x39, 0xa7, 0xbe, 0x24, 0xce, 0x4c, 0xe0, 0x9c, 0x24, 0x5a, 0xa1, 0xab, - 0xcd, 0x82, 0xf1, 0x16, 0x3f, 0xc0, 0xaf, 0xe1, 0x42, 0xe0, 0x7d, 0x1b, - 0xd9, 0x8f, 0xb8, 0x04, 0xa1, 0x88, 0xd9, 0xc3, 0xaf, 0x4f, 0xda, 0xfd, - 0x0b, 0x5c, 0xc3, 0x04, 0xf3, 0xdb, 0xe6, 0x76, 0x6e, 0xe9, 0xdc, 0xea, - 0x6f, 0xa2, 0xa5, 0x75, 0x2c, 0xc7, 0x91, 0x7d, 0x4b, 0xd5, 0x68, 0x55, - 0xbb, 0x2d, 0x14, 0xdb, 0x06, 0x76, 0xf7, 0xcc, 0x0a, 0x88, 0x6c, 0x2b, - 0xa1, 0x57, 0xd6, 0x15, 0x9c, 0x46, 0xcf, 0x5b, 0x6f, 0x9e, 0x7e, 0xc5, - 0x39, 0xda, 0x97, 0x26, 0x5e, 0xf5, 0x25, 0x06, 0xed, 0x8e, 0x9b, 0x1d, - 0x1b, 0x91, 0x07, 0x89, 0x08, 0xce, 
0xd7, 0x38, 0x43, 0x64, 0x8e, 0xf5, - 0x3a, 0x52, 0x4a, 0xfb, 0x3e, 0xff, 0x2c, 0xb3, 0x78, 0x40, 0xb5, 0xdd, - 0xb2, 0x8a, 0xd3, 0x6a, 0xc5, 0xb0, 0xa3, 0x4a, 0xb8, 0xe7, 0x27, 0xa0, - 0x5a, 0x8f, 0x0f, 0xda, 0x53, 0x49, 0xc9, 0x77, 0x2a, 0xef, 0x78, 0xc6, - 0xec, 0xaf, 0x10, 0xe5, 0x71, 0xc5, 0x7a, 0x85, 0xdf, 0xb2, 0x85, 0x02, - 0xe3, 0x55, 0x7a, 0x91, 0x3a, 0x68, 0xb2, 0x9d, 0x3d, 0xd9, 0x01, 0xc5, - 0x5f, 0x3c, 0xa8, 0x1d, 0x99, 0xc6, 0xe7, 0xad, 0x09, 0xd1, 0x39, 0x3a, - 0x92, 0xc5, 0x77, 0x9c, 0xdf, 0x99, 0x56, 0x9f, 0xfe, 0xf8, 0xfd, 0xc8, - 0x4f, 0x19, 0xa3, 0xa0, 0xdf, 0xff, 0x17, 0xac, 0xa9, 0x03, 0x32, 0x85, - 0x4c, 0x29, 0xca, 0x89, 0x58, 0xdc, 0x88, 0xdd, 0xeb, 0x79, 0x68, 0x5e, - 0x0f, 0x37, 0x1a, 0xf7, 0x05, 0xfd, 0x39, 0x91, 0x25, 0x61, 0xf3, 0x04, - 0xda, 0x97, 0xfc, 0x7b, 0xcc, 0x40, 0x63, 0xfd, 0x5b, 0x3b, 0x27, 0x8e, - 0x92, 0x6d, 0x98, 0x0f, 0xcc, 0x9c, 0x9b, 0xda, 0xb2, 0xc6, 0xca, 0x56, - 0xff, 0x7e, 0xcc, 0xa2, 0xc0, 0x45, 0x3e, 0xf6, 0xdf, 0xa7, 0xe8, 0x2a, - 0xef, 0x0c, 0xde, 0xec, 0xa4, 0x1d, 0x2c, 0x3e, 0x03, 0xfd, 0xa4, 0x44, - 0x60, 0x4a, 0xf5, 0x83, 0x8f, 0x09, 0x2d, 0xe8, 0xd5, 0x46, 0xf6, 0x1c, - 0x2d, 0x39, 0x28, 0x0c, 0xdf, 0xa1, 0x2b, 0x05, 0x6e, 0x3c, 0x36, 0xdd, - 0x91, 0x81, 0x52, 0xf1, 0x56, 0xdc, 0xbb, 0x79, 0x62, 0xd8, 0x2e, 0x27, - 0x5d, 0x9f, 0x3c, 0xce, 0x81, 0x5c, 0x70, 0xe5, 0x4d, 0x33, 0x06, 0xd5, - 0x14, 0x04, 0xb7, 0xbc, 0x7b, 0x7a, 0xb4, 0xf7, 0x4a, 0x48, 0x8f, 0x97, - 0x85, 0x96, 0x69, 0xc9, 0x40, 0x52, 0xb1, 0x1c, 0x28, 0x82, 0xb3, 0x63, - 0xee, 0x94, 0x2f, 0xcb, 0x40, 0xad, 0xd7, 0x78, 0xb1, 0xc4, 0x21, 0x05, - 0x36, 0xd9, 0x46, 0xf0, 0x83, 0xcd, 0xee, 0x52, 0x7a, 0xa6, 0xa4, 0x40, - 0xb0, 0x2f, 0xf0, 0x1c, 0xfa, 0x42, 0x98, 0x54, 0x5b, 0xfe, 0x5e, 0xd6, - 0x84, 0x73, 0xca, 0x39, 0xbe, 0x87, 0xf2, 0x92, 0xee, 0x3d, 0x21, 0xcc, - 0x69, 0x81, 0xe5, 0xe8, 0x8a, 0xc3, 0x23, 0x64, 0x98, 0xd5, 0x1d, 0xcd, - 0x5c, 0x6c, 0x37, 0xc8, 0x8b, 0x08, 0x22, 0x12, 0x9f, 0x85, 0xc9, 0xed, - 0xb4, 0xa6, 0x07, 0xe1, 0x62, 0x79, 
0x35, 0x5d, 0x26, 0x11, 0x4a, 0x6b, - 0x33, 0x37, 0x91, 0x78, 0xe8, 0xe2, 0xba, 0x8b, 0x8a, 0xb7, 0xbb, 0x0f, - 0xd2, 0xb3, 0xa2, 0x02, 0x0c, 0x57, 0x35, 0x99, 0x88, 0x6b, 0x9b, 0x64, - 0x79, 0x1f, 0x4a, 0x48, 0xd4, 0x3b, 0x5c, 0xeb, 0xb4, 0x83, 0xc3, 0xad, - 0x9c, 0x6a, 0xb0, 0xcf, 0x7f, 0x70, 0xe8, 0x22, 0x46, 0x25, 0xfe, 0x7e, - 0x02, 0x44, 0x83, 0x02, 0xb3, 0x08, 0x2e, 0x34, 0x08, 0x4b, 0xff, 0xa2, - 0xc1, 0x60, 0xbb, 0xd8, 0x89, 0x16, 0xf8, 0xaa, 0xab, 0xea, 0xf7, 0xa0, - 0x10, 0x9a, 0xc9, 0xe9, 0xa4, 0x81, 0xa7, 0x87, 0x32, 0x5b, 0xc1, 0xd0, - 0xd9, 0x70, 0x6f, 0xb6, 0x7c, 0x65, 0xd5, 0x0e, 0x65, 0x93, 0xfe, 0x6d, - 0x66, 0xaa, 0xab, 0xd0, 0x03, 0x07, 0xf2, 0xbe, 0x39, 0xd6, 0xc8, 0xac, - 0xf2, 0x06, 0x58, 0x58, 0x46, 0xc0, 0x1a, 0xbd, 0xa4, 0x96, 0x38, 0x31, - 0x32, 0x89, 0x04, 0xdf, 0xcd, 0x3c, 0x2e, 0x98, 0xb8, 0x39, 0xba, 0xe2, - 0xca, 0x6b, 0xd0, 0x53, 0xce, 0x4a, 0xc8, 0x95, 0x81, 0x84, 0x17, 0xce, - 0x7f, 0x1d, 0xc1, 0x5a, 0xc4, 0xc2, 0x73, 0x30, 0x6d, 0x0b, 0x8c, 0xf8, - 0x66, 0x38, 0x4e, 0xa3, 0x14, 0x84, 0x15, 0x36, 0x9e, 0x0d, 0x56, 0x6b, - 0xa6, 0x77, 0x65, 0xa4, 0x2c, 0x77, 0x00, 0x8b, 0x43, 0x57, 0xc6, 0x25, - 0xc5, 0xd0, 0x17, 0x79, 0x6b, 0x5d, 0xbc, 0xcd, 0xc8, 0x25, 0x8f, 0x20, - 0x09, 0xcc, 0xbd, 0x80, 0x10, 0xdf, 0x35, 0xf6, 0x9c, 0x04, 0x80, 0x23, - 0xdc, 0x97, 0xe0, 0xba, 0x29, 0x48, 0x2e, 0x95, 0x0f, 0xb1, 0x9b, 0xc7, - 0xe6, 0x0b, 0x89, 0x16, 0xe2, 0x81, 0x3b, 0x32, 0x69, 0xc4, 0xde, 0xc6, - 0x12, 0x09, 0x47, 0xff, 0x50, 0xe4, 0x45, 0xb7, 0x35, 0xd2, 0x61, 0x9b, - 0x52, 0x6e, 0xbe, 0xaf, 0xd2, 0xeb, 0x0c, 0x50, 0xf1, 0x57, 0x9f, 0x59, - 0xe1, 0xc1, 0x4f, 0x8c, 0x79, 0x07, 0x05, 0xce, 0x8d, 0x64, 0xb2, 0xf0, - 0xd3, 0x4f, 0xe1, 0x7b, 0xfa, 0x30, 0x0a, 0xc2, 0x5d, 0x0c, 0x47, 0x6c, - 0x17, 0x77, 0x1f, 0xe5, 0xd8, 0x14, 0xfd, 0xc1, 0x01, 0x70, 0x51, 0x60, - 0xb2, 0x20, 0xfd, 0x86, 0xbc, 0x19, 0x5e, 0x01, 0xa6, 0x19, 0x3a, 0x21, - 0xa5, 0x0a, 0x1c, 0xd9, 0xa9, 0x78, 0xbb, 0xc9, 0x01, 0x65, 0xe4, 0xb3, - 0x48, 0xb8, 0xe1, 0xe7, 0xb5, 0xf4, 
0x4e, 0xa9, 0xb6, 0xe2, 0x5b, 0xeb, - 0xf5, 0x76, 0x06, 0x1a, 0xd9, 0x08, 0x40, 0xff, 0x72, 0xb2, 0xe3, 0x01, - 0x50, 0xb1, 0xad, 0xb3, 0xa3, 0xf6, 0xef, 0x72, 0x05, 0x0c, 0xf4, 0xce, - 0x24, 0x2c, 0x63, 0x89, 0x63, 0x9e, 0x21, 0xb8, 0xb0, 0xbe, 0xc7, 0x45, - 0xae, 0x47, 0x2b, 0x9e, 0x61, 0x81, 0x4c, 0x76, 0x96, 0x7b, 0x18, 0x37, - 0x74, 0xcb, 0x00, 0xef, 0x38, 0x72, 0x24, 0x0a, 0x63, 0xc1, 0x64, 0xd6, - 0x41, 0xc8, 0x6a, 0xf1, 0xe7, 0x11, 0x20, 0x4b, 0xc2, 0x95, 0x70, 0xb8, - 0xf8, 0x8f, 0xd9, 0xae, 0x8c, 0x12, 0xd8, 0x6f, 0x63, 0x30, 0xca, 0x56, - 0x46, 0x11, 0xda, 0x49, 0x1f, 0x84, 0x3d, 0xae, 0xab, 0x78, 0x29, 0x02, - 0x6c, 0x43, 0xa3, 0xef, 0x9d, 0x97, 0x59, 0x15, 0x53, 0xcd, 0xc7, 0x47, - 0x65, 0x30, 0xc7, 0xae, 0x31, 0x4a, 0x41, 0xb4, 0x66, 0x9c, 0xbb, 0x51, - 0x0b, 0xbd, 0xe2, 0x7d, 0x41, 0x2c, 0xd0, 0x75, 0x57, 0x93, 0xce, 0x2e, - 0xeb, 0x31, 0x7f, 0x56, 0xb2, 0xa4, 0x2b, 0x9f, 0xcc, 0xef, 0x6f, 0xf0, - 0x77, 0x19, 0xad, 0x4d, 0x2e, 0x37, 0x00, 0x75, 0x53, 0xae, 0x22, 0x44, - 0x69, 0x1c, 0x8a, 0x90, 0xf2, 0xcd, 0x0f, 0x6b, 0x37, 0xdb, 0xfd, 0x71, - 0x64, 0x80, 0xd8, 0x57, 0x1b, 0x8f, 0xff, 0x14, 0xd4, 0x5f, 0xe1, 0xd1, - 0x0f, 0x06, 0x13, 0x61, 0x29, 0xa9, 0x80, 0x9d, 0xc7, 0x8a, 0xa0, 0xb5, - 0xaa, 0xfc, 0xe0, 0xb4, 0xb4, 0xf0, 0x31, 0xf0, 0xec, 0x78, 0x03, 0x28, - 0xb9, 0xf7, 0xd9, 0xa7, 0xc8, 0xad, 0x2e, 0x16, 0xb8, 0x18, 0x82, 0x43, - 0x66, 0x8b, 0xae, 0xb2, 0x45, 0x2b, 0x0c, 0x9d, 0x69, 0xbd, 0x1b, 0xc5, - 0x20, 0xc6, 0x41, 0xe7, 0x4f, 0x4b, 0x7b, 0x46, 0x3d, 0x7a, 0x6d, 0x9f, - 0x13, 0x2e, 0x0f, 0xf3, 0x85, 0x3e, 0x5b, 0x12, 0xe5, 0xbf, 0x1b, 0x20, - 0xc3, 0x5f, 0x6b, 0xf7, 0xf7, 0xa3, 0xd7, 0x33, 0xd2, 0xcb, 0x18, 0xa5, - 0xa4, 0xa2, 0xd3, 0x59, 0x91, 0x9a, 0x04, 0xfa, 0x9d, 0xa5, 0x55, 0xad, - 0x09, 0x5a, 0x1e, 0x0b, 0x10, 0xd0, 0x46, 0x18, 0xe4, 0x09, 0xe8, 0x1b, - 0x44, 0xd3, 0x78, 0x45, 0xc0, 0xdf, 0xa2, 0xef, 0xfc, 0x59, 0x8a, 0x1b, - 0x22, 0x60, 0xc9, 0x58, 0x7d, 0x65, 0x45, 0xa9, 0xac, 0xd5, 0xd4, 0xc4, - 0x44, 0xd3, 0x08, 0x44, 0x40, 0x4d, 
0x3d, 0x7e, 0x39, 0x81, 0x72, 0x15, - 0x49, 0xd7, 0x2c, 0xda, 0x33, 0xaf, 0xc5, 0xb5, 0x8a, 0x3c, 0xbf, 0x81, - 0x88, 0x4f, 0x12, 0xe4, 0xe8, 0xe6, 0x00, 0xb6, 0xd9, 0xcd, 0xb2, 0x70, - 0x08, 0x15, 0x72, 0xf6, 0x46, 0xc7, 0x98, 0x7c, 0x1d, 0x54, 0xd0, 0x66, - 0x2d, 0xa1, 0xd8, 0xda, 0xb0, 0xe5, 0x9f, 0xa3, 0x2f, 0x2c, 0xfb, 0x34, - 0xb3, 0x21, 0x8b, 0x61, 0xf4, 0xce, 0x60, 0x2b, 0xb5, 0x5e, 0x3d, 0x14, - 0x2c, 0xbe, 0x19, 0x9d, 0x5f, 0x01, 0xe1, 0x21, 0x34, 0x11, 0x6b, 0x10, - 0xd4, 0x17, 0x58, 0xb3, 0x0a, 0x30, 0xe4, 0x17, 0x51, 0x0b, 0xf2, 0xbb, - 0xa6, 0xb7, 0x00, 0xa2, 0xe8, 0xa5, 0xa3, 0x41, 0x1d, 0x65, 0x2d, 0x26, - 0x93, 0x26, 0x7d, 0xdc, 0xad, 0x6f, 0x83, 0xeb, 0x66, 0x55, 0xde, 0x60, - 0x21, 0x56, 0x19, 0x4f, 0x9b, 0x7b, 0x26, 0x4a, 0x80, 0xf5, 0xab, 0x8b, - 0xbf, 0xe4, 0xb1, 0xa1, 0xd6, 0x33, 0x32, 0xbf, 0x86, 0x8c, 0x3c, 0xd0, - 0x12, 0x03, 0xd4, 0xb9, 0x23, 0x54, 0x1b, 0x94, 0x2f, 0xa5, 0x34, 0x4d, - 0x59, 0x18, 0x33, 0x8e, 0x8c, 0xf7, 0x1f, 0xc9, 0x6d, 0x75, 0xfb, 0x2a, - 0x22, 0x6c, 0x64, 0xb7, 0x79, 0xd8, 0x3b, 0xf6, 0x4e, 0x98, 0xd8, 0xa8, - 0x2c, 0x06, 0xd1, 0x92, 0x32, 0x44, 0xec, 0x38, 0x40, 0x3b, 0x53, 0x16, - 0x40, 0x8f, 0x92, 0x72, 0x87, 0xa8, 0xb8, 0xc0, 0x8f, 0x25, 0x4c, 0x4f, - 0x24, 0xfc, 0x8d, 0xc6, 0xa6, 0xeb, 0x2f, 0xdf, 0x2f, 0x0d, 0x2f, 0xd3, - 0x6e, 0x70, 0x71, 0xfe, 0xf0, 0x2e, 0xe9, 0x84, 0xd3, 0xc1, 0xd1, 0x70, - 0x4b, 0x8f, 0x7b, 0x60, 0xb0, 0xb7, 0xe3, 0x79, 0x52, 0x6a, 0x6b, 0x26, - 0x03, 0x8f, 0x6a, 0x0f, 0x8d, 0x85, 0xd7, 0x5f, 0xf7, 0x39, 0x31, 0x0e, - 0x26, 0x73, 0x84, 0x3f, 0x9b, 0x10, 0x6f, 0x29, 0x63, 0x14, 0x36, 0xa2, - 0xec, 0x44, 0x7d, 0x84, 0xc6, 0x4a, 0xec, 0xfe, 0xac, 0xcb, 0xe4, 0xfa, - 0xf6, 0x68, 0x83, 0x68, 0xe0, 0x8f, 0xd3, 0x8a, 0x60, 0x73, 0xf1, 0x5c, - 0x71, 0x02, 0x0c, 0xa2, 0x88, 0x2c, 0xa2, 0x35, 0x35, 0x5c, 0x3f, 0xb1, - 0xbe, 0xb3, 0x6b, 0x5c, 0xe1, 0x78, 0x75, 0x40, 0x20, 0x87, 0x67, 0xca, - 0x07, 0x1c, 0x9c, 0x02, 0xc7, 0xf2, 0x9d, 0x1c, 0xda, 0x1b, 0x86, 0x1b, - 0xc6, 0xa6, 0xff, 0xff, 0x04, 0x00, 
0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x93, 0xca, 0x30, 0xae, 0xea, 0x26, 0x6a, 0x1b, 0x15, 0x46, 0x0a, 0xe3, - 0x57, 0x23, 0x4c, 0x0c, 0x98, 0x8e, 0x3e, 0xbb, 0x43, 0x14, 0x73, 0xdf, - 0x17, 0x91, 0xe2, 0xee, 0x39, 0xf9, 0xc2, 0x2f, 0xdc, 0xad, 0x0e, 0x00, - 0xf5, 0xdd, 0xe3, 0x97, 0xba, 0x8c, 0xee, 0x53, 0xc4, 0x70, 0x37, 0x46, - 0xcf, 0x04, 0xc3, 0xc8, 0x56, 0x38, 0x2e, 0x39, 0x75, 0x32, 0x6d, 0x98, - 0xc4, 0x14, 0xae, 0xa4, 0x29, 0xa3, 0xc6, 0xb6, 0x66, 0x45, 0x48, 0xdf, - 0xc0, 0xa9, 0x4b, 0x4f, 0xef, 0xb9, 0xb4, 0x89, 0x0d, 0x64, 0x00, 0x5c, - 0xd1, 0xc8, 0x2b, 0xf7, 0xc5, 0x1a, 0x1b, 0x06, 0xb7, 0x49, 0xb1, 0xe3, - 0x4d, 0x87, 0xf9, 0x3f, 0xba, 0x39, 0xa3, 0x56, 0x7f, 0x43, 0xcc, 0x15, - 0x9c, 0x3d, 0xba, 0x71, 0x7b, 0xeb, 0x45, 0x0f, 0x15, 0x1b, 0x6c, 0x84, - 0x75, 0x6d, 0x43, 0x0b, 0x27, 0x12, 0x6b, 0xbc, 0x0a, 0x6d, 0xe4, 0xf6, - 0x4f, 0xc7, 0xbb, 0x9e, 0x91, 0xb5, 0x09, 0x5f, 0x79, 0x2a, 0xbf, 0xda, - 0x34, 0x91, 0x44, 0x47, 0x52, 0x64, 0x00, 0x89, 0x27, 0x17, 0x5c, 0xe9, - 0x90, 0x8b, 0xcb, 0xbe, 0x21, 0x47, 0x65, 0x1c, 0x54, 0x61, 0x48, 0x17, - 0x66, 0xb7, 0xa1, 0x60, 0x27, 0x31, 0x04, 0x42, 0x3b, 0x33, 0x3d, 0xda, - 0xf7, 0x61, 0x3d, 0x4b, 0x91, 0xa5, 0x74, 0x4b, 0xde, 0x16, 0xf2, 0x79, - 0x3e, 0xf7, 0x89, 0x87, 0xb3, 0xdd, 0xa2, 0x49, 0xd7, 0x54, 0x1b, 0x39, - 0xff, 0xb5, 0xec, 0x9d, 0x1d, 0x09, 0x7e, 0x5a, 0x3c, 0xd1, 0xdc, 0x0e, - 0x2a, 0x0e, 0x2c, 0x40, 0x4e, 0xa5, 0x8c, 0x9d, 0xc8, 0x9b, 0xa5, 0xb2, - 0x40, 0xa4, 0xaa, 0x3b, 0xac, 0x93, 0x19, 0xf7, 0xa1, 0x8b, 0xf8, 0x4a, - 0x40, 0x08, 0x5d, 0x1d, 0xb0, 0xae, 0x0f, 0x67, 0xa7, 0x21, 0xaf, 0xe3, - 0xb1, 0xfc, 0xff, 0xa0, 0x95, 0x66, 0x2b, 0xf7, 0x82, 0x2d, 0x8a, 0x26, - 0x0f, 0xc3, 0xed, 0x62, 0xb6, 0xcb, 0x4c, 0x86, 0xe9, 0x20, 0x78, 0x3f, - 0x08, 0x53, 0x8f, 0x41, 0xf1, 0xa1, 0x04, 0x77, 0xd9, 0xe6, 0xea, 0x26, - 0x6d, 0x33, 0x48, 0xb3, 0xbb, 0xed, 0xfc, 0xd7, 0xa3, 0x2b, 0xe2, 0x39, - 0xcf, 0x78, 0x4e, 0x11, 0x26, 0xad, 0x39, 0x83, 0x6e, 0x72, 0xbf, 0xc6, - 0x34, 0x23, 0x97, 0x5d, 0x7b, 0x64, 
0x1e, 0x78, 0x00, 0x34, 0x92, 0x5d, - 0x3f, 0x23, 0x28, 0x60, 0x7f, 0x88, 0xf0, 0xca, 0x96, 0x4a, 0x15, 0xbf, - 0x8a, 0xb7, 0xd0, 0xd9, 0x99, 0x8b, 0xdb, 0x26, 0xdc, 0x7e, 0x8d, 0x35, - 0x53, 0x60, 0x07, 0x85, 0x80, 0xc4, 0x9c, 0x0d, 0x81, 0xe2, 0x93, 0x85, - 0x76, 0x2d, 0x85, 0x21, 0x6e, 0xda, 0x29, 0xe5, 0xb1, 0x08, 0x46, 0x09, - 0x1b, 0x8a, 0xd9, 0xd2, 0xd7, 0x16, 0x74, 0xee, 0x26, 0x3e, 0xc4, 0x8c, - 0x2e, 0x6b, 0x0c, 0xbc, 0x95, 0xea, 0x4a, 0xb2, 0xd6, 0x6f, 0x43, 0xd1, - 0x3a, 0x8f, 0xbd, 0x77, 0xb4, 0x67, 0x63, 0x6b, 0xd2, 0xe0, 0xf0, 0x81, - 0x74, 0xb7, 0xc5, 0x11, 0x60, 0x10, 0x6b, 0xc6, 0x0f, 0xfd, 0x84, 0x2e, - 0x5c, 0x8f, 0x3b, 0xf5, 0x68, 0xa7, 0x62, 0xc6, 0x4f, 0xa6, 0xee, 0x19, - 0x44, 0xea, 0xc0, 0xe4, 0x64, 0x12, 0x71, 0x2f, 0xfb, 0xa3, 0x4d, 0xb0, - 0x8e, 0x5e, 0xe1, 0x79, 0x65, 0xd4, 0xf3, 0xed, 0x73, 0x04, 0xf1, 0x6d, - 0xc6, 0x75, 0x54, 0x28, 0x13, 0xe2, 0xd6, 0xa1, 0x26, 0xf9, 0xa4, 0x29, - 0x20, 0x5b, 0xd0, 0x3c, 0x3d, 0xf3, 0x7a, 0x18, 0x9a, 0x3d, 0xec, 0x6a, - 0x4c, 0xfd, 0xa5, 0x00, 0xdf, 0xec, 0xfd, 0x64, 0x38, 0x66, 0xa7, 0xba, - 0x59, 0xb3, 0x9b, 0x9c, 0x44, 0xfb, 0x10, 0x08, 0xb8, 0x79, 0xea, 0x85, - 0xbf, 0xa4, 0x14, 0xce, 0xce, 0x85, 0x22, 0x3f, 0x16, 0x00, 0x1c, 0x57, - 0xc8, 0x5a, 0x1b, 0xf5, 0xff, 0xde, 0x7e, 0xa9, 0xcc, 0xf3, 0xb5, 0x1d, - 0x57, 0x06, 0xda, 0xbb, 0x6c, 0x0a, 0x1e, 0xd4, 0x09, 0x74, 0x84, 0x1d, - 0xfa, 0xdf, 0x33, 0x1e, 0xe2, 0x8f, 0x10, 0xf7, 0x73, 0xab, 0x71, 0xb8, - 0x64, 0xce, 0xc0, 0x49, 0xc0, 0x36, 0xd3, 0x39, 0x31, 0x4c, 0x12, 0x5b, - 0xf3, 0xf9, 0xb4, 0x2c, 0x88, 0xba, 0xd4, 0x1a, 0xbd, 0x0c, 0x99, 0xbd, - 0x0e, 0xad, 0x51, 0xe0, 0xca, 0xdb, 0x25, 0x66, 0x83, 0xe0, 0x55, 0x18, - 0xeb, 0xa6, 0x4e, 0x56, 0xcb, 0x2f, 0xa5, 0xf2, 0x42, 0x7a, 0xa1, 0x05, - 0xf0, 0x3a, 0x71, 0x5a, 0x78, 0x3a, 0x7a, 0x6d, 0x12, 0x9f, 0x43, 0xc5, - 0xcc, 0xb3, 0xfd, 0xf2, 0xbf, 0x05, 0x16, 0xef, 0x07, 0xf9, 0xde, 0x0d, - 0x51, 0xf0, 0x33, 0x86, 0x43, 0x57, 0x40, 0xbc, 0xa9, 0xbd, 0xa0, 0x23, - 0xff, 0xbb, 0xe6, 0x15, 0xa1, 0xeb, 
0xe9, 0x78, 0x0d, 0x72, 0x76, 0xf2, - 0xb6, 0x6e, 0x46, 0xe2, 0x86, 0xab, 0x3c, 0x52, 0x2c, 0xc6, 0x77, 0xdd, - 0x57, 0xf7, 0x4d, 0x36, 0xbb, 0x41, 0x08, 0x21, 0xaa, 0xe6, 0x44, 0x50, - 0xed, 0xaf, 0x18, 0xb3, 0xdd, 0x6b, 0x57, 0x46, 0x9e, 0x44, 0x93, 0x20, - 0xe0, 0x62, 0x95, 0xcd, 0xcf, 0xe4, 0x96, 0x92, 0xc3, 0x0d, 0x16, 0xb2, - 0xc3, 0xf4, 0x0f, 0x3f, 0x87, 0x17, 0xb9, 0x7b, 0x60, 0x60, 0xfa, 0xfb, - 0x81, 0x5c, 0xb3, 0xb7, 0x89, 0x73, 0xf7, 0x35, 0xf7, 0x27, 0xf1, 0x0e, - 0xa4, 0xa1, 0xba, 0xea, 0x6a, 0xe3, 0x5c, 0x0f, 0xf7, 0x15, 0xbc, 0x28, - 0x57, 0x27, 0x8f, 0xd8, 0xca, 0x82, 0x19, 0xd0, 0xa3, 0x9d, 0xe5, 0xe0, - 0x44, 0xbf, 0x78, 0xa4, 0x09, 0x69, 0x27, 0xa0, 0x69, 0xb5, 0xd4, 0xbe, - 0x00, 0xe6, 0x03, 0x97, 0xbc, 0x8b, 0xfc, 0x25, 0x70, 0xb3, 0x49, 0x30, - 0xe3, 0x24, 0x19, 0x77, 0xb4, 0x93, 0x46, 0x03, 0xe6, 0x22, 0xaf, 0x76, - 0xd2, 0x90, 0x00, 0x05, 0x46, 0xb8, 0xa4, 0xf5, 0x4c, 0xaa, 0x04, 0x63, - 0xa0, 0x57, 0xe0, 0x20, 0x6e, 0x1a, 0xed, 0x21, 0x86, 0xd0, 0x38, 0x5b, - 0xe6, 0xa7, 0xb0, 0xe7, 0x75, 0xe3, 0x76, 0xb3, 0x15, 0x8b, 0xdc, 0x10, - 0x52, 0x15, 0x21, 0x7b, 0xd0, 0xc4, 0x75, 0x26, 0x1d, 0x6e, 0x0d, 0x4c, - 0x08, 0x5b, 0x95, 0x9a, 0xd0, 0xda, 0xbe, 0x23, 0x98, 0xde, 0x60, 0x2a, - 0xe9, 0xa4, 0x92, 0xf0, 0x92, 0x84, 0xdc, 0x86, 0x60, 0xf5, 0x23, 0x31, - 0xf5, 0xe9, 0xd6, 0x00, 0xc1, 0x78, 0xab, 0x05, 0x94, 0xd3, 0x47, 0x4d, - 0x32, 0x0f, 0x82, 0xa0, 0x99, 0x0b, 0xfe, 0x6b, 0x58, 0xf9, 0x24, 0xf6, - 0x17, 0xa0, 0x5f, 0x24, 0x6a, 0xc6, 0x01, 0xa8, 0xfa, 0xca, 0xdc, 0xb6, - 0x83, 0xcb, 0xd2, 0x3b, 0xb7, 0x0b, 0x04, 0x3e, 0x6a, 0xaf, 0x23, 0x17, - 0x3e, 0x14, 0xce, 0x52, 0x1c, 0xe3, 0x06, 0x66, 0x29, 0x17, 0x6f, 0x7e, - 0x66, 0x06, 0xa9, 0x68, 0x7f, 0xca, 0xad, 0xa8, 0xb7, 0x2d, 0xa4, 0x5d, - 0xa6, 0x16, 0xcd, 0xed, 0xee, 0x14, 0x96, 0xc8, 0x12, 0x69, 0x4e, 0x70, - 0x72, 0x2a, 0x75, 0x82, 0x08, 0x3f, 0x3e, 0x27, 0xa0, 0xea, 0x43, 0x84, - 0xa9, 0x9a, 0x91, 0x87, 0x4f, 0x20, 0x61, 0x55, 0x8d, 0x70, 0xad, 0x6c, - 0x59, 0x5d, 0x13, 0x80, 0xbb, 0x52, 
0x55, 0x81, 0x8b, 0x59, 0x94, 0x0f, - 0xc2, 0x54, 0x79, 0x59, 0xe8, 0x9d, 0x58, 0xe5, 0x91, 0x10, 0xb3, 0xef, - 0x1c, 0xda, 0xaa, 0xdd, 0x91, 0x0b, 0xb0, 0x14, 0x3b, 0xad, 0x02, 0x98, - 0x40, 0x3c, 0x54, 0xc4, 0x23, 0xb9, 0x40, 0x54, 0x7e, 0x88, 0x10, 0x3e, - 0x24, 0xe5, 0xf6, 0xdf, 0x5c, 0x9e, 0x7a, 0x9f, 0xd0, 0xff, 0x5e, 0x9c, - 0xb6, 0x30, 0x17, 0x94, 0xd2, 0xaa, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x96, 0xff, 0x2f, 0x01, 0x60, 0x2c, 0x1b, 0xe3, - 0xc6, 0xcb, 0xa4, 0x41, 0xa1, 0x44, 0x13, 0x14, 0xe2, 0x44, 0x77, 0x1c, - 0x96, 0xe8, 0xe6, 0x4f, 0x70, 0x99, 0x3a, 0xef, 0xa1, 0x6f, 0x1f, 0x7f, - 0xb9, 0xe9, 0x1e, 0x35, 0x37, 0x5b, 0x94, 0x90, 0x78, 0xcc, 0x8d, 0xcd, - 0x6c, 0x9f, 0xf6, 0x73, 0xed, 0x23, 0xa2, 0x28, 0x64, 0x58, 0x50, 0x64, - 0x05, 0xbc, 0xc9, 0x9b, 0x5a, 0xec, 0x3f, 0x2b, 0x61, 0xcf, 0xa7, 0x35, - 0x56, 0x8c, 0x77, 0x68, 0xd6, 0xcf, 0x9b, 0xc5, 0x62, 0xee, 0x3a, 0xb2, - 0xfe, 0x78, 0xba, 0x02, 0xe7, 0x26, 0x8a, 0x89, 0x30, 0x19, 0xcc, 0xb0, - 0x98, 0xbf, 0x30, 0x2c, 0xae, 0x13, 0x6c, 0x93, 0x86, 0x19, 0x84, 0x13, - 0x01, 0x2f, 0x39, 0x4e, 0x33, 0xd1, 0x15, 0x99, 0xf7, 0x1e, 0xb8, 0x86, - 0xdb, 0xb6, 0xf9, 0x56, 0x42, 0x0e, 0x4a, 0xb1, 0x5e, 0xf0, 0x9a, 0x06, - 0x5e, 0xab, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, - 0xcd, 0xde, 0xad, 0x40, 0x34, 0xcd, 0x79, 0x0a, 0x29, 0x84, 0x05, 0x3f, - 0xb5, 0xbe, 0x49, 0x84, 0x43, 0xcc, 0xa6, 0xe3, 0xe9, 0xdc, 0x84, 0x14, - 0xe7, 0xb3, 0x1b, 0x96, 0xe8, 0xda, 0x35, 0x15, 0x38, 0xf5, 0xb3, 0xb5, - 0x91, 0xc3, 0xc3, 0x94, 0xc6, 0x79, 0xeb, 0xf5, 0x22, 0x78, 0xf0, 0x0b, - 0xda, 0xb0, 0x91, 0xa7, 0x43, 0x71, 0x8e, 0xa6, 0x52, 0x0f, 0x81, 0x06, - 0xc8, 0xdf, 0xb5, 0x1f, 0x92, 0xb0, 0xfe, 0x93, 0x38, 0x4c, 0xf4, 0x17, - 0x66, 0x31, 0xea, 0x08, 0x72, 0xb9, 0xaa, 0xfd, 0x40, 0x8d, 0xbf, 0x56, - 0x19, 0xb1, 0xb5, 0x8e, 0x4e, 0x4e, 0x73, 0x7f, 0x4b, 0x0c, 0x70, 0x94, - 0x7c, 0x9f, 0xfc, 0x23, 0x35, 0xba, 0xd2, 0x23, 0x88, 0x1d, 0x83, 0x28, - 0x45, 0xd7, 0x1b, 0x63, 0xfb, 0x36, 
0x86, 0x06, 0xf3, 0x99, 0x81, 0x6e, - 0xd7, 0xf1, 0xd4, 0x53, 0x6d, 0x30, 0x3c, 0x8d, 0xac, 0xc6, 0x9a, 0xd5, - 0xe8, 0x4f, 0x11, 0x58, 0xba, 0xfd, 0x67, 0x06, 0xe7, 0x1a, 0xb4, 0xa1, - 0x45, 0x13, 0xf2, 0x3b, 0xdc, 0x71, 0xf0, 0xc6, 0x53, 0xfc, 0x8b, 0x2f, - 0x14, 0xe4, 0xe0, 0xd6, 0x8c, 0x96, 0x4c, 0x48, 0xc0, 0x30, 0x6e, 0x00, - 0x0f, 0x42, 0xfe, 0xa7, 0x9d, 0x0f, 0xf2, 0x52, 0x58, 0xf9, 0x35, 0x33, - 0x99, 0xda, 0xd5, 0x9d, 0x61, 0x26, 0x6b, 0x80, 0xff, 0x08, 0x51, 0x54, - 0x26, 0xfa, 0x8d, 0xfc, 0x67, 0x60, 0x93, 0x0e, 0xcd, 0x78, 0x41, 0x5a, - 0x31, 0x47, 0x14, 0xb0, 0x65, 0x89, 0x30, 0xcb, 0x0c, 0xc5, 0xa0, 0x37, - 0xa8, 0xe0, 0xcf, 0x24, 0xa4, 0x2f, 0xad, 0xa7, 0x9c, 0xa2, 0xe8, 0x81, - 0x17, 0xbe, 0x2f, 0xd5, 0xd1, 0xa8, 0xff, 0x9d, 0x5e, 0x7f, 0xd9, 0x6c, - 0x56, 0xe6, 0xc4, 0x60, 0x8d, 0xa5, 0x47, 0x5e, 0x43, 0x1e, 0x34, 0x23, - 0xb3, 0x6a, 0xdf, 0x6c, 0xf8, 0xd1, 0x85, 0x11, 0xaa, 0x74, 0x85, 0x71, - 0x27, 0xc5, 0x80, 0x37, 0x60, 0xb4, 0x2b, 0x53, 0x5a, 0xc4, 0x35, 0xd1, - 0xe8, 0x4b, 0x01, 0x58, 0x1f, 0xdb, 0x73, 0xf3, 0x2c, 0x8b, 0xbb, 0x17, - 0x36, 0x76, 0x35, 0x6b, 0xa0, 0x82, 0x47, 0xf5, 0x16, 0x21, 0x41, 0x43, - 0xc9, 0x1f, 0x53, 0xf9, 0xe9, 0x47, 0xf0, 0x9c, 0x6d, 0xe3, 0x23, 0x59, - 0x74, 0xdc, 0x1a, 0x8f, 0x4e, 0x6c, 0x71, 0x83, 0x7e, 0xd0, 0x2b, 0x50, - 0x44, 0x86, 0x5f, 0xbf, 0x60, 0x92, 0xeb, 0x9a, 0x9b, 0xa2, 0xc9, 0x2b, - 0xa8, 0xc4, 0x77, 0x4e, 0x3f, 0xf8, 0xa6, 0x39, 0x50, 0x5c, 0x7e, 0x2a, - 0x70, 0xb0, 0x5d, 0x28, 0xb2, 0x81, 0xa9, 0xaf, 0x16, 0x5e, 0x27, 0xeb, - 0x03, 0x0e, 0x82, 0xad, 0x28, 0x51, 0x16, 0xd1, 0xf4, 0x58, 0x75, 0x1a, - 0xf9, 0x6a, 0xbf, 0x73, 0xd7, 0x84, 0x07, 0x7f, 0x4c, 0x4e, 0x29, 0x02, - 0x9b, 0x60, 0x81, 0x85, 0xa9, 0xbf, 0xc7, 0xa0, 0x8f, 0x8a, 0xdc, 0xa4, - 0xc5, 0x17, 0x51, 0x24, 0x15, 0x28, 0x9e, 0x5e, 0x78, 0x84, 0x21, 0x02, - 0xca, 0x26, 0x61, 0x4e, 0x95, 0xa6, 0x8d, 0xa6, 0x98, 0x7d, 0x1f, 0x84, - 0x19, 0x24, 0x8b, 0x31, 0x76, 0x89, 0x2a, 0x5f, 0xa9, 0xfb, 0xaa, 0x8a, - 0x8c, 0xce, 0xe4, 0x30, 0xd6, 0xec, 
0x5b, 0x39, 0xb7, 0x09, 0x80, 0x23, - 0x4c, 0xe1, 0x6e, 0x8f, 0x7c, 0x10, 0xe8, 0x8a, 0x60, 0x35, 0xd7, 0xa3, - 0xe0, 0x5f, 0xcd, 0xfa, 0x3d, 0x8f, 0xd8, 0x5d, 0xec, 0xc9, 0xc5, 0xa0, - 0x73, 0x41, 0x89, 0xe5, 0x39, 0xf2, 0x42, 0xff, 0x08, 0xa0, 0x12, 0xb7, - 0x4a, 0x5e, 0x46, 0x06, 0x31, 0xbd, 0x88, 0x5e, 0x9e, 0x05, 0x17, 0x51, - 0xb3, 0xe7, 0x88, 0x10, 0x19, 0x32, 0xff, 0x8a, 0x1e, 0xce, 0x66, 0xbc, - 0x84, 0x1f, 0xed, 0x52, 0x52, 0x77, 0xe1, 0x5e, 0xa6, 0x21, 0xe4, 0xad, - 0x59, 0xca, 0xa3, 0x77, 0xea, 0x66, 0x28, 0x15, 0x73, 0x3a, 0xfd, 0xe4, - 0x75, 0x46, 0x99, 0x59, 0x5c, 0x7a, 0x9b, 0x9d, 0x11, 0xb4, 0x76, 0x45, - 0x06, 0x45, 0x41, 0x1e, 0x94, 0xb7, 0xd9, 0xb8, 0xcb, 0xbf, 0x71, 0xec, - 0xba, 0x9f, 0x4a, 0x1b, 0xbc, 0xfd, 0x5c, 0x06, 0x64, 0xfd, 0x31, 0x52, - 0xc0, 0xe4, 0xa7, 0x21, 0x2f, 0x22, 0x92, 0xf0, 0x51, 0x33, 0x92, 0x1d, - 0x40, 0x3c, 0x01, 0x81, 0x3b, 0xa8, 0x2e, 0x4e, 0xb6, 0x60, 0xcd, 0xd4, - 0x36, 0x3b, 0x2e, 0x1d, 0x5e, 0x43, 0xd9, 0x94, 0xf1, 0x51, 0xd3, 0x59, - 0x94, 0x6a, 0xd5, 0x5f, 0x1f, 0xd3, 0xa6, 0x55, 0xda, 0x15, 0xf1, 0x3e, - 0x2c, 0x60, 0xb8, 0xc3, 0xda, 0x0e, 0x56, 0x53, 0xea, 0xcd, 0x39, 0x27, - 0x94, 0x86, 0x94, 0xb2, 0x5b, 0xd8, 0x9a, 0x12, 0x94, 0xb0, 0xb6, 0x77, - 0x28, 0xba, 0xde, 0xb6, 0x60, 0x4d, 0x2b, 0x6e, 0x3d, 0xf6, 0xf1, 0x48, - 0xf7, 0x77, 0xa1, 0x49, 0xe0, 0x9f, 0x1e, 0xc9, 0xe6, 0xcb, 0x95, 0x26, - 0x61, 0x5a, 0xc9, 0xed, 0x49, 0x40, 0x17, 0x57, 0x15, 0xfc, 0x3c, 0xb8, - 0x28, 0x79, 0xb8, 0x42, 0x2a, 0xf9, 0xd4, 0x19, 0xb9, 0x5f, 0x41, 0xc2, - 0x25, 0xd7, 0x88, 0x34, 0xb3, 0x25, 0x4e, 0xca, 0xff, 0x9e, 0x59, 0x9a, - 0x33, 0xc8, 0x12, 0xf9, 0xd5, 0x70, 0xc0, 0x8b, 0x43, 0x13, 0xc4, 0x8d, - 0x45, 0x99, 0xaa, 0xd7, 0xeb, 0xb1, 0xe9, 0xb7, 0x5b, 0xab, 0x48, 0xd1, - 0x26, 0x60, 0x8c, 0x13, 0x55, 0x8a, 0x41, 0xd3, 0x68, 0x58, 0xd4, 0xa6, - 0x30, 0x6e, 0x88, 0x3e, 0x81, 0x6e, 0x61, 0x06, 0x13, 0x66, 0xd5, 0x8e, - 0x5d, 0x87, 0x4f, 0xd9, 0xb1, 0x66, 0xb3, 0xc5, 0x88, 0xa9, 0xc0, 0x73, - 0xcb, 0x7f, 0x42, 0xec, 0x96, 0x64, 
0xad, 0x72, 0x85, 0x72, 0xaf, 0xeb, - 0xa9, 0xc4, 0x17, 0x86, 0xab, 0xe7, 0x23, 0xd7, 0x96, 0xf7, 0xb2, 0xb3, - 0x51, 0xe1, 0x9a, 0x3b, 0x0e, 0xaf, 0x89, 0xca, 0x7b, 0xf1, 0x70, 0x7b, - 0xc7, 0x82, 0xfc, 0xc7, 0x6c, 0x37, 0xd9, 0x7b, 0x82, 0x0f, 0x94, 0xcf, - 0xd1, 0xa9, 0x33, 0xc2, 0xa4, 0xab, 0xed, 0xad, 0xee, 0x64, 0x5d, 0x04, - 0xf2, 0xcb, 0x8e, 0x99, 0x22, 0x33, 0x69, 0x85, 0x85, 0xb6, 0x1a, 0x9b, - 0x09, 0x18, 0xbe, 0xcd, 0x63, 0xf6, 0x5d, 0x52, 0xbc, 0x26, 0x99, 0x3e, - 0x52, 0xe5, 0x0c, 0xc5, 0xee, 0xdd, 0xbb, 0x07, 0xbc, 0x38, 0xc1, 0x67, - 0x96, 0x8c, 0xe6, 0xe4, 0x18, 0xfa, 0x07, 0x91, 0x48, 0xef, 0x9c, 0x70, - 0x9d, 0x5b, 0x1c, 0x0e, 0xd5, 0xd3, 0x59, 0xee, 0x44, 0x13, 0xf7, 0x00, - 0xa6, 0x20, 0xad, 0x65, 0x1d, 0xb7, 0x96, 0x2f, 0x79, 0x7b, 0x04, 0xa3, - 0x10, 0x90, 0x29, 0x8c, 0xa3, 0x2e, 0x14, 0x39, 0xd3, 0xe4, 0x6e, 0x46, - 0xf7, 0x6e, 0x96, 0x68, 0xd9, 0xef, 0x45, 0xf7, 0x3c, 0xcd, 0xc7, 0xca, - 0x33, 0x64, 0x8e, 0x31, 0x80, 0x48, 0x7b, 0x7c, 0x81, 0x9a, 0x48, 0xff, - 0xd5, 0x0d, 0x74, 0xe7, 0x77, 0x46, 0x61, 0x9b, 0xde, 0xed, 0x83, 0xe9, - 0x4f, 0x92, 0xc1, 0x16, 0xad, 0x44, 0x40, 0x23, 0xce, 0x04, 0x31, 0xbf, - 0xcf, 0xe2, 0x5a, 0x68, 0x5a, 0xf4, 0x0f, 0xe1, 0x87, 0x79, 0xb0, 0x32, - 0x0b, 0x09, 0x6b, 0x72, 0x2b, 0x16, 0x06, 0x67, 0x82, 0x0b, 0x92, 0x35, - 0xdb, 0x4c, 0xe2, 0x4a, 0x60, 0x99, 0xaf, 0x52, 0x10, 0x4b, 0xa5, 0xcf, - 0xac, 0x66, 0x49, 0x56, 0x04, 0xc0, 0xd6, 0x6f, 0x62, 0x53, 0x6f, 0xcb, - 0x62, 0xe9, 0xa5, 0xca, 0x18, 0x8e, 0x86, 0x3f, 0x36, 0xfd, 0xea, 0x55, - 0x16, 0x6d, 0x6c, 0x6a, 0x8f, 0xa7, 0x9c, 0x70, 0x15, 0xd7, 0xf4, 0x57, - 0x68, 0x04, 0x84, 0x60, 0x3b, 0xb0, 0x32, 0xc4, 0xea, 0x9d, 0x70, 0xb9, - 0xa6, 0x34, 0xe5, 0xfa, 0xa1, 0x24, 0x54, 0x7f, 0xef, 0xac, 0xb4, 0x5f, - 0xa0, 0xc0, 0x40, 0x3f, 0x73, 0xdf, 0x56, 0xa6, 0xd9, 0x17, 0xf4, 0xff, - 0x50, 0xae, 0x21, 0x0d, 0x5a, 0xe0, 0xb0, 0xf9, 0x5b, 0x7a, 0x61, 0x6e, - 0xa6, 0x85, 0x85, 0xbf, 0x19, 0x03, 0xe2, 0x74, 0x1f, 0x03, 0x70, 0x76, - 0x3c, 0xed, 0x02, 0x7d, 0xfa, 0xf9, 
0x1e, 0x17, 0xdd, 0x42, 0x30, 0xf0, - 0x32, 0x47, 0x46, 0xae, 0xf5, 0x64, 0xe6, 0x5e, 0x2b, 0x40, 0x86, 0x97, - 0xb1, 0x24, 0x52, 0x69, 0x67, 0x79, 0x8e, 0x0d, 0xcc, 0x07, 0xcb, 0x72, - 0x29, 0xe9, 0xba, 0x2d, 0xf7, 0xcb, 0xe3, 0x86, 0x06, 0xaa, 0x6d, 0x79, - 0xf8, 0xb6, 0x93, 0x0a, 0x9c, 0x97, 0xef, 0x47, 0x37, 0x13, 0x2e, 0x6b, - 0xfd, 0x59, 0x0c, 0xc9, 0x5e, 0x5e, 0xcd, 0x71, 0x6f, 0x99, 0x0d, 0x88, - 0x9d, 0xbb, 0x7c, 0x2b, 0x22, 0xd5, 0xbe, 0xee, 0x26, 0x1c, 0xe1, 0xad, - 0xc8, 0x4d, 0x5f, 0x6b, 0xd1, 0xf4, 0x30, 0x4d, 0x46, 0x1d, 0x54, 0x11, - 0x4b, 0xa0, 0x7f, 0x94, 0x71, 0xc0, 0x44, 0x4a, 0x42, 0x11, 0xf5, 0x89, - 0xec, 0xb5, 0x24, 0x45, 0xf1, 0xf0, 0x30, 0x54, 0xf8, 0x62, 0xdb, 0x58, - 0x3d, 0x7c, 0x2a, 0x82, 0xe5, 0xbe, 0x13, 0xcf, 0xdc, 0x88, 0xfb, 0xd3, - 0x1e, 0x4d, 0xa5, 0x3e, 0xad, 0x95, 0xa2, 0xe6, 0x48, 0x73, 0xb2, 0xbe, - 0x96, 0xef, 0x8e, 0x0b, 0x28, 0xf9, 0xbe, 0x2a, 0xd6, 0x68, 0x9e, 0x9c, - 0x7b, 0x5a, 0xaf, 0x20, 0xf6, 0xa5, 0x3f, 0x99, 0x61, 0x57, 0xe8, 0x1c, - 0xb2, 0xc3, 0xd0, 0x7f, 0x2c, 0xb5, 0xe9, 0x66, 0x8e, 0x88, 0xec, 0x13, - 0x51, 0xbc, 0x8e, 0xb6, 0xe2, 0x91, 0xbf, 0x5e, 0x8c, 0x1c, 0xdd, 0x0e, - 0x0a, 0x13, 0x06, 0xc6, 0x62, 0x1c, 0x41, 0x8d, 0xa1, 0xc0, 0xf2, 0xfa, - 0x76, 0x35, 0xaa, 0x77, 0x06, 0x3f, 0x76, 0x50, 0xf6, 0x43, 0xf2, 0x25, - 0x00, 0x79, 0xde, 0xca, 0xa1, 0x06, 0x6f, 0xb4, 0x17, 0x4b, 0x99, 0x5a, - 0x00, 0x32, 0xd6, 0xb0, 0x1f, 0x80, 0x53, 0x16, 0xaa, 0x87, 0x72, 0xa2, - 0x34, 0xaf, 0x90, 0x3d, 0x60, 0xde, 0x0e, 0x6d, 0x83, 0xda, 0xb2, 0x11, - 0x2f, 0x39, 0xdc, 0x1a, 0xfe, 0x51, 0x74, 0x10, 0x3c, 0x41, 0xd5, 0x41, - 0x65, 0x4a, 0xa0, 0x11, 0xde, 0x95, 0x34, 0xef, 0xa0, 0xc9, 0xa8, 0xd3, - 0xcb, 0xb9, 0x7d, 0x51, 0x7d, 0xff, 0x26, 0x88, 0xd8, 0x29, 0x0e, 0xa0, - 0xd4, 0xa7, 0x07, 0x33, 0xe7, 0x7d, 0x59, 0x9f, 0x35, 0xc1, 0xb5, 0xf7, - 0x78, 0x78, 0x84, 0xf0, 0x20, 0x41, 0x3f, 0x02, 0x7d, 0x41, 0x90, 0x01, - 0x8d, 0xa4, 0xd8, 0xd7, 0xeb, 0x56, 0x7f, 0x38, 0xbc, 0x1e, 0x15, 0xdf, - 0xfc, 0x34, 0xe7, 0x99, 0xd4, 0x92, 
0xd5, 0xf3, 0x9e, 0x16, 0x0b, 0x5c, - 0xeb, 0xb6, 0x78, 0xac, 0x84, 0x06, 0x8e, 0xfe, 0xd0, 0x7c, 0xce, 0x4a, - 0x43, 0x49, 0x3b, 0xe1, 0xab, 0x57, 0xc0, 0x12, 0xd6, 0x9d, 0xa4, 0xee, - 0x91, 0x10, 0x81, 0xe2, 0xfc, 0x02, 0x26, 0x7a, 0xca, 0x81, 0x5b, 0x2f, - 0x34, 0x51, 0xdd, 0x25, 0x4d, 0xc8, 0xf9, 0x3e, 0x59, 0x0f, 0x3d, 0x64, - 0x51, 0xbf, 0x42, 0xc4, 0x92, 0x9d, 0x8f, 0x39, 0x8a, 0x31, 0x09, 0x24, - 0x19, 0x44, 0xc0, 0xf4, 0xea, 0xca, 0x59, 0xcb, 0x86, 0x6c, 0x02, 0x7a, - 0xe5, 0x30, 0x79, 0xe2, 0x2c, 0x76, 0x08, 0x8f, 0x98, 0x0d, 0x4d, 0x12, - 0xc3, 0x98, 0xb4, 0x24, 0x04, 0x4f, 0x51, 0xec, 0x4e, 0xec, 0xbd, 0x8c, - 0xc4, 0x79, 0x51, 0x7f, 0xe1, 0xce, 0x76, 0x28, 0x0b, 0x7b, 0xc5, 0x3f, - 0x5b, 0x48, 0x19, 0x76, 0x68, 0x31, 0x8e, 0x28, 0xff, 0x18, 0x24, 0xe3, - 0x91, 0xe7, 0x49, 0x0d, 0x10, 0xbd, 0x00, 0xc6, 0x58, 0xfd, 0xb6, 0x88, - 0x63, 0xbd, 0xb4, 0x4b, 0xb8, 0xed, 0xdd, 0xb7, 0x53, 0xce, 0x89, 0xdb, - 0x7f, 0xf4, 0xc3, 0x21, 0x31, 0xad, 0x20, 0x78, 0x06, 0x71, 0xaf, 0xc0, - 0xe3, 0xdc, 0xb8, 0xf4, 0x80, 0xc8, 0x33, 0x1d, 0x8b, 0xff, 0x5a, 0x92, - 0x68, 0x4d, 0xc1, 0x5b, 0x58, 0x3e, 0xf6, 0x7f, 0xba, 0x42, 0xa5, 0x6d, - 0xec, 0x03, 0x36, 0xc9, 0x3f, 0x83, 0x1f, 0x0c, 0x33, 0x57, 0x6a, 0x43, - 0x5f, 0x11, 0x72, 0x19, 0x2c, 0xda, 0x71, 0x58, 0xf2, 0x50, 0x50, 0x06, - 0x97, 0xd0, 0xdf, 0xd1, 0x4f, 0x0b, 0x00, 0x1a, 0xea, 0x85, 0x3b, 0x37, - 0x2f, 0xf0, 0x40, 0x52, 0xd9, 0x2a, 0xe8, 0x54, 0xa5, 0xee, 0x0f, 0x49, - 0x74, 0x39, 0x96, 0x5d, 0x60, 0x8f, 0x14, 0x59, 0x86, 0x59, 0x86, 0xfb, - 0x67, 0x71, 0x5c, 0x26, 0x5f, 0xe9, 0xab, 0x32, 0x77, 0x83, 0xdf, 0x02, - 0x19, 0x85, 0xae, 0x4d, 0x7d, 0x9c, 0x8d, 0x4f, 0x61, 0x05, 0x3c, 0x0c, - 0xc6, 0x74, 0x9e, 0x36, 0x33, 0xb8, 0x14, 0x85, 0xab, 0xa2, 0x0b, 0x5d, - 0x22, 0xf2, 0x50, 0x3e, 0xa4, 0x88, 0xac, 0x67, 0xf9, 0x06, 0xe5, 0x30, - 0x8e, 0xf9, 0x67, 0x34, 0xd5, 0x94, 0x5b, 0x35, 0xb7, 0x3d, 0x39, 0x5f, - 0x4e, 0xae, 0xfe, 0xf7, 0x57, 0xd3, 0x95, 0x7b, 0x0a, 0xd9, 0x92, 0x4a, - 0x66, 0x29, 0xa0, 0x18, 0x35, 0x54, 
0x14, 0x44, 0x79, 0x72, 0xc3, 0xbc, - 0xa8, 0x1a, 0xd3, 0xa3, 0xbe, 0x6f, 0x9e, 0xcc, 0x68, 0xb6, 0x5f, 0xd4, - 0x42, 0xab, 0xe8, 0x09, 0x60, 0x57, 0x2e, 0xb2, 0x9a, 0x5b, 0x62, 0x38, - 0xfb, 0x0a, 0x35, 0x9c, 0x4f, 0xf7, 0xe0, 0xd2, 0x06, 0x04, 0x1f, 0x79, - 0x7f, 0xa7, 0x7b, 0xd3, 0x63, 0xc9, 0xbd, 0x16, 0x58, 0x38, 0x7b, 0xaa, - 0x08, 0xf3, 0x14, 0x6c, 0x25, 0xf8, 0xa5, 0xe9, 0x4b, 0x45, 0x34, 0x89, - 0x76, 0x74, 0xcb, 0x41, 0x9c, 0x2a, 0xd9, 0xca, 0xb3, 0x12, 0x46, 0x6d, - 0x85, 0x4d, 0x63, 0x2d, 0x24, 0x1b, 0x19, 0x6b, 0x3f, 0x61, 0x6b, 0x4b, - 0x15, 0x83, 0x2d, 0x8f, 0x61, 0xab, 0xd1, 0x55, 0x93, 0x4e, 0x26, 0xd6, - 0x7a, 0x0a, 0x8a, 0xff, 0x58, 0x44, 0xf7, 0x39, 0x31, 0x1a, 0xab, 0xa6, - 0x98, 0x31, 0x41, 0x03, 0xb6, 0xc9, 0xf5, 0x50, 0xe3, 0x7b, 0xc0, 0x59, - 0x74, 0x60, 0x91, 0xb4, 0x79, 0x02, 0x25, 0xc1, 0xb5, 0xbd, 0xcb, 0x6e, - 0x40, 0x61, 0xfe, 0x68, 0x29, 0x83, 0x1b, 0xd2, 0x49, 0xe1, 0x31, 0xde, - 0xdd, 0x53, 0xb0, 0xb8, 0x96, 0xa2, 0xce, 0xea, 0x8b, 0x66, 0x2c, 0x5a, - 0x80, 0x51, 0x0b, 0xc1, 0x2d, 0x9a, 0xfa, 0x9d, 0xc6, 0xcc, 0x2b, 0xbb, - 0xaa, 0xce, 0x98, 0xaa, 0x26, 0x15, 0x8f, 0x4a, 0xe7, 0xdb, 0x17, 0x6c, - 0xe5, 0x58, 0xc9, 0xae, 0xe4, 0x9c, 0x1d, 0xab, 0x59, 0x84, 0x3e, 0x27, - 0x76, 0x03, 0xe3, 0x82, 0x64, 0x6f, 0x6e, 0x6f, 0x63, 0xd2, 0x12, 0x84, - 0xe3, 0x9b, 0x9d, 0x7e, 0x53, 0x1a, 0x54, 0x8d, 0xc1, 0xf0, 0x94, 0xae, - 0xad, 0x8f, 0x6a, 0x12, 0x4e, 0xa7, 0x30, 0xdb, 0x55, 0xbe, 0x09, 0xe2, - 0x56, 0x08, 0xc4, 0x3a, 0xb0, 0x55, 0xb0, 0x24, 0x96, 0xa6, 0x3e, 0x28, - 0xd0, 0x35, 0xfb, 0x58, 0x47, 0xba, 0x2d, 0x51, 0xbb, 0x72, 0x20, 0x59, - 0xd2, 0xdd, 0x9c, 0xe2, 0xb5, 0x31, 0x90, 0xac, 0x74, 0x5d, 0x9f, 0x3d, - 0x8c, 0x1c, 0x96, 0xc0, 0x60, 0x61, 0xa8, 0xbb, 0x3c, 0xb3, 0x6d, 0x6d, - 0x92, 0x4a, 0xca, 0xbb, 0x60, 0x5e, 0x82, 0x0d, 0x7f, 0xab, 0x4b, 0x36, - 0x4c, 0x93, 0x0d, 0x88, 0x71, 0xaf, 0xb6, 0x53, 0xb0, 0x38, 0xb4, 0x1c, - 0xb4, 0x7b, 0xd4, 0x13, 0x32, 0x6c, 0xe4, 0xee, 0x6a, 0xb3, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 
0x00, 0x00, 0x88, 0x83, 0x91, 0x4c, - 0x2e, 0x1e, 0xbe, 0xa4, 0xb5, 0x96, 0xff, 0x67, 0x50, 0xe9, 0x81, 0x0e, - 0x5d, 0x0e, 0xad, 0xc4, 0x1f, 0xeb, 0x98, 0x38, 0xcc, 0x54, 0x9d, 0x27, - 0xa6, 0xf1, 0x37, 0x23, 0xce, 0xb4, 0x5b, 0xff, 0x12, 0xb1, 0xb8, 0x35, - 0x5e, 0x03, 0x02, 0x04, 0xad, 0xa6, 0x6f, 0x43, 0xfc, 0xe4, 0xbe, 0x0c, - 0xe0, 0x93, 0xd5, 0xef, 0x09, 0xfa, 0x04, 0xe9, 0x5a, 0x22, 0xd4, 0x81, - 0xc1, 0x27, 0x4f, 0x5f, 0x6e, 0x83, 0x5a, 0x8a, 0x2d, 0xbb, 0x8f, 0xa4, - 0x91, 0xcc, 0x82, 0x37, 0x3b, 0x14, 0x98, 0x58, 0x86, 0x44, 0xb7, 0xa9, - 0x58, 0xf3, 0x3d, 0x49, 0x71, 0x7a, 0x37, 0xcd, 0xc5, 0xb9, 0xc9, 0x46, - 0xd5, 0xd4, 0x17, 0x60, 0x1a, 0xbf, 0x93, 0xa9, 0xe9, 0x08, 0x25, 0x40, - 0xd1, 0x65, 0xae, 0xdd, 0x85, 0xa6, 0xcc, 0x06, 0xca, 0x91, 0xe1, 0x63, - 0xf9, 0x6b, 0x15, 0xa8, 0x04, 0x61, 0xd2, 0xa6, 0x59, 0x21, 0x1a, 0x1c, - 0xc9, 0xa9, 0xa9, 0xc8, 0x54, 0x86, 0xac, 0xa5, 0xd6, 0x95, 0x39, 0x83, - 0x4b, 0x6b, 0x69, 0xa6, 0x94, 0xd8, 0xc0, 0xfb, 0x66, 0x0f, 0x3a, 0xbe, - 0xc7, 0xf3, 0xcc, 0xd5, 0xb7, 0x1b, 0x60, 0x02, 0x95, 0x45, 0x4a, 0x12, - 0xc9, 0xfe, 0x75, 0x7c, 0x1b, 0xb2, 0x86, 0x96, 0x28, 0x07, 0xa2, 0x18, - 0x7a, 0x6c, 0x90, 0x6f, 0x32, 0x0c, 0xc8, 0x34, 0xbc, 0x75, 0x4d, 0x96, - 0x03, 0xa6, 0x0f, 0x3d, 0x35, 0x1b, 0x64, 0x76, 0x95, 0x55, 0xff, 0x25, - 0xd4, 0x71, 0xcf, 0x8a, 0x73, 0x6d, 0x9b, 0x74, 0xfe, 0xff, 0x9e, 0x31, - 0x9e, 0x5e, 0x89, 0x5a, 0x1a, 0xeb, 0x8d, 0x06, 0x3b, 0xf2, 0xf6, 0x06, - 0x5d, 0xc3, 0xba, 0x04, 0xca, 0x0f, 0x07, 0x2c, 0xbd, 0x54, 0x52, 0xd9, - 0x1c, 0x2f, 0x0e, 0x13, 0x5e, 0x25, 0x13, 0xe5, 0xd7, 0x8e, 0x19, 0x42, - 0x1b, 0x52, 0x2e, 0xd2, 0x8f, 0xc5, 0x8e, 0x1c, 0x34, 0x2e, 0x4d, 0xd5, - 0x51, 0x7d, 0x91, 0x64, 0xbc, 0xb4, 0x0d, 0xc9, 0xe7, 0x1c, 0x6c, 0x47, - 0xe9, 0xbb, 0x67, 0x9a, 0x96, 0xde, 0xad, 0xff, 0xba, 0x35, 0x25, 0x6d, - 0x57, 0xa1, 0x93, 0xfe, 0xe2, 0x8d, 0x02, 0xeb, 0xf0, 0x2f, 0x54, 0xfd, - 0x46, 0xc0, 0x8f, 0xea, 0x32, 0x7b, 0x57, 0xda, 0xe0, 0x29, 0x1c, 0x19, - 0xba, 0xa4, 0xa6, 0x1c, 0x6e, 0xeb, 
0x7a, 0xa8, 0x8a, 0xe1, 0xc6, 0x12, - 0xf5, 0xa3, 0x24, 0x1a, 0x96, 0xe1, 0x02, 0xc0, 0xf4, 0x7d, 0x14, 0x72, - 0xd6, 0x12, 0x8e, 0x6c, 0x8c, 0xd2, 0xfd, 0x88, 0x78, 0x48, 0xf3, 0x74, - 0x38, 0x86, 0x04, 0x68, 0x6d, 0x7c, 0xf4, 0x4c, 0x40, 0x17, 0xf6, 0x8f, - 0xb2, 0x6c, 0xd7, 0x66, 0x66, 0x3b, 0x38, 0xa1, 0xbb, 0x1e, 0xff, 0x72, - 0x1f, 0x64, 0x56, 0xc2, 0x53, 0x1c, 0x6f, 0x84, 0x2b, 0xbd, 0x23, 0xd9, - 0xb4, 0x6b, 0x87, 0x79, 0x99, 0xec, 0x81, 0x8d, 0x1a, 0x58, 0x00, 0xf0, - 0x2c, 0xc1, 0xc4, 0x57, 0x74, 0x0f, 0xce, 0x32, 0xe2, 0x5e, 0xae, 0x02, - 0x1c, 0xe8, 0x94, 0xc6, 0x44, 0xaa, 0x7b, 0x9a, 0x32, 0xb5, 0x33, 0xac, - 0xfc, 0x41, 0x65, 0xf2, 0xca, 0xcc, 0xc6, 0x74, 0x36, 0xb2, 0xc9, 0x0e, - 0x26, 0x73, 0xae, 0x68, 0x98, 0xa4, 0x36, 0xe8, 0x98, 0x39, 0xad, 0x05, - 0x3f, 0xca, 0x12, 0xcc, 0x86, 0xfd, 0xc6, 0x57, 0xf0, 0x02, 0x4e, 0x45, - 0xcb, 0x54, 0x34, 0xdd, 0x66, 0x26, 0xab, 0xda, 0x95, 0xa5, 0x85, 0xec, - 0x02, 0x03, 0xb6, 0x29, 0x30, 0x11, 0x40, 0x54, 0x9a, 0x6a, 0x87, 0x2e, - 0x97, 0xa1, 0x7e, 0xeb, 0x34, 0x39, 0x78, 0x3b, 0xbc, 0x5f, 0x8e, 0xc5, - 0x0e, 0x21, 0x29, 0x4b, 0xb7, 0x1b, 0xe7, 0x14, 0x08, 0x34, 0xb7, 0x9a, - 0x0a, 0xb2, 0x6c, 0x25, 0x76, 0xb5, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xe2, 0x7d, 0x48, 0xdd, 0x1a, 0xcb, 0xb6, 0x5c, - 0x6f, 0xbe, 0x32, 0x9d, 0xd2, 0x2b, 0x9e, 0x10, 0x65, 0xd7, 0x1e, 0xec, - 0xc8, 0xb5, 0x10, 0x64, 0x8f, 0x5d, 0xef, 0xfe, 0x9b, 0x6c, 0x9b, 0x02, - 0x6a, 0x6d, 0xf7, 0x98, 0x7b, 0xf7, 0x17, 0xfd, 0x49, 0x1b, 0x6a, 0xc5, - 0x3c, 0xa0, 0xfc, 0xa8, 0x94, 0x95, 0xed, 0x48, 0x81, 0x04, 0x53, 0x8c, - 0xbe, 0xe4, 0x4e, 0xaf, 0xc1, 0x9d, 0xc3, 0xdf, 0xc2, 0xb5, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xae, 0xb0, 0x67, 0x5b, - 0x99, 0x26, 0x07, 0xfb, 0x6c, 0x98, 0xfe, 0xbb, 0x35, 0xf1, 0x5b, 0x02, - 0xc6, 0x03, 0xfc, 0x97, 0x21, 0x16, 0x8d, 0x48, 0xd4, 0x4f, 0x03, 0xd9, - 0x7c, 0x9f, 0xa6, 0x1e, 0x6f, 0x5a, 0x58, 0x17, 0x6d, 0x26, 0xb4, 0xc5, - 0x4c, 0xe5, 0x93, 0x0a, 0x9c, 0xb2, 
0x40, 0xbc, 0x60, 0xc7, 0x2b, 0xdb, - 0x3b, 0xc0, 0x3c, 0x5c, 0x44, 0x4b, 0xdd, 0x58, 0xbe, 0xdc, 0xc5, 0xb5, - 0x6a, 0xf9, 0x5e, 0x73, 0x07, 0x58, 0x8f, 0x45, 0x7b, 0xac, 0xba, 0x82, - 0x96, 0x49, 0x4d, 0x22, 0x70, 0x7a, 0x3d, 0x69, 0x26, 0x8b, 0x88, 0x13, - 0xf1, 0x8d, 0xfc, 0xdf, 0x73, 0xd5, 0x20, 0x3c, 0x52, 0x92, 0x16, 0xb1, - 0x6e, 0xb7, 0x41, 0xbe, 0x23, 0x9b, 0x51, 0xf7, 0xc9, 0x38, 0x8a, 0xc7, - 0x6e, 0x68, 0x82, 0xd1, 0x59, 0x50, 0x09, 0x4b, 0x44, 0x3b, 0x28, 0x06, - 0x60, 0x75, 0x7a, 0xe5, 0xa1, 0x36, 0xbb, 0x62, 0x44, 0xe3, 0xd0, 0x68, - 0x14, 0xea, 0xad, 0xf9, 0x18, 0xcc, 0xd5, 0x42, 0x5d, 0x18, 0x53, 0xe6, - 0x4a, 0xfe, 0xde, 0x32, 0xe1, 0xe7, 0xf8, 0x8c, 0x9d, 0x35, 0xf4, 0x4a, - 0xcb, 0x23, 0x2f, 0x91, 0xb5, 0xb0, 0xb2, 0x01, 0x5c, 0x22, 0x8c, 0x42, - 0x42, 0xd5, 0xf0, 0x82, 0x6f, 0x9f, 0x64, 0xe5, 0x99, 0x4d, 0x36, 0x0b, - 0xfc, 0x78, 0x38, 0x30, 0x47, 0x8f, 0x0b, 0x57, 0x86, 0x4f, 0x1b, 0xc9, - 0x05, 0x0e, 0x08, 0xc4, 0xf4, 0xab, 0x9e, 0x90, 0xb4, 0x4f, 0x36, 0x54, - 0xe8, 0xa1, 0x3f, 0x90, 0xd2, 0xf3, 0xb4, 0xb4, 0xdd, 0xf3, 0x43, 0x2f, - 0xc4, 0x43, 0xbb, 0x99, 0x8e, 0xb8, 0x61, 0x59, 0x5e, 0xfa, 0x1b, 0x3c, - 0xc1, 0xeb, 0x9d, 0x35, 0x62, 0x34, 0x82, 0x45, 0xef, 0x41, 0xe9, 0xfc, - 0x35, 0xae, 0xb4, 0x0b, 0xce, 0x52, 0x5b, 0x40, 0x7d, 0xdd, 0x86, 0x83, - 0x52, 0x74, 0x77, 0x11, 0xc2, 0x9b, 0x8c, 0xa3, 0x63, 0xc2, 0x2d, 0xdd, - 0x8c, 0x76, 0x13, 0xc5, 0xc0, 0xde, 0x3e, 0x6b, 0xe1, 0x0f, 0xeb, 0x0f, - 0x0a, 0x25, 0x41, 0x2f, 0x8b, 0x4a, 0x98, 0x30, 0xcb, 0x1a, 0x43, 0xa3, - 0xc1, 0xcc, 0x44, 0x9a, 0x6c, 0xdc, 0x92, 0x40, 0xc4, 0x7a, 0x1f, 0x8a, - 0x6f, 0x74, 0xf3, 0xf5, 0x52, 0x72, 0xf7, 0x81, 0x6e, 0x74, 0x75, 0xe6, - 0xea, 0xd9, 0x57, 0x91, 0xae, 0xf2, 0x3f, 0x35, 0x4b, 0x99, 0xd9, 0x3f, - 0x85, 0xe0, 0x92, 0xaa, 0x35, 0xac, 0x28, 0xbf, 0x43, 0xb8, 0xad, 0xc7, - 0xc5, 0xf6, 0x15, 0x2f, 0x7c, 0xfb, 0x34, 0x48, 0xf3, 0x04, 0x12, 0xf4, - 0x2f, 0x92, 0x74, 0xc8, 0xea, 0xbc, 0x24, 0x6e, 0x3b, 0x0e, 0x9e, 0xf0, - 0xaf, 0x02, 0x97, 0x95, 0xbc, 0x90, 
0x7f, 0xc4, 0xf8, 0xe2, 0x04, 0x9a, - 0x8f, 0xfc, 0xbc, 0x50, 0xfe, 0xf7, 0x89, 0x17, 0x2c, 0xdb, 0xd6, 0x5e, - 0xbf, 0xd9, 0x8e, 0x89, 0x8b, 0x06, 0x1d, 0x0b, 0x81, 0x2a, 0x55, 0x5c, - 0x5f, 0xb6, 0xa6, 0xa5, 0xd2, 0xaa, 0x79, 0x9c, 0x39, 0x31, 0x76, 0x03, - 0x98, 0x42, 0xd6, 0xb7, 0x37, 0x1f, 0xc8, 0x51, 0x8a, 0x1c, 0x5d, 0xcd, - 0x9c, 0x78, 0xa4, 0x22, 0x6e, 0x12, 0x10, 0x0a, 0x33, 0xc9, 0xe0, 0xfe, - 0xfc, 0xe8, 0x15, 0xe7, 0xef, 0xd8, 0x6d, 0xc7, 0xc9, 0xc2, 0x8e, 0x18, - 0x82, 0x2f, 0xa6, 0x09, 0x8a, 0xdc, 0x41, 0x6b, 0x89, 0xea, 0xd9, 0xd6, - 0x96, 0xfd, 0xba, 0x6e, 0xae, 0x2d, 0x0c, 0xf9, 0x3c, 0x4c, 0x1a, 0xfa, - 0x98, 0x83, 0x51, 0x45, 0x9d, 0x1e, 0xa5, 0xc1, 0x81, 0x54, 0x37, 0x5d, - 0x28, 0xca, 0xa6, 0xfe, 0x48, 0xf4, 0x77, 0x17, 0x92, 0x1d, 0x0c, 0xb3, - 0x39, 0x77, 0x22, 0xd9, 0xc7, 0xc2, 0xaf, 0x70, 0x0a, 0xd3, 0xa6, 0x57, - 0x69, 0xfb, 0xb9, 0xe0, 0xc4, 0x73, 0x7a, 0x68, 0xee, 0x27, 0x6e, 0x3a, - 0x6e, 0xae, 0x32, 0xf6, 0x09, 0xb3, 0x0b, 0x40, 0x72, 0xc6, 0x26, 0x6e, - 0xc5, 0x88, 0x6b, 0xce, 0x99, 0x88, 0x60, 0x6f, 0x6e, 0xa9, 0xe6, 0xd7, - 0x35, 0x5e, 0x3b, 0x36, 0x0d, 0x14, 0xb8, 0x2f, 0xde, 0x67, 0xc8, 0x2e, - 0x52, 0xc1, 0xf1, 0x58, 0x87, 0x32, 0x2a, 0x52, 0x21, 0x27, 0x1e, 0x04, - 0xed, 0xc4, 0x82, 0xd7, 0xeb, 0x85, 0x12, 0x3e, 0xea, 0xd0, 0x07, 0xa0, - 0x80, 0x48, 0xe9, 0xbd, 0x9b, 0x3a, 0x8e, 0x8b, 0xa0, 0xfc, 0x07, 0xf0, - 0x69, 0x4e, 0xc7, 0x1d, 0xd9, 0x9a, 0x73, 0x18, 0x63, 0xb8, 0xe6, 0x4a, - 0xa0, 0x81, 0xf0, 0xdb, 0xb9, 0x88, 0xf4, 0x2b, 0x1f, 0x0d, 0xda, 0x31, - 0xc0, 0xb0, 0x55, 0x79, 0x56, 0x48, 0x22, 0xbb, 0x49, 0x7f, 0xb1, 0xf1, - 0xf6, 0x6f, 0x42, 0xd3, 0xba, 0x68, 0x3a, 0x8f, 0xe7, 0xac, 0x53, 0x30, - 0x96, 0xec, 0x51, 0x7d, 0xfc, 0xc0, 0x35, 0xe9, 0x59, 0xe7, 0x0e, 0xed, - 0x29, 0x46, 0x50, 0x3c, 0x4b, 0x36, 0xc6, 0x2a, 0xaa, 0x3b, 0xbe, 0xce, - 0xd3, 0xda, 0x4d, 0x65, 0xb0, 0xe8, 0x52, 0x68, 0xf0, 0x23, 0xde, 0x02, - 0x77, 0xb3, 0xcc, 0xce, 0x78, 0xdd, 0x8c, 0xf8, 0xbe, 0x5d, 0x0d, 0xa9, - 0xb6, 0x96, 0x85, 0xbf, 0x92, 0x2a, 
0x6b, 0x1b, 0xe8, 0x76, 0x05, 0x13, - 0x30, 0xd8, 0x3d, 0x80, 0xaa, 0xa2, 0xa3, 0xbc, 0x07, 0xba, 0x9c, 0x75, - 0x5b, 0x42, 0x03, 0xd8, 0xde, 0x42, 0x44, 0xf7, 0x29, 0x43, 0x29, 0x0d, - 0x48, 0x2b, 0x02, 0xd0, 0xcc, 0xe9, 0x17, 0x47, 0x23, 0x73, 0x6d, 0xc5, - 0x91, 0x6d, 0x4e, 0xc5, 0xcf, 0xc3, 0x58, 0xaf, 0x6e, 0xa2, 0x9e, 0xe7, - 0xe1, 0x88, 0xac, 0x62, 0xff, 0xbc, 0x69, 0x57, 0xad, 0x0f, 0x08, 0xf8, - 0x32, 0xfd, 0x79, 0xcb, 0x30, 0xbc, 0xd2, 0xe5, 0x20, 0xd9, 0x0f, 0xd1, - 0x33, 0xbf, 0xe4, 0x49, 0x7a, 0x2b, 0x5c, 0xb3, 0x63, 0x13, 0x4d, 0xed, - 0x17, 0xe7, 0x5b, 0xf4, 0x36, 0x9d, 0x3c, 0x4e, 0x51, 0xb2, 0xf7, 0xf2, - 0xcd, 0xfb, 0xec, 0x42, 0x79, 0x46, 0xae, 0x18, 0x50, 0xdf, 0xbf, 0x5b, - 0xb1, 0x9a, 0x49, 0x22, 0xae, 0xe9, 0xf3, 0x86, 0x3f, 0xe0, 0xb4, 0xc6, - 0x9c, 0x08, 0xd6, 0xd9, 0xf4, 0x68, 0xbb, 0x33, 0x0e, 0x59, 0x3d, 0x76, - 0xf0, 0xd7, 0x54, 0x04, 0x19, 0x66, 0xee, 0x61, 0x11, 0x0d, 0x48, 0x10, - 0x21, 0x16, 0x7c, 0xac, 0x49, 0xab, 0xe0, 0x19, 0x85, 0x93, 0x48, 0x65, - 0x7c, 0x5e, 0x6c, 0x1a, 0xf5, 0xb0, 0xc6, 0x80, 0xa1, 0x2a, 0xd5, 0x71, - 0x42, 0xec, 0x2f, 0x25, 0xf7, 0xb8, 0x84, 0xcd, 0xf0, 0x5c, 0xcd, 0xee, - 0x44, 0xcb, 0xeb, 0x74, 0x96, 0x3c, 0xb0, 0x56, 0xcb, 0xaf, 0x7e, 0x9e, - 0x4a, 0x12, 0x06, 0xae, 0x57, 0x43, 0x2d, 0xb2, 0x11, 0x96, 0x05, 0xdb, - 0xb3, 0x1a, 0x01, 0xa7, 0x1d, 0x02, 0x81, 0x1c, 0x36, 0x41, 0x65, 0xf0, - 0x67, 0xd6, 0xd0, 0x0f, 0xec, 0x34, 0x7d, 0xd3, 0x89, 0xac, 0x60, 0x67, - 0x95, 0x81, 0x84, 0xe7, 0xbb, 0x9a, 0x59, 0x36, 0x3b, 0xde, 0xa4, 0x88, - 0xda, 0xf2, 0xd2, 0xa2, 0x0c, 0xba, 0xfb, 0x93, 0xbf, 0xc8, 0xad, 0xe8, - 0x57, 0xa0, 0x2b, 0xbb, 0x4e, 0xa9, 0x38, 0xe7, 0x86, 0x6b, 0x95, 0x34, - 0x24, 0x96, 0xc0, 0x09, 0xd9, 0xfd, 0x5f, 0x1c, 0x93, 0xd9, 0x72, 0xfa, - 0xc4, 0x14, 0x72, 0x9c, 0x19, 0x6f, 0xee, 0x12, 0x17, 0xee, 0x65, 0xb4, - 0x8c, 0x83, 0x39, 0x3c, 0x0f, 0xbf, 0x25, 0xcf, 0xee, 0x05, 0x8c, 0x6a, - 0x56, 0x18, 0xf0, 0x20, 0x72, 0xc1, 0xbf, 0xe4, 0xce, 0x37, 0xbf, 0x2b, - 0xba, 0x70, 0x1e, 0xc2, 0xc8, 0xcd, 
0x58, 0xb9, 0x60, 0xc7, 0xfb, 0xd0, - 0xce, 0xb9, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x7c, 0x63, 0x50, 0x90, 0xcb, 0x9c, 0xce, 0x59, 0xb1, 0x47, 0xb0, 0x49, - 0x9b, 0xfc, 0xfb, 0x3d, 0x3d, 0x62, 0xcf, 0x58, 0x4c, 0x2a, 0x79, 0xf0, - 0x72, 0x7f, 0x81, 0x41, 0xac, 0x82, 0x2d, 0xa9, 0xf0, 0x0e, 0x4d, 0xd2, - 0xe0, 0xbd, 0xca, 0x17, 0xb7, 0x59, 0x9f, 0xdb, 0xfe, 0x51, 0x90, 0x88, - 0xb9, 0xeb, 0x4e, 0xac, 0x80, 0x30, 0x64, 0xc4, 0x49, 0xd1, 0xb6, 0x65, - 0x67, 0xef, 0x9d, 0x5c, 0x04, 0xe0, 0x9d, 0xbe, 0x47, 0x75, 0x9b, 0x6e, - 0x30, 0x76, 0xad, 0x37, 0x9a, 0x56, 0xff, 0xcd, 0x40, 0x26, 0x3e, 0xe2, - 0x7d, 0x30, 0x55, 0x09, 0x92, 0x25, 0x36, 0x2f, 0xf8, 0x55, 0xb8, 0x9b, - 0x66, 0x49, 0x41, 0x9d, 0x78, 0x6d, 0x3f, 0x54, 0x41, 0x01, 0x93, 0x9c, - 0x5e, 0x0c, 0x4a, 0x38, 0x79, 0x76, 0xb4, 0x98, 0xae, 0xf9, 0x99, 0x21, - 0x05, 0x6a, 0xfb, 0xbc, 0x44, 0xf7, 0xdc, 0x85, 0x5e, 0x5f, 0x18, 0x49, - 0x22, 0x11, 0x6d, 0xa5, 0x9e, 0x6b, 0x59, 0x60, 0xf8, 0x73, 0x8b, 0xcb, - 0x38, 0xbb, 0xc9, 0xbf, 0x49, 0x0e, 0x57, 0x65, 0x48, 0x41, 0x41, 0xa2, - 0x40, 0x67, 0x91, 0x1d, 0x54, 0xac, 0xa7, 0xef, 0x16, 0x8b, 0xc7, 0xd1, - 0xe6, 0xdb, 0xc5, 0x9c, 0xd4, 0x04, 0x67, 0xd8, 0x75, 0x21, 0x2b, 0x1d, - 0x11, 0xc1, 0x79, 0x45, 0xb4, 0x91, 0x7a, 0x97, 0x00, 0xde, 0xc6, 0xc5, - 0x8a, 0xd1, 0xd7, 0xea, 0xc1, 0x22, 0xe1, 0x58, 0x61, 0xf2, 0x89, 0x3d, - 0xdb, 0x04, 0x3d, 0xe4, 0xe9, 0xe7, 0xbf, 0x4b, 0x99, 0x8a, 0xc6, 0xf2, - 0x09, 0xc4, 0xe2, 0x6d, 0x0b, 0xda, 0x13, 0xfb, 0xff, 0xbf, 0x0b, 0xfc, - 0x78, 0x33, 0xb8, 0x7b, 0x3e, 0xd8, 0xba, 0x27, 0xba, 0xae, 0xdf, 0xce, - 0xea, 0x80, 0x08, 0x38, 0xd8, 0x33, 0x00, 0xa9, 0xb6, 0x88, 0x48, 0xa9, - 0x3b, 0x54, 0xf0, 0x95, 0xda, 0xba, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xb1, 0xd7, 0x8d, 0x6c, 0xb9, 0x96, 0xdc, 0x64, - 0x9b, 0x0c, 0x74, 0x54, 0x59, 0x82, 0xf6, 0x6e, 0x7c, 0x4e, 0x23, 0x83, - 0x04, 0x2e, 0x49, 0xfb, 0x56, 0x4b, 0xcd, 0x0d, 0x76, 0x29, 0xb1, 0xce, - 0x40, 0xa3, 0xd0, 0x02, 0x16, 0x8e, 
0x1c, 0x0a, 0x00, 0x5b, 0x8c, 0x06, - 0xf9, 0x07, 0x97, 0x12, 0x0c, 0x33, 0xd5, 0x48, 0x6d, 0xae, 0x7d, 0x2c, - 0x8f, 0x74, 0x32, 0x24, 0xcf, 0x91, 0xd7, 0xbe, 0xb2, 0x05, 0xcf, 0x2f, - 0x93, 0xd5, 0x43, 0x90, 0xce, 0x02, 0x97, 0xf8, 0x51, 0xb3, 0xba, 0x56, - 0x5d, 0x94, 0x41, 0xa4, 0x11, 0xf3, 0x21, 0xc0, 0xcc, 0x28, 0xf8, 0x5a, - 0x00, 0x0a, 0xd4, 0x53, 0xdd, 0xac, 0xfe, 0x25, 0x03, 0xea, 0x2b, 0x6b, - 0x9d, 0x7e, 0x1a, 0xe1, 0x5f, 0x5c, 0xa7, 0x47, 0xa2, 0x72, 0x4f, 0x92, - 0x60, 0x25, 0x7c, 0x1c, 0xa5, 0x34, 0xa6, 0x86, 0x0e, 0xda, 0x8f, 0x3f, - 0xec, 0xe2, 0xe4, 0xad, 0xa9, 0x41, 0xcc, 0x3d, 0x94, 0x43, 0xfd, 0x28, - 0xd8, 0xb0, 0x0f, 0x05, 0x9e, 0x2b, 0x27, 0x3f, 0xe0, 0x84, 0xbc, 0x9e, - 0x7a, 0xa5, 0x83, 0x3d, 0x3b, 0xac, 0x83, 0xd3, 0x16, 0x92, 0x8c, 0xd2, - 0x4a, 0x81, 0xdd, 0xba, 0x0a, 0xb7, 0xc5, 0x9f, 0x83, 0x0f, 0x78, 0xb8, - 0xab, 0x2d, 0xca, 0xf8, 0x6c, 0x06, 0xd7, 0x82, 0xb8, 0x61, 0x7d, 0x2a, - 0x31, 0x3a, 0x39, 0x97, 0x5f, 0xc7, 0x00, 0x6e, 0x46, 0xf2, 0xc5, 0x12, - 0x71, 0x55, 0x5b, 0x10, 0xaf, 0xbb, 0x07, 0x4c, 0x2f, 0xa3, 0x51, 0x53, - 0x22, 0x20, 0xab, 0xed, 0x02, 0x95, 0xc6, 0x5f, 0xaa, 0xb8, 0xc0, 0xcb, - 0xe5, 0xe0, 0x25, 0x97, 0xf7, 0xda, 0x1d, 0xd8, 0x5a, 0xff, 0x76, 0x0c, - 0x3e, 0x33, 0x1b, 0x7a, 0x15, 0xb8, 0x34, 0x75, 0xcf, 0xe9, 0xf3, 0x53, - 0x61, 0x03, 0x2d, 0x52, 0x29, 0x69, 0x3a, 0xc3, 0xd9, 0x22, 0xc0, 0x2d, - 0x80, 0xed, 0x66, 0xc4, 0xf4, 0x89, 0x60, 0x14, 0xdb, 0xec, 0x7d, 0xcc, - 0x99, 0x5c, 0x94, 0x27, 0xab, 0xed, 0xd2, 0x17, 0xf4, 0x36, 0xfc, 0x7e, - 0x99, 0x98, 0xb6, 0x86, 0xb6, 0x7c, 0x54, 0xd6, 0xec, 0xb5, 0xad, 0x62, - 0xcc, 0xb0, 0xf7, 0x8c, 0x52, 0x99, 0xf2, 0x44, 0x27, 0x3a, 0xb0, 0xff, - 0x8f, 0x09, 0xae, 0xe1, 0x61, 0xd8, 0x9f, 0xdd, 0x2f, 0x6b, 0xea, 0xd0, - 0x12, 0x70, 0x8c, 0x9d, 0x8f, 0x4c, 0x36, 0x98, 0x1e, 0x2e, 0xb5, 0x50, - 0x63, 0x33, 0x9c, 0x4b, 0xc3, 0xd4, 0xa0, 0xe6, 0x96, 0x96, 0x75, 0xfd, - 0x8a, 0xc4, 0x0c, 0xa7, 0xea, 0x9d, 0xf1, 0x23, 0x9e, 0x38, 0xff, 0x1a, - 0x67, 0x36, 0x5f, 0x5f, 0x17, 0x88, 
0x1a, 0x43, 0x25, 0xea, 0x76, 0xb5, - 0xcd, 0xce, 0x43, 0xf8, 0x71, 0x2b, 0xdb, 0xf0, 0xcd, 0x76, 0xbd, 0x94, - 0x57, 0xdb, 0x77, 0xcd, 0xb2, 0x8f, 0xd1, 0xc0, 0xeb, 0x00, 0x61, 0x7f, - 0x66, 0xb0, 0x43, 0x6e, 0xe0, 0x9f, 0x11, 0x0e, 0x65, 0xf7, 0x4e, 0x00, - 0x74, 0xc3, 0xeb, 0xb1, 0xeb, 0x0c, 0x24, 0x5d, 0x15, 0x56, 0x16, 0x47, - 0x87, 0xcf, 0x34, 0xbe, 0x2a, 0xdd, 0x77, 0x55, 0xa4, 0x09, 0x15, 0x79, - 0x8c, 0xaa, 0xce, 0x32, 0x90, 0x9b, 0x16, 0x40, 0x94, 0x7f, 0x19, 0x27, - 0xbc, 0xbf, 0x45, 0x4b, 0xa5, 0xf0, 0xd0, 0x9e, 0x5b, 0xb9, 0x46, 0x6e, - 0x72, 0x8f, 0x49, 0x3b, 0x7a, 0xc1, 0x92, 0xb0, 0xd5, 0x25, 0x1b, 0x0b, - 0xf3, 0xd0, 0x8a, 0x47, 0x8b, 0xbe, 0xa4, 0xf9, 0x6a, 0x09, 0x84, 0x9a, - 0x5b, 0x5b, 0xea, 0xbb, 0x6f, 0xd8, 0xaf, 0xcd, 0x67, 0x9b, 0x79, 0x7c, - 0x8f, 0xcc, 0xd7, 0x5f, 0x3a, 0xc3, 0xd0, 0xb7, 0xba, 0x28, 0x83, 0x81, - 0x4a, 0x05, 0x51, 0xaf, 0xa0, 0x52, 0x34, 0xe3, 0x4f, 0xec, 0x82, 0xdc, - 0x97, 0xd8, 0x69, 0xb2, 0x0d, 0x68, 0x35, 0x87, 0x58, 0xc0, 0xcf, 0x58, - 0x0d, 0xf6, 0x6b, 0x6d, 0x2a, 0xc0, 0x72, 0xe4, 0x90, 0x8c, 0x7b, 0x45, - 0xba, 0xf1, 0x13, 0x6f, 0x8c, 0xd2, 0xdd, 0xc5, 0x8e, 0xc8, 0xec, 0xf9, - 0xfb, 0xde, 0xe5, 0xaa, 0xcb, 0xc0, 0xff, 0x77, 0x2d, 0x99, 0xb1, 0x69, - 0x7f, 0xe3, 0x38, 0x61, 0x35, 0xb6, 0x45, 0xdd, 0x73, 0x45, 0x84, 0x89, - 0x1b, 0x96, 0x7e, 0x6a, 0x1d, 0xd9, 0xe6, 0x76, 0xa8, 0x16, 0x0f, 0x42, - 0xc9, 0x41, 0xec, 0x5d, 0x25, 0x01, 0xb0, 0x45, 0xa6, 0xaa, 0x69, 0x87, - 0x11, 0xa1, 0xb8, 0x9e, 0x68, 0x48, 0x68, 0xe9, 0xb5, 0xc2, 0xff, 0x83, - 0x8f, 0x71, 0xb9, 0xd7, 0xbb, 0xae, 0x59, 0x8b, 0x1b, 0x4c, 0x44, 0xd8, - 0xe3, 0xce, 0xab, 0x88, 0xfb, 0x64, 0xd9, 0x61, 0x5a, 0x7d, 0xce, 0x3a, - 0x27, 0xb5, 0xa3, 0xfd, 0x5d, 0xa3, 0xb8, 0xa1, 0x15, 0x63, 0x0b, 0x75, - 0x39, 0xc3, 0xa4, 0xfb, 0x60, 0x53, 0xfd, 0x11, 0x21, 0x35, 0x0f, 0x19, - 0x28, 0x14, 0xcd, 0x8a, 0xcf, 0x33, 0xaa, 0x4f, 0x6a, 0x1e, 0x56, 0x87, - 0xd5, 0x6e, 0x43, 0x9b, 0xa3, 0x72, 0x95, 0x8c, 0x34, 0xa2, 0xac, 0x11, - 0x76, 0x95, 0xd7, 0xdd, 0xbf, 0x10, 
0xf4, 0x0f, 0x2a, 0x64, 0xd2, 0x4d, - 0x7b, 0xc6, 0x9b, 0x7d, 0xf7, 0xa5, 0xb3, 0x84, 0x9a, 0x9a, 0x5e, 0xcf, - 0x7f, 0x95, 0x6d, 0x44, 0xd1, 0xb2, 0x19, 0xbb, 0xed, 0x37, 0x42, 0x4b, - 0x4b, 0x6d, 0xb7, 0x10, 0x02, 0x5f, 0x00, 0x1f, 0x24, 0xce, 0xb2, 0x8b, - 0x3e, 0x7d, 0xc6, 0x6e, 0x6c, 0x90, 0x75, 0xad, 0x3f, 0x9d, 0x63, 0x04, - 0x76, 0x20, 0x7a, 0x56, 0x48, 0xa1, 0x6a, 0x37, 0x74, 0xd2, 0xb7, 0x4f, - 0xa3, 0x64, 0x62, 0xaa, 0xce, 0x75, 0x8c, 0x15, 0x75, 0x79, 0xa0, 0xbd, - 0xdd, 0x01, 0x46, 0xca, 0xa0, 0x31, 0x1a, 0x16, 0x1f, 0xef, 0x8b, 0xc6, - 0x54, 0x57, 0xfa, 0x6e, 0x43, 0xdf, 0xb0, 0x99, 0xed, 0xa4, 0xcb, 0xeb, - 0x91, 0x35, 0x14, 0x0c, 0xa9, 0x1d, 0xb5, 0xa9, 0x32, 0x99, 0xe3, 0x89, - 0x74, 0xaa, 0xa4, 0x65, 0x1e, 0x82, 0x47, 0xfa, 0x37, 0x23, 0xe5, 0x86, - 0xb6, 0xc0, 0xb6, 0x89, 0x9a, 0xd9, 0xae, 0x29, 0x39, 0x7b, 0x66, 0xc7, - 0x5b, 0x02, 0x08, 0x86, 0xd4, 0xf0, 0x75, 0xc2, 0x05, 0x86, 0xc3, 0x75, - 0xd2, 0x2a, 0x1e, 0xec, 0x6e, 0x75, 0x29, 0x58, 0x8c, 0x25, 0x3b, 0x95, - 0x21, 0xde, 0x42, 0xd5, 0xb7, 0x15, 0x30, 0x09, 0x49, 0x78, 0x55, 0xd5, - 0xf2, 0x30, 0x80, 0x93, 0x8a, 0xce, 0x84, 0x27, 0xdb, 0x4a, 0x09, 0x30, - 0x0c, 0x7f, 0x4d, 0xd1, 0x0f, 0xda, 0x66, 0x58, 0xe1, 0x01, 0xfd, 0x75, - 0x83, 0xf5, 0x39, 0x2e, 0xe2, 0x6b, 0xde, 0xff, 0x20, 0x8a, 0xf7, 0xcc, - 0x81, 0x8e, 0x99, 0xb4, 0xeb, 0x76, 0x74, 0x38, 0x2b, 0xe0, 0x6d, 0x61, - 0x8f, 0x39, 0x59, 0x10, 0x7d, 0xb5, 0xd3, 0x14, 0x96, 0x04, 0x1d, 0x22, - 0x89, 0xef, 0x15, 0x7c, 0x28, 0x5a, 0xd6, 0x8d, 0xf3, 0xb7, 0x6a, 0x9a, - 0xce, 0x21, 0x77, 0xfd, 0x4f, 0x22, 0x26, 0x28, 0xb8, 0xb5, 0xb3, 0x73, - 0xfd, 0x2a, 0x7b, 0x42, 0x26, 0x77, 0x41, 0x93, 0xed, 0xf9, 0x8f, 0xa9, - 0x92, 0xd5, 0x9f, 0x2e, 0x60, 0xec, 0x60, 0x98, 0xf1, 0xd5, 0x11, 0xe2, - 0xe0, 0xd7, 0x45, 0xa7, 0xe4, 0xf2, 0x82, 0x61, 0x2f, 0x41, 0x1b, 0xd9, - 0x8e, 0x78, 0xd5, 0x6b, 0x68, 0x74, 0xf0, 0xc3, 0x83, 0x01, 0x16, 0x60, - 0x6e, 0x34, 0x88, 0x45, 0x8a, 0x86, 0x44, 0x5b, 0xa5, 0xa8, 0x55, 0xbc, - 0xfa, 0x8f, 0xbd, 0x93, 0x95, 0x3f, 
0xab, 0x19, 0x54, 0x8f, 0x06, 0x8e, - 0xca, 0x0b, 0x4a, 0x18, 0x3f, 0x7a, 0x9c, 0x3f, 0xe6, 0xbe, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x81, 0x32, 0x41, 0x46, - 0x59, 0x26, 0xf4, 0xef, 0x93, 0x9f, 0x04, 0xc2, 0x67, 0x13, 0x32, 0x45, - 0xc0, 0x79, 0x70, 0x27, 0x21, 0x2b, 0xaf, 0x35, 0xf3, 0xc4, 0x88, 0x52, - 0x28, 0xea, 0xca, 0x8a, 0x08, 0x01, 0x6f, 0x61, 0xab, 0x10, 0xa3, 0xf0, - 0x6b, 0x3b, 0x54, 0x64, 0xf1, 0x63, 0x83, 0x38, 0x2b, 0x26, 0x18, 0x5a, - 0x67, 0xc4, 0x67, 0x38, 0x3f, 0x2c, 0x9a, 0xc9, 0x48, 0x33, 0x77, 0xb4, - 0xb2, 0xc2, 0xc7, 0x08, 0x21, 0x5e, 0xc4, 0x19, 0x59, 0xe1, 0xfa, 0x32, - 0xa4, 0x4c, 0x3e, 0xba, 0x65, 0x92, 0x98, 0x39, 0x71, 0x2f, 0x99, 0x08, - 0xf8, 0xb3, 0x7a, 0x03, 0x53, 0xd7, 0x68, 0xb2, 0x5e, 0xb0, 0xef, 0xe0, - 0x1e, 0x7d, 0xb2, 0x23, 0x5d, 0x2b, 0xd7, 0x09, 0xa6, 0x78, 0xa4, 0x7c, - 0x08, 0xed, 0x8a, 0xf6, 0x96, 0xa0, 0x10, 0x17, 0x62, 0x8b, 0x8a, 0xa0, - 0xac, 0x22, 0x67, 0x02, 0xa8, 0x66, 0x1a, 0xb5, 0x02, 0xde, 0xa5, 0xfa, - 0x69, 0x29, 0x5f, 0x24, 0x89, 0x46, 0x68, 0xd6, 0x51, 0x2a, 0xfe, 0x88, - 0xf0, 0x40, 0xde, 0xd1, 0x12, 0x2e, 0xed, 0x13, 0x7b, 0x49, 0xf6, 0xe1, - 0x7a, 0xcf, 0x61, 0xcb, 0x70, 0x9d, 0xaa, 0x51, 0x07, 0xc2, 0x54, 0x76, - 0x89, 0x29, 0x94, 0x29, 0x8b, 0x0e, 0xf5, 0xe8, 0x81, 0xc7, 0xdb, 0x59, - 0x1e, 0x75, 0xda, 0x6a, 0x94, 0x18, 0x16, 0xae, 0xbb, 0x43, 0x87, 0x56, - 0x66, 0x8b, 0x84, 0xe9, 0xa9, 0xd0, 0xd2, 0x8f, 0x5b, 0xbf, 0x1d, 0x24, - 0x3a, 0xb7, 0x64, 0xff, 0xe9, 0x22, 0x21, 0x65, 0xaf, 0x2b, 0x45, 0x8d, - 0x28, 0xea, 0xbc, 0x07, 0x10, 0x6e, 0xfb, 0x4d, 0x6f, 0x35, 0xe5, 0xeb, - 0x5d, 0x29, 0x72, 0xe1, 0x94, 0xad, 0xed, 0x25, 0xd7, 0x39, 0x63, 0x32, - 0x37, 0x0b, 0xb2, 0xd7, 0x54, 0x1f, 0xe4, 0x0d, 0xe7, 0xb3, 0xd1, 0xa6, - 0x2a, 0xcf, 0x8e, 0x97, 0xf1, 0xa8, 0xfc, 0xb1, 0x61, 0xdc, 0xb4, 0x8f, - 0x29, 0xa2, 0x68, 0x4a, 0xe6, 0x2f, 0x8a, 0x69, 0x2c, 0xa1, 0x1d, 0xe2, - 0x9e, 0x65, 0x71, 0xb7, 0x83, 0xef, 0x63, 0xf5, 0x36, 0xdc, 0xa0, 0x94, - 0x5a, 0x45, 0x8a, 0x85, 0x5e, 0x28, 
0x86, 0x21, 0xd2, 0xbf, 0x7a, 0x2f, - 0x76, 0x1c, 0x2a, 0x15, 0xb2, 0xe8, 0xaf, 0x63, 0x37, 0xbe, 0xd8, 0x0a, - 0xef, 0x54, 0xee, 0xe6, 0xd9, 0xb3, 0xdb, 0x41, 0x55, 0xba, 0xd8, 0x14, - 0x7c, 0x10, 0x61, 0x06, 0x40, 0x45, 0x69, 0x37, 0x60, 0xf7, 0x6a, 0x7a, - 0x23, 0x70, 0x30, 0x57, 0x3e, 0xe5, 0x12, 0x24, 0xbc, 0x5e, 0x82, 0x89, - 0xd8, 0x37, 0xc9, 0x33, 0xb9, 0x38, 0xa5, 0xba, 0xed, 0xdd, 0x93, 0x58, - 0x81, 0x15, 0xec, 0x15, 0x70, 0x2f, 0x30, 0xfa, 0xaf, 0xf7, 0xf5, 0xcb, - 0x41, 0x74, 0xea, 0xc0, 0x91, 0xbe, 0x53, 0x4c, 0xc2, 0x74, 0x1b, 0x5b, - 0x8c, 0x74, 0xd8, 0xc3, 0x4a, 0x12, 0xaa, 0x57, 0xd6, 0x61, 0xb1, 0xb8, - 0x81, 0x5d, 0x81, 0x37, 0x1e, 0x5b, 0x3d, 0x5a, 0xbc, 0xa6, 0xb2, 0x27, - 0xe3, 0x01, 0x4c, 0xf0, 0xad, 0x7b, 0xdf, 0x50, 0xf9, 0xd7, 0xb7, 0xcc, - 0xa8, 0x5c, 0x3d, 0x9a, 0xb7, 0x60, 0x3e, 0x63, 0x3f, 0x6a, 0x08, 0x0b, - 0x82, 0xdc, 0x3e, 0xfa, 0x24, 0x33, 0xd3, 0x01, 0xbf, 0xef, 0xeb, 0x52, - 0x3f, 0x91, 0x61, 0xda, 0xe2, 0x26, 0x10, 0xdf, 0xe4, 0x9b, 0x77, 0x91, - 0x22, 0xc5, 0x4e, 0x9c, 0x0b, 0x32, 0xff, 0x27, 0x85, 0x85, 0x0c, 0x99, - 0x50, 0x8f, 0xad, 0x5d, 0x06, 0x18, 0x52, 0xb4, 0x64, 0x09, 0xc4, 0xa4, - 0x84, 0xd4, 0x81, 0x07, 0x0a, 0x97, 0x55, 0xf8, 0x96, 0x52, 0xb2, 0x9a, - 0xf4, 0x06, 0x2c, 0x9a, 0x3b, 0x8b, 0xaa, 0x67, 0x18, 0x3a, 0xee, 0xbc, - 0xca, 0x8f, 0x46, 0xf6, 0x4a, 0x33, 0x5b, 0x56, 0x09, 0xb2, 0x72, 0x87, - 0xdb, 0xbb, 0x57, 0x67, 0x53, 0x82, 0x77, 0x31, 0x66, 0xbb, 0xf1, 0x33, - 0x6d, 0x55, 0x82, 0xaa, 0x80, 0xd4, 0x4d, 0xb8, 0xab, 0xbd, 0x2a, 0xda, - 0x10, 0x3a, 0xc8, 0xf0, 0x14, 0x1e, 0xcb, 0x8e, 0x76, 0x6c, 0xc8, 0x74, - 0x05, 0xb3, 0x51, 0xbd, 0x63, 0x06, 0x69, 0x05, 0x2a, 0x21, 0xd6, 0x2f, - 0xe4, 0x38, 0xae, 0xf8, 0xd4, 0xe9, 0xa7, 0xe8, 0xc8, 0x5a, 0x65, 0x7d, - 0x54, 0x34, 0x33, 0x0d, 0xf6, 0x07, 0xd6, 0x8c, 0xe5, 0x72, 0x9b, 0xfb, - 0x60, 0x49, 0xd2, 0xaf, 0xb4, 0x17, 0xc4, 0x74, 0x8d, 0xe5, 0x54, 0xda, - 0x96, 0x56, 0x7d, 0x97, 0x62, 0xe8, 0xec, 0x0d, 0x2b, 0x02, 0x2e, 0x59, - 0xf8, 0xa1, 0x06, 0x6a, 0xb6, 0x3e, 
0x15, 0xeb, 0x64, 0x1a, 0x48, 0x3d, - 0x53, 0x2c, 0x42, 0x3b, 0x97, 0xa1, 0x3f, 0x47, 0x8b, 0x74, 0x87, 0x8b, - 0x96, 0x63, 0x08, 0x4c, 0x99, 0x38, 0x5a, 0xb6, 0x93, 0xa8, 0xcc, 0xee, - 0x62, 0x3a, 0x00, 0x6d, 0x5c, 0xab, 0x77, 0x3c, 0x46, 0xae, 0x6e, 0xeb, - 0xf1, 0xf9, 0x63, 0xf1, 0xa2, 0x31, 0x21, 0x38, 0xc3, 0x4f, 0xe2, 0x3a, - 0x33, 0x7f, 0xe7, 0xc6, 0x69, 0xd5, 0x1c, 0x7e, 0x5b, 0x4f, 0xb1, 0x50, - 0x3b, 0xbe, 0x31, 0xa7, 0x42, 0xa3, 0x97, 0x7b, 0xe3, 0x90, 0xd0, 0x07, - 0xfd, 0x05, 0xb9, 0xf2, 0x47, 0xc4, 0xc8, 0xdd, 0x1c, 0x3c, 0xa4, 0x22, - 0x96, 0x04, 0xca, 0x28, 0x17, 0xcc, 0x5c, 0x49, 0x7e, 0xc6, 0x93, 0x98, - 0xd3, 0x8b, 0xd2, 0xf6, 0x4a, 0xb6, 0xbe, 0x8d, 0xa2, 0xdd, 0xb6, 0x7c, - 0x66, 0x0c, 0x29, 0xcb, 0x1d, 0x98, 0xf6, 0xe4, 0xe5, 0x30, 0x4c, 0x84, - 0xbf, 0x6f, 0x71, 0x4e, 0xc2, 0x12, 0x9f, 0x35, 0xd6, 0xf8, 0xc6, 0x30, - 0xe9, 0x9e, 0x1a, 0x8a, 0x2f, 0xd1, 0x96, 0xb3, 0x3c, 0x0f, 0xf5, 0x78, - 0xa7, 0xe0, 0xbd, 0x4b, 0xe0, 0xd8, 0x3d, 0x57, 0xa5, 0x44, 0xa0, 0xd9, - 0x10, 0x79, 0xd2, 0x10, 0x50, 0xc7, 0x77, 0x73, 0x09, 0xf8, 0xb4, 0xcf, - 0x66, 0xe3, 0x0c, 0xfb, 0x96, 0xf8, 0x52, 0xb3, 0x7e, 0x44, 0xf0, 0x03, - 0x54, 0xd4, 0xa2, 0x57, 0x38, 0x8a, 0x96, 0xfc, 0x7c, 0x4c, 0x9f, 0x3a, - 0xf2, 0xa2, 0x48, 0xbb, 0x3e, 0xd1, 0x11, 0x2c, 0xab, 0xdf, 0x53, 0x96, - 0xac, 0x58, 0x33, 0xb9, 0xdd, 0xd2, 0x4f, 0x8a, 0x0a, 0x89, 0x0e, 0xd3, - 0x6f, 0x58, 0x8c, 0xa1, 0x0a, 0x0b, 0xa7, 0xd7, 0x1f, 0x0a, 0x70, 0xe3, - 0x43, 0x12, 0x56, 0xb8, 0x6c, 0xf8, 0x75, 0x4e, 0x2b, 0xb0, 0x17, 0x29, - 0xe4, 0x95, 0x85, 0xd8, 0x85, 0x95, 0x63, 0x55, 0xa8, 0x82, 0xf0, 0xe7, - 0x7d, 0xf3, 0xf1, 0x78, 0x66, 0xd1, 0x92, 0x71, 0x99, 0xad, 0x30, 0x94, - 0xe9, 0x54, 0x2c, 0xe1, 0x57, 0xf3, 0x6a, 0xe6, 0x0c, 0x5e, 0xc7, 0x58, - 0xba, 0xb7, 0x61, 0xd3, 0x74, 0x72, 0x96, 0x06, 0x0b, 0x01, 0x3d, 0xc2, - 0xa1, 0xb4, 0x38, 0x81, 0x19, 0x44, 0xbc, 0x84, 0x52, 0x22, 0xc9, 0x67, - 0x81, 0x99, 0xfb, 0x0a, 0xc2, 0xff, 0x50, 0x67, 0xbe, 0x38, 0x5e, 0x13, - 0x16, 0x60, 0x83, 0x35, 0xb9, 0x2f, 
0xa9, 0x55, 0xbb, 0x30, 0x6b, 0x19, - 0xfc, 0x2a, 0x40, 0x24, 0x74, 0x20, 0x57, 0x78, 0xb9, 0x55, 0xb7, 0x70, - 0x86, 0x65, 0x43, 0x1c, 0x76, 0x2e, 0x91, 0x83, 0x5e, 0x33, 0xc2, 0xd4, - 0xcc, 0xb5, 0x1c, 0x45, 0xaf, 0xa3, 0x87, 0x95, 0x9b, 0x77, 0x50, 0x44, - 0x7e, 0xdd, 0xca, 0x3f, 0x51, 0x21, 0xae, 0xf2, 0x15, 0xa9, 0x32, 0x94, - 0xca, 0xde, 0x3b, 0x97, 0x13, 0x6b, 0xff, 0xe0, 0x79, 0x39, 0x40, 0xf0, - 0x66, 0x7d, 0x5e, 0xef, 0xec, 0x0a, 0x35, 0xd2, 0x0d, 0x09, 0x19, 0x13, - 0xf2, 0xc2, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xdc, 0x07, 0x2e, 0x46, 0xab, 0x4d, 0x6d, 0xf7, 0x24, 0xba, 0x02, 0xe3, - 0xc5, 0xe3, 0xed, 0x64, 0xc6, 0x77, 0x5a, 0x14, 0xae, 0x38, 0x52, 0x8c, - 0x16, 0x2c, 0x52, 0x0e, 0xf6, 0x65, 0x99, 0xcc, 0xf6, 0x9f, 0x77, 0xcc, - 0x2e, 0xaf, 0x14, 0xd1, 0xf0, 0x0f, 0xa7, 0x3e, 0x5b, 0x74, 0xff, 0xb9, - 0xd3, 0x30, 0x02, 0x5e, 0x52, 0xc8, 0x6f, 0x57, 0xef, 0x28, 0xf5, 0xfa, - 0x9e, 0x70, 0x00, 0xfc, 0x3e, 0xc3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xaa, 0x9f, 0x86, 0xb0, 0x6d, 0xa1, 0x0c, 0xfa, - 0xef, 0xb3, 0x6a, 0x50, 0xa6, 0xfe, 0xff, 0xa9, 0x61, 0x0b, 0x18, 0x72, - 0xee, 0xc6, 0xcd, 0x3a, 0x34, 0x5e, 0xa8, 0x81, 0x31, 0x54, 0x25, 0x05, - 0xc1, 0xd9, 0x66, 0x3d, 0x17, 0xbb, 0x03, 0x21, 0x07, 0x69, 0x3a, 0x37, - 0xe8, 0xd4, 0x6a, 0x68, 0xe1, 0xa3, 0x19, 0x5a, 0x8d, 0x14, 0x11, 0x09, - 0xef, 0xae, 0xfe, 0x94, 0x19, 0x8a, 0xe4, 0xb9, 0x6e, 0xe8, 0xfa, 0x12, - 0x2a, 0x5d, 0x00, 0x29, 0x27, 0x6d, 0x5a, 0xa5, 0x09, 0x34, 0x79, 0x2b, - 0xa8, 0xcc, 0x42, 0xb4, 0xde, 0xe0, 0x91, 0xb9, 0x06, 0x0c, 0x11, 0x17, - 0x25, 0x7a, 0x35, 0x57, 0x51, 0x40, 0xf3, 0xc7, 0xc6, 0x4a, 0x69, 0x98, - 0x2b, 0x2b, 0x3e, 0x5d, 0x32, 0xd8, 0x8f, 0xb0, 0x1d, 0xee, 0x77, 0xe3, - 0xaf, 0x4f, 0x71, 0x05, 0x04, 0xd2, 0xff, 0x51, 0xed, 0xa4, 0x69, 0x50, - 0x24, 0x2a, 0xe5, 0xaa, 0xbb, 0xc6, 0x7a, 0x7f, 0xb2, 0xdf, 0x1d, 0xc2, - 0x02, 0x2e, 0x52, 0xd1, 0xd9, 0x5b, 0xe7, 0x6c, 0x50, 0x31, 0x4e, 0xdf, - 0x8e, 0x3f, 0x37, 0xfc, 0xf5, 0x34, 
0x0e, 0xdb, 0x4c, 0x5d, 0x7d, 0xc8, - 0xe4, 0x72, 0x40, 0xcb, 0x95, 0xa5, 0x41, 0xeb, 0x78, 0x5f, 0x64, 0x20, - 0x55, 0x19, 0xc7, 0xf9, 0x9c, 0x71, 0x40, 0x8f, 0xcc, 0x2d, 0x86, 0xc0, - 0xf4, 0x36, 0x2b, 0x0e, 0x28, 0xb4, 0xad, 0x1b, 0xde, 0x60, 0x67, 0x03, - 0x0f, 0x7c, 0x18, 0xd9, 0xc3, 0x73, 0x67, 0x0d, 0x44, 0x3d, 0xbe, 0x7c, - 0xcf, 0x96, 0x22, 0x0b, 0x0e, 0x3a, 0x0b, 0xcf, 0x04, 0x95, 0x92, 0x7d, - 0x4b, 0xa2, 0x6a, 0x0b, 0x47, 0x72, 0x73, 0xa8, 0x9b, 0x96, 0x3d, 0xc6, - 0x03, 0x34, 0xb1, 0x69, 0xc2, 0x50, 0x60, 0x89, 0x8c, 0x55, 0x8f, 0x8e, - 0x74, 0xa8, 0x9e, 0x25, 0xe4, 0x0e, 0x73, 0xef, 0x4f, 0x51, 0xbe, 0xed, - 0x5c, 0x14, 0xd3, 0xfa, 0x94, 0x58, 0x8d, 0x5c, 0xa0, 0xb1, 0xfc, 0x37, - 0x6e, 0x9c, 0x9e, 0x61, 0xe5, 0x12, 0x13, 0xb2, 0x88, 0xc6, 0xcf, 0x60, - 0x3f, 0x0d, 0x51, 0x33, 0x22, 0xfa, 0xfb, 0x2d, 0x2b, 0x8d, 0x43, 0x9b, - 0x3d, 0x1e, 0x88, 0x24, 0x50, 0x78, 0xf7, 0x7e, 0x45, 0xb1, 0x0f, 0xa9, - 0xe6, 0x77, 0xf8, 0x78, 0xff, 0x57, 0x6a, 0x05, 0x06, 0x0c, 0x7e, 0x1e, - 0x7f, 0xe9, 0x90, 0xe8, 0x61, 0x68, 0xbc, 0x9e, 0xc4, 0xe5, 0x06, 0x04, - 0x76, 0xcc, 0x01, 0x57, 0x1a, 0x55, 0x9e, 0x45, 0x26, 0xd6, 0xd8, 0xc2, - 0x50, 0x25, 0xfc, 0x72, 0x4e, 0x18, 0xbe, 0xf2, 0x2f, 0xc0, 0x1b, 0xc8, - 0x14, 0xeb, 0x24, 0xda, 0x15, 0x0a, 0x83, 0x38, 0xc5, 0xdd, 0xc9, 0xd7, - 0x12, 0x35, 0x55, 0xdf, 0x2c, 0x23, 0xea, 0x17, 0xca, 0xbf, 0x18, 0xc9, - 0x80, 0x63, 0x4b, 0x77, 0x8b, 0x17, 0x01, 0x05, 0x1b, 0xa3, 0x0b, 0x0f, - 0xdd, 0xc6, 0xe0, 0xdf, 0xc9, 0xa6, 0x8c, 0x50, 0x95, 0x8d, 0x6c, 0x96, - 0x67, 0xff, 0x88, 0x38, 0x3b, 0x76, 0x72, 0x11, 0x35, 0xa0, 0x1c, 0xc8, - 0x96, 0x9c, 0xe5, 0x90, 0x79, 0x0e, 0x62, 0x57, 0x00, 0xd9, 0x57, 0xf8, - 0xa4, 0xc2, 0xc2, 0x0a, 0x17, 0x8e, 0xd7, 0x03, 0x6d, 0x4d, 0x14, 0xb6, - 0x96, 0x8a, 0x76, 0x67, 0x58, 0xce, 0x9c, 0xb3, 0x10, 0x49, 0x06, 0xeb, - 0x56, 0x43, 0x40, 0xcb, 0xd4, 0xd7, 0x59, 0x42, 0xa4, 0xd7, 0x21, 0x6a, - 0x51, 0x3d, 0x1c, 0x54, 0xd7, 0xd6, 0xa2, 0xcf, 0xf8, 0xf6, 0x72, 0x35, - 0x04, 0xa6, 0xe3, 0x53, 0xca, 0xc5, 
0x62, 0xee, 0xa9, 0xc3, 0x6d, 0x1b, - 0xc4, 0xc5, 0xd9, 0xa7, 0x37, 0xc2, 0x04, 0x01, 0xc9, 0x4a, 0x2e, 0x26, - 0xdd, 0x12, 0x6e, 0x41, 0x64, 0xb4, 0xe8, 0xe8, 0xc7, 0xf8, 0xab, 0x8a, - 0xab, 0x1d, 0x7f, 0x2d, 0x58, 0xc2, 0xc4, 0xf0, 0x5d, 0x11, 0x35, 0x52, - 0x88, 0xbc, 0x0f, 0x44, 0x6e, 0x91, 0x1e, 0x87, 0xb4, 0xb1, 0x91, 0x52, - 0x32, 0xe4, 0x38, 0x6d, 0x5e, 0x8d, 0x30, 0xf0, 0xbc, 0xc3, 0x15, 0x80, - 0x47, 0x36, 0x35, 0xb0, 0x93, 0xf3, 0xc4, 0x82, 0xc7, 0x73, 0xc1, 0x67, - 0x0c, 0x7a, 0x31, 0x36, 0xbc, 0x73, 0x67, 0x66, 0xae, 0x48, 0x82, 0x27, - 0x6e, 0x14, 0xd0, 0xd5, 0x12, 0x10, 0xce, 0x5e, 0x37, 0xcd, 0x7e, 0xa5, - 0xcb, 0xff, 0x91, 0xf0, 0x62, 0xdb, 0x95, 0x74, 0x0c, 0x8c, 0x1e, 0x78, - 0x11, 0x02, 0xb3, 0x02, 0x0b, 0x31, 0xe7, 0x4e, 0x8b, 0x58, 0x6a, 0xde, - 0x20, 0x93, 0x8b, 0x8e, 0x62, 0x03, 0x24, 0xc9, 0xca, 0xf8, 0x44, 0x1d, - 0x0c, 0x1b, 0xd8, 0x5d, 0xcc, 0xe2, 0x8e, 0x02, 0xc6, 0x5c, 0x06, 0x45, - 0xe6, 0x94, 0x8f, 0xa2, 0x3e, 0xf5, 0xe9, 0xf5, 0x88, 0x87, 0xb2, 0x84, - 0x1e, 0xb6, 0xb6, 0xfc, 0x9f, 0x8e, 0x79, 0xf5, 0x4b, 0x24, 0x81, 0x3e, - 0x5d, 0xf4, 0x10, 0x6e, 0xdd, 0x8c, 0x8c, 0xae, 0xc6, 0x2c, 0x26, 0xb2, - 0xfc, 0xf3, 0x99, 0xe8, 0x8c, 0x65, 0x5d, 0x6c, 0xa8, 0x1d, 0x6f, 0x1e, - 0x32, 0x0a, 0xee, 0x87, 0xf6, 0xe1, 0xdd, 0x5e, 0x7f, 0x7a, 0x90, 0x8c, - 0x3f, 0xe8, 0x47, 0x95, 0x9b, 0xc8, 0x2c, 0x49, 0xc9, 0xe4, 0x2d, 0xea, - 0x58, 0xfc, 0x29, 0x1a, 0xb7, 0xa1, 0xf9, 0xb8, 0x84, 0x41, 0xa0, 0xf1, - 0x77, 0x83, 0x56, 0x73, 0x86, 0xea, 0xf4, 0xf5, 0x2a, 0xa6, 0x6b, 0x00, - 0x64, 0x39, 0x08, 0x8f, 0xf0, 0x22, 0x1a, 0x4c, 0xf2, 0x5a, 0xd0, 0xaa, - 0x39, 0xae, 0x8a, 0xbc, 0x03, 0x99, 0xf7, 0xcc, 0x80, 0xdf, 0x2b, 0x85, - 0xbe, 0x1a, 0x97, 0x28, 0x63, 0x04, 0x72, 0x75, 0x75, 0xb4, 0x9c, 0xd3, - 0x17, 0xcc, 0x1e, 0xa1, 0xd2, 0x47, 0x18, 0x45, 0xad, 0xb4, 0x0a, 0x32, - 0x31, 0x36, 0x64, 0x48, 0x3f, 0x7b, 0x4b, 0xc0, 0xd6, 0x78, 0x46, 0xaa, - 0x90, 0x89, 0xf9, 0x36, 0x3d, 0xb4, 0xb3, 0x50, 0x51, 0xd9, 0x55, 0x6f, - 0xa9, 0xe7, 0x25, 0xaf, 0xa0, 0xca, 
0x9d, 0x45, 0x83, 0xc3, 0x0b, 0x2a, - 0x0c, 0xf9, 0x3f, 0xe4, 0x08, 0xf4, 0xbd, 0x23, 0x45, 0x85, 0xcf, 0x41, - 0x93, 0xd3, 0x21, 0x5f, 0x53, 0xa2, 0x5b, 0xa9, 0xf5, 0xe9, 0x8f, 0x2a, - 0x2d, 0x53, 0x3c, 0x36, 0x17, 0xce, 0x37, 0x35, 0x3e, 0x9e, 0x6b, 0xbc, - 0xba, 0xaa, 0xa5, 0x61, 0x79, 0x98, 0x8e, 0xbd, 0x19, 0xf4, 0x5f, 0xa9, - 0xb8, 0x96, 0xa2, 0xce, 0x32, 0x00, 0xab, 0x51, 0xcb, 0xfa, 0x30, 0x3a, - 0x83, 0x92, 0x91, 0xad, 0x08, 0x61, 0x62, 0x51, 0x7f, 0x19, 0xa9, 0x2a, - 0x84, 0xf2, 0xab, 0x7e, 0x5e, 0xa7, 0x5a, 0x54, 0x7f, 0x68, 0x2a, 0x7b, - 0x4f, 0xde, 0x45, 0x1d, 0xef, 0x73, 0x5f, 0xc0, 0x40, 0x6e, 0xec, 0x6c, - 0xe9, 0xa5, 0x6b, 0x46, 0x54, 0x7c, 0x24, 0x8b, 0xa4, 0xe5, 0xb4, 0x82, - 0x31, 0x1f, 0x3e, 0x79, 0x2e, 0x21, 0x8c, 0xf1, 0xbd, 0xad, 0x7c, 0x28, - 0xcc, 0xbd, 0x58, 0x72, 0xe9, 0x6a, 0x04, 0x56, 0x67, 0x0f, 0x62, 0x98, - 0x5a, 0x97, 0x4b, 0xe2, 0x67, 0x70, 0xbb, 0x17, 0xb1, 0x84, 0x5b, 0xd4, - 0x6e, 0xab, 0x90, 0x29, 0x20, 0x93, 0x34, 0xa8, 0x03, 0x0f, 0xed, 0x1a, - 0xf0, 0x1b, 0x92, 0x87, 0x43, 0xa5, 0x6a, 0x1c, 0xdc, 0xd7, 0x22, 0x68, - 0x83, 0x98, 0x74, 0x2a, 0x4c, 0x51, 0xef, 0x71, 0x19, 0xd5, 0x3d, 0x05, - 0x19, 0x61, 0xb2, 0x52, 0xa8, 0x6e, 0xda, 0x72, 0x51, 0x66, 0x9f, 0xf0, - 0x12, 0xf6, 0x18, 0x60, 0xcc, 0xd7, 0x2f, 0x2e, 0x83, 0x14, 0x09, 0xdb, - 0x55, 0x1c, 0xf2, 0xaf, 0xfd, 0xa4, 0x40, 0xf1, 0x4a, 0xc7, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x9c, 0x52, 0xff, 0x48, - 0x06, 0x61, 0x76, 0x6d, 0xd7, 0x44, 0xb1, 0x0c, 0x32, 0x62, 0x15, 0xa1, - 0xc3, 0x97, 0x03, 0xdd, 0xed, 0x20, 0x3c, 0x3a, 0x09, 0x16, 0xe5, 0x7d, - 0x8c, 0xf9, 0x7b, 0x22, 0x5e, 0x3a, 0xdd, 0xf0, 0xc6, 0xf0, 0x3a, 0xd4, - 0x94, 0x85, 0x1c, 0x60, 0x74, 0x91, 0xa3, 0xe2, 0x8a, 0xe5, 0x3e, 0xd4, - 0x95, 0x28, 0x8b, 0x1a, 0x7b, 0xbe, 0x07, 0xc0, 0xe3, 0x6b, 0xb9, 0x85, - 0x82, 0x0b, 0x24, 0xba, 0x1c, 0xfc, 0xc0, 0x0a, 0x21, 0x33, 0xad, 0x00, - 0x19, 0xce, 0xb5, 0x8f, 0x73, 0x05, 0xf1, 0xac, 0x03, 0xbe, 0x1f, 0x22, - 0xd5, 0x32, 0x5e, 0x50, 0xe3, 0xe0, 
0x62, 0x26, 0xf4, 0xb0, 0x85, 0xd8, - 0xf7, 0xa7, 0xf4, 0xa7, 0xff, 0x10, 0xb8, 0xbc, 0xe0, 0x3e, 0x4d, 0xcb, - 0x37, 0x74, 0xcc, 0x85, 0xed, 0xa0, 0x34, 0x6c, 0xfa, 0x37, 0x84, 0x6a, - 0x94, 0x55, 0x3b, 0x1e, 0x14, 0xab, 0x26, 0x7b, 0x3e, 0xac, 0xc3, 0x79, - 0xcd, 0x1b, 0x00, 0x02, 0xb3, 0x01, 0xc3, 0x10, 0xdd, 0x56, 0x7d, 0x0e, - 0x69, 0x39, 0x3c, 0x17, 0xa3, 0xae, 0x9c, 0x2d, 0xc7, 0x5a, 0x0b, 0x7c, - 0xd0, 0xac, 0xa1, 0x91, 0x6a, 0x6d, 0xc0, 0x3f, 0x98, 0xf1, 0x21, 0xf5, - 0xa5, 0x7c, 0xbc, 0x70, 0x0d, 0x7b, 0x2f, 0x0d, 0x5a, 0xa5, 0x4a, 0x5a, - 0xff, 0x51, 0xbf, 0x7f, 0xb5, 0x4f, 0x2c, 0xba, 0xa9, 0x46, 0x81, 0x6b, - 0xac, 0xc6, 0x62, 0x2d, 0xd7, 0xb5, 0x04, 0x5f, 0xd4, 0x5f, 0x1f, 0x6b, - 0x11, 0x7d, 0xe3, 0x58, 0x1f, 0xb5, 0xbf, 0x16, 0x43, 0x88, 0x05, 0xf5, - 0xa4, 0x7b, 0xb5, 0x0e, 0xf4, 0x01, 0xb6, 0x90, 0x69, 0x52, 0x0a, 0x5e, - 0x9b, 0x87, 0x51, 0x5e, 0xd5, 0xed, 0x2c, 0xcc, 0x58, 0xad, 0xe6, 0x77, - 0xa2, 0xc5, 0x7c, 0x1e, 0xc5, 0x92, 0xbe, 0xed, 0x3a, 0x9a, 0x97, 0xed, - 0x56, 0xc8, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x16, 0xe8, 0x24, 0xe3, 0x82, 0x36, 0x8e, 0x50, 0x45, 0xbe, 0xc6, 0x10, - 0x02, 0xb9, 0x6d, 0xf9, 0xed, 0x8f, 0x64, 0x35, 0x4d, 0x2c, 0x9f, 0x99, - 0xdc, 0xee, 0xfa, 0x63, 0x99, 0xc4, 0xb8, 0x3d, 0x77, 0xea, 0xda, 0xd5, - 0x95, 0x8b, 0x8e, 0x76, 0x02, 0x9c, 0x62, 0xa0, 0xad, 0xfe, 0x80, 0x61, - 0x72, 0x59, 0xd6, 0x9f, 0x16, 0x2e, 0x09, 0x71, 0xb8, 0xd7, 0x65, 0x25, - 0xc2, 0x5b, 0x40, 0x67, 0x8e, 0xd6, 0xf8, 0xdf, 0x67, 0x29, 0x19, 0xa2, - 0xa6, 0x07, 0xf3, 0xc8, 0x91, 0x7d, 0xf2, 0x50, 0x71, 0xba, 0x5c, 0x2d, - 0xa7, 0xae, 0xc4, 0xd5, 0xeb, 0xb9, 0x0d, 0x2d, 0x23, 0xe5, 0x8c, 0x65, - 0xf5, 0xf8, 0x97, 0x69, 0xde, 0x25, 0x6f, 0xea, 0x12, 0x72, 0x3e, 0xb9, - 0xa7, 0x8d, 0xcf, 0xa5, 0x66, 0xee, 0x4e, 0x2e, 0x66, 0x6b, 0xec, 0x77, - 0x7f, 0x53, 0xdc, 0x29, 0x73, 0x5e, 0xe9, 0x2f, 0x79, 0xac, 0x8d, 0x0f, - 0x44, 0x09, 0x5d, 0x25, 0x1d, 0x78, 0xb6, 0xe9, 0xd0, 0xfa, 0x8f, 0x5f, - 0x9c, 0xf0, 0xe0, 0xfc, 0x62, 0x9f, 
0x52, 0x6b, 0x5b, 0x8e, 0x3f, 0xdf, - 0xb4, 0xf1, 0xdf, 0x35, 0xd0, 0x8f, 0x5a, 0xc9, 0x1f, 0x08, 0x86, 0xaa, - 0x5a, 0x9e, 0xe8, 0xb0, 0xaa, 0xd4, 0xcd, 0x2a, 0x5b, 0x4f, 0x7f, 0x39, - 0x9f, 0x7f, 0x21, 0xf2, 0xfd, 0x05, 0x96, 0x53, 0x09, 0xfd, 0x36, 0x4c, - 0xcd, 0x98, 0x74, 0xf5, 0xbd, 0xcd, 0x9e, 0x14, 0x15, 0x05, 0xb9, 0x3d, - 0x5f, 0x8a, 0x02, 0x86, 0x10, 0xd7, 0xd4, 0x01, 0x20, 0xd9, 0x8c, 0x65, - 0x7d, 0x9d, 0x39, 0x25, 0xbc, 0xce, 0x1a, 0xb1, 0x76, 0x92, 0xc3, 0x03, - 0xed, 0xa2, 0x41, 0x31, 0x0d, 0xc0, 0x40, 0x94, 0x01, 0xbc, 0x9b, 0xe9, - 0x5e, 0x3e, 0x8c, 0x49, 0xf6, 0x98, 0x0c, 0x39, 0x79, 0xdc, 0xd1, 0x1b, - 0xc5, 0xb2, 0x20, 0xb4, 0x6c, 0xb4, 0x4f, 0xce, 0xf4, 0x6c, 0x0b, 0xef, - 0x85, 0xf2, 0x7d, 0x9a, 0x90, 0x58, 0x1b, 0x51, 0x56, 0x52, 0xac, 0x75, - 0x9f, 0x17, 0xe6, 0x48, 0xaf, 0x18, 0x4c, 0xd8, 0x67, 0xe8, 0xd2, 0x61, - 0xbc, 0xa0, 0x95, 0xc9, 0x78, 0xd8, 0xa2, 0x1d, 0x47, 0x59, 0x30, 0xcf, - 0xf3, 0x79, 0x06, 0xd4, 0x25, 0xf8, 0x9c, 0x5c, 0x28, 0xee, 0xb0, 0xd2, - 0xb6, 0xaf, 0x34, 0x0e, 0xe5, 0xe4, 0x16, 0x2e, 0x05, 0x45, 0x23, 0xc1, - 0x88, 0x90, 0x4a, 0x8f, 0xff, 0xfb, 0xe2, 0xc0, 0xb7, 0xae, 0xb5, 0x50, - 0xc9, 0x26, 0xf0, 0xa2, 0xf5, 0x21, 0x23, 0x79, 0x23, 0xb6, 0x8f, 0x57, - 0x64, 0xd1, 0x27, 0xc2, 0x07, 0x63, 0xa6, 0x54, 0x1f, 0x2f, 0xca, 0x16, - 0xb8, 0x28, 0x51, 0x2a, 0x92, 0xe0, 0x06, 0x36, 0x55, 0x00, 0x6c, 0x99, - 0x31, 0xa7, 0x56, 0xb3, 0x7b, 0x15, 0xcd, 0xc1, 0x32, 0x3a, 0xc0, 0x37, - 0x1f, 0xea, 0x29, 0xb6, 0x75, 0xdf, 0x8a, 0x17, 0x09, 0x45, 0xc2, 0x6e, - 0xe2, 0x4c, 0xa5, 0x93, 0x9b, 0x17, 0x08, 0x27, 0x75, 0x33, 0xdb, 0x1f, - 0xab, 0x37, 0xad, 0x8e, 0xaa, 0xef, 0x0b, 0x82, 0xaa, 0xa7, 0xae, 0x2c, - 0x43, 0x4d, 0x8f, 0xa0, 0x43, 0xd7, 0xa1, 0x34, 0xeb, 0xc0, 0x4e, 0xbd, - 0x64, 0xfc, 0xc8, 0x6a, 0x56, 0xa8, 0xfc, 0x9e, 0x2d, 0x5f, 0x7a, 0xa3, - 0x72, 0x06, 0x79, 0x38, 0x33, 0x05, 0xa7, 0xf0, 0x09, 0x48, 0x55, 0xfe, - 0x3f, 0xab, 0x25, 0x8e, 0x76, 0x1d, 0x12, 0x5a, 0x20, 0x68, 0xfb, 0x51, - 0x51, 0x33, 0x40, 0x37, 0x0c, 0x90, 
0x98, 0x6f, 0x66, 0x3f, 0x40, 0xa2, - 0x2e, 0x3c, 0xd1, 0x22, 0x51, 0x54, 0x25, 0x7e, 0x4c, 0x5d, 0x96, 0xb2, - 0x65, 0x0f, 0xa3, 0xdf, 0x8e, 0x97, 0xfe, 0xeb, 0xe7, 0xc6, 0x22, 0x2a, - 0x47, 0x3a, 0x78, 0x1b, 0x39, 0x2e, 0xd6, 0xbc, 0x35, 0xb4, 0xf4, 0xc3, - 0xf2, 0x6a, 0x12, 0xc9, 0xe7, 0x6c, 0x9a, 0xfc, 0xed, 0xbc, 0x11, 0xc7, - 0x71, 0x09, 0x8f, 0x56, 0xc1, 0xd8, 0xb6, 0x92, 0x35, 0x97, 0x8e, 0x71, - 0xd2, 0xbb, 0xb4, 0xed, 0xf0, 0x7e, 0xff, 0x58, 0xd9, 0x95, 0x26, 0xea, - 0xa9, 0x4d, 0x38, 0x8d, 0x4e, 0x8e, 0x53, 0xae, 0x7e, 0xe6, 0xe6, 0x82, - 0x35, 0x96, 0xab, 0x0f, 0x04, 0x0f, 0xf2, 0xac, 0x1b, 0xcd, 0x07, 0x17, - 0x1b, 0x25, 0x2f, 0x92, 0xaf, 0x19, 0xa2, 0x1b, 0xa0, 0x7a, 0xc7, 0x4f, - 0xb8, 0x1b, 0x89, 0x21, 0xb5, 0xe2, 0x24, 0xe9, 0x78, 0xae, 0x7d, 0xd7, - 0xcc, 0x8e, 0x3f, 0xa7, 0xe9, 0xbe, 0xe6, 0x79, 0x0f, 0xdf, 0x86, 0xe9, - 0xb9, 0xcd, 0x82, 0x7b, 0xf5, 0x04, 0x89, 0xa0, 0x73, 0x5d, 0xa2, 0x4e, - 0xd6, 0xa0, 0x60, 0x21, 0xe2, 0xfe, 0xd3, 0xf4, 0x19, 0x8b, 0x6a, 0x03, - 0x12, 0x9c, 0x51, 0x9a, 0x41, 0x4e, 0xf6, 0xb4, 0x6e, 0x0c, 0x43, 0xf5, - 0x00, 0x00, 0x78, 0x12, 0xdd, 0x21, 0xa8, 0xc7, 0x21, 0xa1, 0x4e, 0x44, - 0x10, 0xd0, 0xdb, 0x6f, 0x0b, 0x4c, 0xe7, 0x7a, 0x8c, 0x0c, 0xaa, 0xb6, - 0x9a, 0x7d, 0xa9, 0xff, 0x5a, 0x2e, 0x15, 0x9e, 0x6f, 0xea, 0xe1, 0x42, - 0x0c, 0x9c, 0x5a, 0x3b, 0xd5, 0xe6, 0xde, 0x23, 0x3f, 0x9c, 0x45, 0x20, - 0x67, 0x96, 0x50, 0x16, 0x80, 0x42, 0xe7, 0x67, 0x7d, 0x24, 0xdc, 0x00, - 0xaa, 0x01, 0x8a, 0xa3, 0x61, 0xfe, 0x9a, 0xce, 0xc1, 0xe5, 0x2e, 0x19, - 0x85, 0x04, 0xe6, 0x7b, 0xe8, 0x7a, 0xbc, 0x9d, 0xfe, 0x71, 0x29, 0x1d, - 0x17, 0xae, 0x6b, 0x1a, 0x64, 0xd7, 0xfe, 0x18, 0x29, 0x07, 0x9b, 0x49, - 0x43, 0xba, 0x29, 0x37, 0xa8, 0xb0, 0x26, 0x27, 0x6b, 0x7d, 0xde, 0x49, - 0x12, 0x90, 0x05, 0xe2, 0x2c, 0xd8, 0x08, 0xd0, 0x5d, 0x74, 0xa7, 0x15, - 0xbe, 0x34, 0x34, 0x6d, 0xad, 0xfb, 0xa8, 0x01, 0x4a, 0x6c, 0x98, 0xba, - 0x84, 0x38, 0xbd, 0x05, 0xe8, 0x87, 0x27, 0x91, 0x3f, 0xb8, 0xe9, 0x06, - 0x27, 0xda, 0x56, 0x07, 0xaa, 0xea, 
0xf4, 0x80, 0x5c, 0x12, 0x44, 0xbe, - 0x23, 0xb3, 0x63, 0x9f, 0x5f, 0x37, 0xa7, 0x53, 0x4c, 0xfc, 0x4d, 0x87, - 0xeb, 0x91, 0xe8, 0xd7, 0x5a, 0xd6, 0xca, 0x67, 0x2d, 0x2f, 0x5a, 0x0e, - 0xc7, 0x82, 0x78, 0xa4, 0xf3, 0x56, 0x07, 0xa5, 0xab, 0x6d, 0x09, 0xd2, - 0x0d, 0x08, 0x6b, 0x6e, 0x1f, 0xc1, 0xf2, 0x91, 0x1a, 0x39, 0xfe, 0x14, - 0x56, 0x3f, 0xeb, 0x9f, 0x14, 0xc2, 0xb3, 0xb2, 0xc2, 0x8d, 0xc2, 0xee, - 0x7e, 0xf0, 0x7d, 0x92, 0xd2, 0xc3, 0x57, 0x3e, 0x2c, 0x07, 0x1b, 0x6a, - 0x9b, 0x3b, 0x79, 0x59, 0xc9, 0x22, 0x96, 0x6c, 0x3e, 0x37, 0xd3, 0x0e, - 0x5c, 0xf6, 0x8f, 0xa9, 0xaa, 0xc9, 0xa4, 0x4b, 0xaf, 0x5d, 0x1a, 0xb6, - 0xf3, 0x91, 0x32, 0x4f, 0xca, 0x72, 0xa0, 0x42, 0x01, 0x51, 0xaf, 0x19, - 0x89, 0xc4, 0xcc, 0x9b, 0xf3, 0x52, 0xe9, 0xa6, 0xf2, 0x71, 0x6f, 0x5a, - 0x38, 0x02, 0xb8, 0x75, 0x88, 0x5f, 0x8d, 0x12, 0xc5, 0x55, 0x4f, 0xd1, - 0xba, 0xf2, 0x24, 0xdc, 0x63, 0x5f, 0x93, 0xc7, 0xf3, 0xe7, 0x59, 0xac, - 0xc3, 0xed, 0xbc, 0x02, 0xe3, 0xad, 0xb2, 0x8e, 0x2c, 0x2d, 0x47, 0xb4, - 0x34, 0x8d, 0xae, 0x44, 0xc8, 0x5f, 0x14, 0xe8, 0x8e, 0x7b, 0xc3, 0x60, - 0x53, 0x9a, 0x51, 0xea, 0x7f, 0x2f, 0xb6, 0x62, 0x61, 0xf7, 0xc0, 0x18, - 0x0f, 0x20, 0x79, 0x13, 0x5c, 0xe8, 0xca, 0x04, 0x29, 0x5f, 0x70, 0x4d, - 0x88, 0xa2, 0x43, 0x20, 0x57, 0x33, 0x04, 0x74, 0x8e, 0x7c, 0x89, 0xd4, - 0x56, 0x8f, 0x93, 0x86, 0x81, 0x6c, 0x11, 0xfc, 0x32, 0x0e, 0xb0, 0x3e, - 0xe5, 0x13, 0xbf, 0x76, 0x62, 0xcc, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x0e, 0xf8, 0x8f, 0xde, 0xfd, 0xfd, 0xcf, 0xd1, - 0x6f, 0x9f, 0xf2, 0xb6, 0xb6, 0x59, 0xb2, 0x73, 0x1c, 0x3c, 0x0d, 0xb0, - 0x4d, 0xb8, 0x96, 0xc6, 0xeb, 0xe5, 0xf8, 0x0d, 0x3e, 0xd7, 0x0c, 0xbd, - 0x9c, 0xaa, 0xd5, 0x1c, 0x19, 0x9a, 0x4c, 0x8e, 0xfa, 0xac, 0x68, 0x74, - 0x16, 0x06, 0xb5, 0x49, 0xe7, 0xd5, 0x6f, 0x4f, 0xcc, 0xd9, 0x02, 0x74, - 0xd6, 0x08, 0x73, 0x7c, 0xa9, 0xfa, 0x3e, 0x50, 0x87, 0xf7, 0xfb, 0xa6, - 0x94, 0xdc, 0xb1, 0x40, 0xec, 0xa7, 0xa9, 0x39, 0xff, 0x40, 0x4a, 0x97, - 0x9b, 0xcc, 0x57, 0x66, 0x68, 0xd6, 
0xa8, 0x4d, 0x13, 0x06, 0x0e, 0x03, - 0xc4, 0xdf, 0x7a, 0xe4, 0x2f, 0x0e, 0xd7, 0x54, 0xe0, 0xbd, 0x93, 0xeb, - 0x82, 0xd8, 0x05, 0x2d, 0xa2, 0xf0, 0x4e, 0xd0, 0xf9, 0x3e, 0x3e, 0x6b, - 0x3d, 0x08, 0x39, 0x4e, 0x35, 0x13, 0x7b, 0x3b, 0x39, 0x2c, 0x47, 0x2c, - 0x61, 0x9f, 0xfd, 0x59, 0x88, 0x5f, 0x65, 0x08, 0xa9, 0x66, 0xec, 0xb5, - 0x21, 0xf3, 0xe9, 0xba, 0x11, 0x63, 0x24, 0x6c, 0xf4, 0x50, 0x3a, 0xe5, - 0x0c, 0x06, 0x39, 0x69, 0x2f, 0xca, 0x0f, 0x48, 0xbe, 0x95, 0x7d, 0x13, - 0x3d, 0xa5, 0x75, 0x69, 0x85, 0xc8, 0xb3, 0x72, 0x72, 0x3c, 0x4f, 0x96, - 0xe7, 0xb7, 0xbd, 0xe7, 0x76, 0xba, 0xac, 0xc0, 0x07, 0x4d, 0xc1, 0xed, - 0xb9, 0xf0, 0x91, 0x2e, 0x36, 0xb7, 0x5b, 0x1c, 0xb7, 0xd6, 0xb3, 0x45, - 0x7d, 0x0a, 0xf5, 0x43, 0xdd, 0x7a, 0x8b, 0x4e, 0x18, 0xf2, 0xf3, 0x19, - 0xcd, 0x4a, 0xda, 0x3c, 0x1b, 0x05, 0x27, 0x67, 0x43, 0xa9, 0x8e, 0xe7, - 0x4a, 0x95, 0xa9, 0xad, 0x6c, 0x8c, 0xb2, 0x2e, 0x12, 0xcb, 0xf3, 0xeb, - 0x65, 0x26, 0xf4, 0x3e, 0x86, 0xee, 0x7e, 0xd9, 0xba, 0xce, 0x8d, 0x15, - 0x3e, 0xa8, 0x40, 0x59, 0x1d, 0x27, 0x78, 0x75, 0xf0, 0xf9, 0x33, 0xb5, - 0x32, 0xa9, 0x66, 0xe6, 0x2e, 0x2e, 0x3d, 0xf5, 0x4a, 0xf0, 0x97, 0x2d, - 0xe7, 0x43, 0x85, 0x43, 0x61, 0x25, 0x15, 0x13, 0x9e, 0x8e, 0xf6, 0x78, - 0xe8, 0x67, 0xba, 0xc2, 0x6d, 0xda, 0x46, 0x25, 0x76, 0xd9, 0x9b, 0x69, - 0x95, 0x4b, 0x50, 0x8c, 0xb7, 0x36, 0x49, 0xbc, 0xd7, 0x39, 0x69, 0xb9, - 0xc1, 0x5f, 0x5f, 0xcc, 0x83, 0x4c, 0x16, 0xb8, 0x0c, 0x85, 0xf1, 0xa4, - 0x57, 0x6c, 0x22, 0x1f, 0x60, 0x0c, 0xff, 0xb6, 0xc9, 0xf7, 0x21, 0x2d, - 0x35, 0x78, 0x31, 0x79, 0xd0, 0x6d, 0x61, 0xec, 0x61, 0x04, 0x75, 0x5c, - 0x06, 0xc3, 0x53, 0x1b, 0xb5, 0xdc, 0x23, 0xb9, 0xd9, 0x07, 0xd1, 0xd0, - 0xb3, 0xa5, 0xab, 0xd9, 0xbe, 0xb7, 0xdc, 0xae, 0x3f, 0x3e, 0xd7, 0x2a, - 0x79, 0x3f, 0x9c, 0x27, 0x81, 0x8d, 0x61, 0xe8, 0x46, 0x8f, 0x05, 0xf4, - 0x9c, 0x30, 0x35, 0x9a, 0x2f, 0x62, 0x84, 0x7c, 0xa5, 0x95, 0x68, 0x34, - 0xe6, 0xf0, 0xb9, 0x42, 0xd4, 0x37, 0xc6, 0xd2, 0x35, 0x1f, 0x7b, 0xe0, - 0xa6, 0x92, 0xcf, 0xf7, 0x0f, 0x08, 
0x10, 0x79, 0xbd, 0xa8, 0x7c, 0x4e, - 0xef, 0xf1, 0x01, 0x8d, 0x1b, 0x0c, 0x98, 0x46, 0x28, 0xdc, 0xd5, 0xa8, - 0xcf, 0x67, 0x7d, 0x87, 0x2a, 0x8f, 0xdd, 0x52, 0x43, 0x5a, 0x55, 0x80, - 0x88, 0xa6, 0xcd, 0x9c, 0x5d, 0x36, 0xae, 0xef, 0x61, 0x43, 0xec, 0xf0, - 0x7f, 0x92, 0x21, 0x1f, 0xa2, 0xa3, 0x76, 0x0e, 0x5d, 0xf3, 0xa7, 0xe7, - 0x7d, 0xb0, 0x2c, 0x94, 0x36, 0x95, 0x34, 0x4e, 0x04, 0xfb, 0x51, 0xf9, - 0xe6, 0x7e, 0x56, 0x7a, 0x59, 0xce, 0x0a, 0x45, 0x7e, 0xeb, 0xc4, 0xbc, - 0xfd, 0x20, 0xaa, 0x34, 0x6b, 0xee, 0x3b, 0x09, 0xe8, 0x00, 0x4b, 0xfc, - 0x68, 0x24, 0x43, 0xdb, 0x09, 0x58, 0xd0, 0xb6, 0xbf, 0xaf, 0x1d, 0x7f, - 0x8a, 0x4c, 0x9e, 0x51, 0x97, 0x97, 0xe1, 0x0c, 0x0d, 0xaf, 0xd1, 0x1e, - 0x62, 0xad, 0x70, 0xa5, 0x8a, 0x24, 0x2f, 0x4a, 0xa6, 0x55, 0xb1, 0x44, - 0x09, 0x88, 0xab, 0xa5, 0x45, 0x28, 0xa0, 0x34, 0x9e, 0x14, 0x2c, 0xf9, - 0x0f, 0xb8, 0x33, 0x8f, 0xcc, 0xba, 0x50, 0x34, 0x4c, 0x96, 0x89, 0x09, - 0xb9, 0xa8, 0xfb, 0xac, 0x59, 0x73, 0xea, 0x61, 0xbc, 0x0d, 0x24, 0x3a, - 0x20, 0xc2, 0x76, 0xfc, 0x2e, 0xce, 0xfb, 0x75, 0x00, 0xca, 0x58, 0xbd, - 0xab, 0x61, 0x9b, 0x13, 0x2b, 0xa3, 0xf6, 0x15, 0x55, 0x83, 0x23, 0xc4, - 0xf3, 0x4c, 0x89, 0xc5, 0x4a, 0x18, 0x5c, 0x8d, 0x41, 0xcc, 0x06, 0x7b, - 0xe3, 0x2a, 0x1f, 0x6a, 0x57, 0xbc, 0x54, 0x61, 0x0c, 0xf2, 0xec, 0xbf, - 0xb0, 0xf0, 0x21, 0xde, 0xfc, 0xe4, 0xef, 0xce, 0x47, 0xc8, 0xdc, 0x11, - 0xc7, 0x8a, 0x12, 0x97, 0x68, 0x1d, 0x9e, 0x9a, 0xbf, 0xad, 0x62, 0x7e, - 0x4b, 0x88, 0xd7, 0x20, 0x22, 0xce, 0x5e, 0xe3, 0x87, 0x12, 0xa3, 0x05, - 0xef, 0x1f, 0x05, 0xb1, 0xbd, 0x1b, 0x80, 0x43, 0x84, 0x33, 0x8b, 0x87, - 0xa5, 0xc2, 0xe1, 0x49, 0xa8, 0x75, 0x49, 0x9b, 0x1b, 0x64, 0x8a, 0xd0, - 0x86, 0x10, 0xa8, 0x72, 0xeb, 0x2e, 0xe7, 0x3f, 0xaa, 0x6b, 0x4a, 0x22, - 0xae, 0x17, 0x8f, 0x10, 0x22, 0x03, 0x66, 0x67, 0x35, 0x40, 0x29, 0x1e, - 0xf2, 0x05, 0x36, 0xd5, 0xed, 0xe2, 0x2a, 0xcc, 0x77, 0xe2, 0x16, 0xef, - 0xa7, 0x9b, 0xe1, 0x1b, 0xba, 0xf3, 0xf5, 0x74, 0x6c, 0x2a, 0x98, 0x8a, - 0x14, 0xaf, 0x2c, 0xab, 0xfb, 0x51, 
0x53, 0x75, 0x17, 0xcb, 0x5c, 0x86, - 0xb5, 0x60, 0x70, 0x29, 0x65, 0x69, 0x49, 0x42, 0x4f, 0x42, 0x6b, 0xc7, - 0xdb, 0x98, 0x7d, 0x1e, 0xf8, 0x45, 0xb2, 0x33, 0xd6, 0x34, 0x26, 0xa6, - 0x7f, 0x76, 0x31, 0x13, 0x13, 0x9d, 0xd2, 0xb0, 0x30, 0x0b, 0x0b, 0x3e, - 0x1a, 0x84, 0xb0, 0xbd, 0x81, 0x34, 0x25, 0x73, 0x99, 0x87, 0x1a, 0xc8, - 0x44, 0x34, 0x9d, 0x1a, 0x3d, 0x76, 0x44, 0x1d, 0xe2, 0x22, 0xad, 0x3d, - 0xb2, 0xa3, 0x1c, 0xd5, 0x27, 0x8c, 0xc6, 0x84, 0xdf, 0x33, 0xbe, 0xb2, - 0xa7, 0xb9, 0xc5, 0x6e, 0x48, 0xdc, 0xe9, 0xf8, 0xef, 0xfc, 0xaa, 0x1f, - 0x5e, 0x41, 0x48, 0x1e, 0xe0, 0xb9, 0xd6, 0x6e, 0x7a, 0x9c, 0xa3, 0x98, - 0x4b, 0xfa, 0x90, 0xa4, 0x58, 0x33, 0x85, 0x3b, 0x11, 0x44, 0x83, 0x4b, - 0x1e, 0x0e, 0x5d, 0x11, 0x36, 0x15, 0xe1, 0xbf, 0x15, 0x04, 0x8e, 0x88, - 0xc6, 0x18, 0x53, 0xc3, 0x8d, 0x28, 0x86, 0x25, 0xef, 0x55, 0x7b, 0xf6, - 0x85, 0xf8, 0xed, 0x3b, 0xcf, 0x5d, 0xa6, 0xc7, 0x66, 0xb7, 0xbe, 0x14, - 0xf0, 0x62, 0x89, 0x1f, 0x32, 0x1e, 0x86, 0x2a, 0x93, 0xd5, 0xca, 0x37, - 0x03, 0x0b, 0xf8, 0x0f, 0xca, 0x50, 0x6c, 0x16, 0x2b, 0xf0, 0x77, 0xca, - 0xbb, 0x8e, 0x95, 0x11, 0xef, 0x5b, 0xbe, 0x2f, 0x62, 0x50, 0xb8, 0x3d, - 0xff, 0xfa, 0x30, 0x21, 0xb2, 0x86, 0x3f, 0x50, 0x57, 0x98, 0x79, 0x15, - 0xce, 0x3e, 0xbf, 0x49, 0x58, 0xb0, 0xb5, 0xd7, 0xbe, 0x01, 0x55, 0xee, - 0x60, 0x14, 0x9d, 0x5b, 0x57, 0x48, 0x05, 0x72, 0x6a, 0x23, 0x29, 0xeb, - 0xf3, 0x36, 0x2a, 0xc1, 0xda, 0x5e, 0x4a, 0x63, 0xc4, 0x6b, 0x04, 0xe8, - 0xe8, 0xc1, 0xb5, 0xc4, 0x2d, 0x60, 0x1f, 0xa0, 0x2b, 0x33, 0xa5, 0xb7, - 0x82, 0x59, 0x21, 0xba, 0x13, 0xda, 0x79, 0xda, 0x5a, 0xb1, 0x82, 0x5b, - 0x52, 0x7f, 0x0c, 0x70, 0x75, 0x65, 0xe0, 0x44, 0xb3, 0xca, 0xd0, 0x09, - 0x38, 0x24, 0x83, 0x8e, 0x0c, 0x4c, 0xef, 0x96, 0xe4, 0x04, 0x30, 0x46, - 0x23, 0x6a, 0x28, 0x13, 0x1d, 0x37, 0x14, 0x75, 0x6e, 0xd0, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x21, 0xa2, 0xf0, 0x7d, - 0x29, 0x8f, 0x62, 0x2e, 0xf4, 0x0e, 0x14, 0x9b, 0x60, 0x38, 0xc0, 0x95, - 0xfb, 0x3c, 0x90, 0x5a, 0xa0, 0x1f, 
0x30, 0x09, 0xfc, 0x6d, 0xa9, 0xd1, - 0x7b, 0x0b, 0x7c, 0x78, 0xf9, 0xf6, 0xa8, 0x5e, 0xa6, 0x7a, 0xf6, 0x1c, - 0xab, 0x1b, 0x0e, 0xa9, 0x08, 0xfd, 0xd9, 0x97, 0x08, 0x24, 0x2b, 0xda, - 0x08, 0x8b, 0x0c, 0x07, 0x70, 0x15, 0xa8, 0x0c, 0x86, 0xfc, 0xd1, 0x84, - 0xba, 0xd0, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x35, 0x7a, 0xab, 0xaa, 0xbe, 0xd7, 0xad, 0x22, 0x99, 0x46, 0xbb, 0x78, - 0xfd, 0x47, 0x8f, 0x2a, 0x4a, 0xa6, 0x2f, 0x8d, 0x15, 0x07, 0xed, 0x26, - 0x1d, 0xb3, 0x12, 0xd3, 0x88, 0x0f, 0xf1, 0x75, 0x2a, 0x07, 0x62, 0xac, - 0xbf, 0x52, 0x4a, 0xc3, 0x12, 0xe5, 0x3c, 0xea, 0xa6, 0x1e, 0x57, 0x90, - 0x56, 0x60, 0x7d, 0xcf, 0x4b, 0x65, 0xaf, 0xee, 0x17, 0x56, 0xbe, 0xd2, - 0x38, 0x3f, 0xd6, 0xbc, 0xef, 0xa7, 0x32, 0xb7, 0x10, 0xe9, 0xbd, 0x97, - 0x45, 0x92, 0x3c, 0xd3, 0x35, 0x2e, 0x59, 0x37, 0x65, 0x5c, 0x7f, 0xd0, - 0x99, 0x9c, 0x01, 0xe9, 0x1f, 0x65, 0xe9, 0xec, 0x0f, 0x2d, 0x46, 0xbc, - 0xd4, 0x8f, 0x51, 0x1c, 0xa0, 0xa4, 0x9b, 0x4f, 0x95, 0x54, 0xb0, 0x50, - 0x74, 0xfa, 0x0f, 0xe6, 0x55, 0x81, 0xce, 0x0f, 0xd1, 0x25, 0x56, 0xc8, - 0x2f, 0x3a, 0x65, 0xd4, 0x86, 0x4a, 0x8e, 0xff, 0x5a, 0xcc, 0x67, 0x96, - 0xcc, 0x65, 0x0d, 0x20, 0xee, 0xba, 0x6b, 0xcb, 0xde, 0x10, 0x2f, 0xbf, - 0x67, 0x6d, 0xbe, 0xef, 0x72, 0xfc, 0x25, 0x62, 0xbf, 0xbb, 0xc5, 0xe0, - 0x7b, 0x4c, 0x32, 0xc5, 0xdb, 0x9f, 0xb5, 0xe2, 0x75, 0x8a, 0xba, 0xbb, - 0x69, 0x28, 0xb6, 0x41, 0x25, 0x83, 0x67, 0x35, 0x1b, 0xd7, 0xb3, 0xd7, - 0x58, 0x54, 0x8a, 0x0b, 0x7c, 0xf3, 0x05, 0xcf, 0x2c, 0x78, 0x70, 0xc6, - 0xed, 0x7e, 0x56, 0xb6, 0x4e, 0x48, 0xaa, 0x57, 0xc4, 0xb0, 0xb2, 0xa0, - 0xca, 0x50, 0xe1, 0xc7, 0x41, 0xea, 0xac, 0x5f, 0x18, 0x13, 0xe5, 0x85, - 0x78, 0x3f, 0x05, 0xf3, 0xfd, 0x74, 0x7a, 0x42, 0x61, 0x91, 0x19, 0xc6, - 0x19, 0xe9, 0xd2, 0x78, 0x2c, 0xb1, 0xa3, 0x7f, 0x62, 0xea, 0x2a, 0x35, - 0x1c, 0x55, 0xa3, 0xf7, 0xdc, 0xec, 0x48, 0x23, 0x99, 0x8d, 0xe1, 0x4d, - 0x45, 0xad, 0x92, 0xc6, 0xf4, 0xa2, 0xe5, 0xe6, 0x58, 0xe4, 0xd5, 0x37, - 0xd0, 0x47, 0x0b, 0x64, 0x68, 0x48, 
0x7e, 0xeb, 0xbe, 0x5e, 0x74, 0xd1, - 0xc4, 0xa5, 0x60, 0xd0, 0x30, 0x62, 0xbc, 0x81, 0xc4, 0x01, 0x68, 0x18, - 0xf3, 0xac, 0x9d, 0xb1, 0x4d, 0xdd, 0x8b, 0xd2, 0x54, 0x5d, 0xd1, 0x1c, - 0xee, 0x75, 0x9e, 0x99, 0x42, 0x69, 0x38, 0xcc, 0x66, 0x24, 0xd9, 0x8f, - 0x70, 0x98, 0xc3, 0x5e, 0x08, 0xf0, 0xd8, 0x2d, 0xe6, 0x52, 0x48, 0xdf, - 0xd0, 0x03, 0x04, 0x92, 0xab, 0xa1, 0xa1, 0x2f, 0x7d, 0x84, 0xb2, 0x82, - 0x51, 0x56, 0x74, 0x4a, 0x94, 0xff, 0xd2, 0xe4, 0x4e, 0x1a, 0xbd, 0x18, - 0xab, 0x33, 0x68, 0x0e, 0x4f, 0x99, 0x1d, 0x7e, 0x02, 0x3f, 0x1f, 0x50, - 0x05, 0xf8, 0x59, 0x47, 0x97, 0x98, 0x60, 0xb1, 0x30, 0xb1, 0x14, 0xac, - 0x2c, 0x0a, 0xa8, 0x97, 0x83, 0xf5, 0x5a, 0x5c, 0x87, 0xe5, 0x36, 0x26, - 0xec, 0xb4, 0x94, 0x46, 0x9a, 0xad, 0x2b, 0x9a, 0xb7, 0xac, 0xc4, 0x1a, - 0x55, 0x53, 0xc0, 0x16, 0x91, 0x1c, 0xd6, 0xaa, 0x6b, 0xdd, 0x85, 0x6a, - 0x54, 0xec, 0x7c, 0xa1, 0xd5, 0x18, 0x00, 0x74, 0xd2, 0xf1, 0x7e, 0xad, - 0x7c, 0xa8, 0x85, 0x9b, 0xc0, 0x9f, 0x4f, 0x3b, 0xd9, 0x08, 0xc8, 0x9d, - 0x31, 0x22, 0x7a, 0x53, 0xa8, 0xbd, 0x00, 0xdf, 0xe8, 0x39, 0x52, 0xe9, - 0x14, 0x74, 0x7b, 0x53, 0xf9, 0xbd, 0x29, 0x8e, 0x5d, 0xf2, 0x35, 0x3b, - 0xe3, 0x48, 0xbf, 0xa0, 0xc4, 0x3d, 0x40, 0xb4, 0xf2, 0x7c, 0xd0, 0xe3, - 0x17, 0x11, 0x5b, 0xd6, 0x55, 0xd2, 0x54, 0xcf, 0x20, 0x8d, 0x74, 0x4a, - 0x6b, 0xe9, 0x5d, 0xfe, 0x72, 0x14, 0x6a, 0x11, 0x8b, 0x14, 0x19, 0xba, - 0x63, 0xe4, 0x6b, 0x39, 0xb4, 0x90, 0x67, 0x79, 0x56, 0x31, 0xd3, 0xb5, - 0xeb, 0x9e, 0x95, 0x4b, 0x1e, 0x04, 0x20, 0xd8, 0xbe, 0xe8, 0x1c, 0xd7, - 0x95, 0xcb, 0x57, 0x60, 0xe6, 0x11, 0x35, 0x42, 0x90, 0xfd, 0xb2, 0xe4, - 0x9b, 0x24, 0x70, 0xc0, 0xc3, 0xa9, 0x8a, 0xc9, 0x46, 0xd0, 0xea, 0xc9, - 0x93, 0x7d, 0x9f, 0x64, 0x12, 0x54, 0x09, 0xb7, 0xc2, 0x4d, 0x6e, 0xcc, - 0x60, 0x07, 0x36, 0x31, 0x64, 0x3d, 0x1e, 0xd3, 0x86, 0x47, 0x47, 0x42, - 0x76, 0xb6, 0xf0, 0xe5, 0xb4, 0xe7, 0xbe, 0x47, 0x91, 0x78, 0xbe, 0x06, - 0xf1, 0x6e, 0x58, 0xce, 0x32, 0x13, 0x26, 0x34, 0x92, 0xae, 0xb2, 0x29, - 0xd0, 0x30, 0x55, 0xfd, 0x89, 0x6a, 
0xbf, 0x3e, 0xdf, 0x11, 0x39, 0xe4, - 0xfd, 0x56, 0xd7, 0x2f, 0x89, 0x96, 0x08, 0x54, 0xaa, 0xab, 0x8b, 0xfa, - 0x65, 0xe5, 0x64, 0xff, 0x24, 0x25, 0x8f, 0x7d, 0xf6, 0xb1, 0x7f, 0x2f, - 0xa6, 0xf6, 0x46, 0xab, 0x61, 0xfd, 0x47, 0xad, 0x6d, 0x38, 0x6d, 0xc1, - 0xe9, 0x4a, 0xf1, 0x85, 0x05, 0x0e, 0x69, 0x48, 0x7c, 0xa6, 0x76, 0x61, - 0xe3, 0x94, 0xf2, 0xd6, 0x7a, 0x9c, 0x79, 0xc0, 0x2a, 0x51, 0x23, 0xc6, - 0xaf, 0x29, 0x04, 0x0f, 0x47, 0xc2, 0x93, 0xd7, 0x64, 0xe5, 0x37, 0x2e, - 0x53, 0x3b, 0xb7, 0x7c, 0x9c, 0xb4, 0x63, 0x13, 0xc7, 0x56, 0x90, 0xe9, - 0x53, 0xd5, 0x86, 0x2b, 0x96, 0x41, 0x42, 0x56, 0xc5, 0x16, 0xd7, 0x9e, - 0x30, 0xce, 0xa1, 0x0d, 0x93, 0x5d, 0x11, 0x07, 0xb2, 0x95, 0xfd, 0xf6, - 0x0b, 0x28, 0x95, 0x1a, 0x8f, 0xfa, 0xe1, 0x57, 0x7e, 0x06, 0xff, 0x18, - 0xaf, 0xe3, 0x4f, 0x3c, 0x34, 0x5b, 0xd4, 0x46, 0x1a, 0xd1, 0xd1, 0x7e, - 0x55, 0xba, 0x5d, 0x2a, 0x1f, 0x42, 0x49, 0x95, 0x75, 0x5f, 0x80, 0x60, - 0x02, 0x01, 0xdb, 0x36, 0xad, 0x68, 0x69, 0x1e, 0x0b, 0x90, 0x3f, 0xa6, - 0xb6, 0x2f, 0x66, 0xa6, 0x7d, 0x81, 0x8c, 0xa0, 0xee, 0x05, 0x95, 0xbc, - 0xb3, 0x7c, 0x18, 0xd4, 0x1b, 0x40, 0x96, 0xf5, 0x05, 0x9d, 0x27, 0x3b, - 0x78, 0xfc, 0x19, 0x18, 0xc0, 0x61, 0xa0, 0xd6, 0xf9, 0xc0, 0x3f, 0xe5, - 0x48, 0x35, 0x0f, 0x8b, 0x0d, 0xfb, 0x31, 0xb7, 0x32, 0x40, 0x1d, 0x69, - 0x12, 0x5a, 0x23, 0xf0, 0xce, 0xe9, 0x5e, 0xa6, 0x68, 0x6b, 0xe1, 0xe2, - 0x68, 0x07, 0x02, 0x0d, 0x7a, 0xc2, 0x0a, 0x40, 0x10, 0x5e, 0x94, 0xba, - 0x77, 0x1d, 0xf7, 0xac, 0xec, 0x79, 0xa9, 0xa1, 0x8a, 0xb8, 0x49, 0x32, - 0x08, 0xe0, 0x18, 0xa8, 0x3d, 0x69, 0x41, 0x5d, 0x30, 0x3b, 0xb6, 0x91, - 0x46, 0x8d, 0x81, 0x10, 0xb0, 0xc2, 0xed, 0xa0, 0x4e, 0x59, 0x48, 0xd8, - 0x64, 0x7d, 0x2d, 0x46, 0xf2, 0x8a, 0x2e, 0x5d, 0x0c, 0x4d, 0x9f, 0xfe, - 0x7b, 0x5e, 0xbf, 0x1a, 0x78, 0xdf, 0xfc, 0x0f, 0x04, 0x37, 0x72, 0x1a, - 0x09, 0xb8, 0x6e, 0x1b, 0xf1, 0x18, 0x7d, 0x83, 0x44, 0xaa, 0x9b, 0x71, - 0xe1, 0x03, 0x04, 0x83, 0xe5, 0xaa, 0xc0, 0xd4, 0xa7, 0x80, 0x10, 0x35, - 0x09, 0xae, 0xf7, 0xe1, 0x5e, 0x7c, 
0x31, 0x20, 0x43, 0x82, 0xda, 0x07, - 0x39, 0xfe, 0x8f, 0x9d, 0x70, 0x3c, 0x57, 0x43, 0x01, 0x51, 0x37, 0x2e, - 0x97, 0xef, 0xcf, 0x05, 0x44, 0x75, 0x69, 0xf7, 0xdb, 0xda, 0x80, 0x78, - 0x0c, 0xcc, 0xc1, 0x49, 0xac, 0x3b, 0x7e, 0x27, 0x6a, 0xbb, 0xdf, 0x45, - 0x5b, 0x3b, 0x29, 0xf6, 0x1b, 0xa9, 0x25, 0xf9, 0x2f, 0xcf, 0x37, 0x71, - 0x33, 0xb4, 0x90, 0xd7, 0x9b, 0x87, 0x41, 0x15, 0xd1, 0xa6, 0x39, 0xa7, - 0xa9, 0xcd, 0x66, 0x29, 0x59, 0xb4, 0x53, 0x12, 0xa1, 0x20, 0xd5, 0x04, - 0xca, 0x40, 0x31, 0xfa, 0x6f, 0xbb, 0x92, 0x04, 0xf3, 0xc2, 0x10, 0x0d, - 0xc1, 0x19, 0x78, 0x8c, 0x82, 0xed, 0x92, 0x3a, 0x6b, 0xd1, 0x3d, 0xe8, - 0xac, 0x55, 0xe4, 0x8c, 0xc6, 0xd4, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0xc2, 0x1d, 0x86, 0xe4, 0xf6, 0xa1, 0xbe, 0xf5, - 0xf3, 0x36, 0x9d, 0x32, 0x80, 0x17, 0x3b, 0x1f, 0x18, 0x21, 0xed, 0xa7, - 0xf5, 0xaf, 0xf1, 0x94, 0xe2, 0xa7, 0x08, 0xd5, 0xca, 0x18, 0x45, 0xf5, - 0x68, 0x94, 0x82, 0x61, 0xf7, 0xb7, 0xb2, 0xfa, 0xd4, 0x5e, 0x32, 0xd0, - 0xf0, 0x20, 0x66, 0x83, 0xd1, 0x6b, 0x3c, 0xdf, 0x73, 0xeb, 0x73, 0x82, - 0x09, 0x9b, 0xd0, 0xc5, 0xb0, 0x9f, 0x01, 0x77, 0x85, 0xcc, 0x6e, 0x23, - 0xb7, 0x00, 0x45, 0xe0, 0xa6, 0x01, 0x29, 0x1d, 0x8b, 0xc4, 0xe0, 0xc2, - 0xe0, 0x4f, 0x3b, 0x07, 0xd5, 0xac, 0x6b, 0x88, 0xb8, 0xa4, 0xe2, 0x5c, - 0x19, 0xe9, 0x98, 0x72, 0xa5, 0x6b, 0xf5, 0xa4, 0xf7, 0x15, 0xaf, 0xfb, - 0xb4, 0x80, 0x9a, 0xe3, 0xa5, 0x35, 0x2f, 0x45, 0x81, 0xf1, 0x8b, 0x2d, - 0x26, 0x5c, 0x65, 0xa9, 0x5b, 0x6e, 0x83, 0xc3, 0x62, 0x2f, 0x84, 0xef, - 0x11, 0xa5, 0x58, 0x48, 0xe9, 0x67, 0x7e, 0xd3, 0x0b, 0x5d, 0x51, 0x80, - 0x39, 0x08, 0x8e, 0xc1, 0x0d, 0x04, 0x11, 0x5f, 0x72, 0x64, 0x1f, 0x83, - 0xf8, 0xd3, 0x09, 0x38, 0xb6, 0x7f, 0x50, 0x78, 0x27, 0x20, 0xe5, 0xbd, - 0x16, 0xbf, 0x51, 0xd8, 0x4f, 0x67, 0x60, 0xf6, 0x9e, 0xff, 0x08, 0xfe, - 0xc6, 0x96, 0xd6, 0x64, 0x94, 0x28, 0xc6, 0x9a, 0x09, 0x1a, 0x34, 0x08, - 0x31, 0x4b, 0x0b, 0x97, 0x5a, 0x18, 0x72, 0x49, 0xe9, 0x1d, 0xbb, 0x9c, - 0xed, 0x7e, 0xb5, 0xc5, 0xa7, 0xf4, 
0x25, 0x7a, 0x26, 0xe9, 0x15, 0x61, - 0x85, 0x32, 0xc9, 0xb3, 0xcf, 0x95, 0xbf, 0x35, 0x10, 0x2d, 0x71, 0xfe, - 0x03, 0xd6, 0x69, 0x75, 0x8d, 0xb7, 0x16, 0xa7, 0x3d, 0x0e, 0xb7, 0x55, - 0x6d, 0xa7, 0x9f, 0x10, 0x7e, 0x7e, 0xff, 0x39, 0xee, 0x8e, 0xa7, 0x81, - 0x7d, 0x11, 0xea, 0xa9, 0xd6, 0xed, 0x54, 0xf8, 0xd2, 0xd5, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xf9, 0xde, 0x41, 0xe7, - 0xa6, 0x88, 0x53, 0x76, 0x5a, 0x26, 0xc3, 0x5c, 0xf2, 0x58, 0x68, 0x9c, - 0xc7, 0x4e, 0x53, 0x18, 0x53, 0x67, 0x39, 0x23, 0x96, 0xb0, 0xef, 0x58, - 0x29, 0xe1, 0x68, 0xd8, 0xce, 0xc0, 0x41, 0xc2, 0x35, 0x5f, 0x74, 0xfa, - 0xdf, 0xc7, 0x0f, 0x80, 0x50, 0xd1, 0xf6, 0x5a, 0x3a, 0x81, 0xe0, 0xd9, - 0x9b, 0x47, 0x96, 0xcd, 0xc5, 0x0f, 0x91, 0x12, 0x81, 0x77, 0x1e, 0xef, - 0x2e, 0xba, 0x16, 0x51, 0x70, 0x78, 0xdc, 0xa3, 0x84, 0x12, 0x7c, 0x9e, - 0x21, 0x7d, 0xa3, 0x5f, 0xce, 0xa1, 0x25, 0x84, 0x99, 0xa4, 0x2d, 0xa6, - 0x0f, 0x95, 0xef, 0xef, 0x31, 0xe6, 0xf2, 0x18, 0x08, 0x47, 0xd2, 0x5a, - 0x39, 0x01, 0x7a, 0xca, 0xd3, 0x03, 0xb1, 0xc2, 0x48, 0xf4, 0x1f, 0x6d, - 0xc2, 0x8c, 0x5c, 0xda, 0xf5, 0x10, 0xed, 0xfc, 0x2e, 0x0c, 0xb3, 0x52, - 0xaa, 0xa9, 0xed, 0xbc, 0x41, 0xcc, 0xd4, 0x4b, 0x1c, 0xd0, 0xa3, 0x1d, - 0xf4, 0xe7, 0x48, 0x34, 0x4e, 0xcf, 0x3b, 0xb3, 0x71, 0x06, 0xbe, 0x0c, - 0x35, 0xbb, 0xb4, 0x17, 0xd8, 0x8b, 0xba, 0xdd, 0x32, 0x30, 0x51, 0xb1, - 0xb1, 0xd6, 0x3a, 0xdc, 0x3b, 0x25, 0x9a, 0x57, 0xc7, 0x4d, 0xd3, 0x75, - 0x93, 0x59, 0x3e, 0x9b, 0x10, 0xcf, 0xdb, 0x38, 0x75, 0x51, 0xb2, 0x2a, - 0x48, 0x78, 0xfc, 0xaa, 0xe3, 0x91, 0xe7, 0x93, 0xe7, 0x0a, 0x07, 0x2c, - 0xf8, 0x88, 0x93, 0xde, 0x2f, 0xba, 0x7b, 0x72, 0xcd, 0x92, 0xdd, 0xb1, - 0xac, 0x1e, 0xe4, 0xe3, 0x5d, 0xa4, 0x7f, 0x86, 0xa7, 0xcb, 0xb5, 0x81, - 0x86, 0xf1, 0xf5, 0xad, 0xd6, 0x36, 0x08, 0x09, 0x9f, 0x75, 0x6f, 0x4a, - 0x5b, 0x30, 0xf8, 0xaf, 0xd2, 0xbc, 0xb5, 0xbe, 0xf2, 0xeb, 0x9b, 0xbc, - 0x11, 0xd4, 0x0c, 0x14, 0xa6, 0x6f, 0x43, 0xd3, 0xc9, 0x4e, 0xca, 0x9b, - 0x4e, 0x46, 0x60, 0x4c, 0x63, 0xcc, 
0x07, 0x36, 0x8c, 0xf2, 0xd1, 0x93, - 0x7a, 0x51, 0x49, 0x15, 0xbf, 0xbf, 0x9e, 0x82, 0x21, 0x06, 0xa0, 0x39, - 0x11, 0x1d, 0x6c, 0x41, 0x72, 0xcd, 0x2a, 0x8a, 0x4a, 0xd0, 0x13, 0x6c, - 0x56, 0xf4, 0x00, 0x48, 0xaf, 0xab, 0xdf, 0xa9, 0xe9, 0xa6, 0xaa, 0x06, - 0x61, 0x79, 0xc4, 0x57, 0x42, 0xca, 0x12, 0x18, 0xcf, 0x81, 0xec, 0x79, - 0x19, 0xd2, 0xd2, 0xe3, 0x1d, 0xc6, 0x6c, 0xd0, 0xd6, 0x0a, 0xfb, 0x70, - 0x42, 0x28, 0x25, 0x23, 0xb6, 0x23, 0x15, 0x28, 0x5e, 0x9f, 0x49, 0xf2, - 0x7b, 0x69, 0x74, 0xa5, 0xb9, 0x26, 0x81, 0xfe, 0x39, 0x3e, 0x3f, 0xc8, - 0x7e, 0x9e, 0x5e, 0x8e, 0xf2, 0xdb, 0x6b, 0xfd, 0xe1, 0xc3, 0x01, 0x4a, - 0xba, 0x8f, 0x33, 0x71, 0x09, 0x80, 0x5d, 0x9c, 0x58, 0x64, 0xb7, 0x90, - 0x13, 0x2a, 0xe9, 0x1d, 0x07, 0x2c, 0x06, 0x70, 0x43, 0x0d, 0xb6, 0x57, - 0x02, 0x3c, 0xbe, 0x3c, 0x42, 0xab, 0x77, 0x15, 0x0e, 0x98, 0xfb, 0xf2, - 0x1d, 0x14, 0xd9, 0xb8, 0xd1, 0x59, 0x2a, 0x67, 0x6f, 0xfc, 0x59, 0x39, - 0x33, 0xe0, 0x49, 0x0b, 0x4e, 0x65, 0x81, 0x9f, 0x71, 0xf2, 0xa5, 0x90, - 0x4f, 0x24, 0xc7, 0x05, 0xfb, 0x77, 0x1e, 0x14, 0xca, 0x2f, 0xfc, 0xac, - 0xec, 0xbf, 0xa2, 0x69, 0x15, 0x0a, 0x6b, 0xa9, 0xa0, 0x74, 0xee, 0xad, - 0xa9, 0x50, 0x4d, 0x4d, 0xab, 0x6e, 0xc1, 0xb3, 0xda, 0xbb, 0xbd, 0xab, - 0x00, 0x05, 0x14, 0xc1, 0xc4, 0x53, 0x7b, 0x78, 0x97, 0x68, 0x3c, 0x05, - 0xf2, 0xed, 0x87, 0xca, 0x86, 0xd1, 0xdf, 0xda, 0xb3, 0x2f, 0x17, 0x87, - 0x87, 0x2f, 0xd8, 0xe9, 0xb2, 0x96, 0xdc, 0x7f, 0x22, 0xf1, 0x2a, 0x9f, - 0xfe, 0x54, 0x55, 0xa1, 0x96, 0xab, 0x9f, 0x61, 0x74, 0xcd, 0x4d, 0x77, - 0x38, 0x02, 0x23, 0x29, 0x28, 0x5b, 0xfc, 0x86, 0x17, 0x40, 0xd4, 0x42, - 0x2a, 0x9b, 0x84, 0xf7, 0x67, 0x2b, 0x3a, 0xc1, 0x31, 0x89, 0x4b, 0x67, - 0xd1, 0x7d, 0x6b, 0x36, 0xec, 0x69, 0x6b, 0x24, 0xca, 0xd6, 0x2d, 0xbb, - 0x21, 0xc8, 0x0c, 0x53, 0x41, 0x29, 0x0b, 0xc1, 0xfe, 0xd5, 0xa3, 0x4c, - 0x66, 0x2f, 0xc7, 0xf1, 0xa8, 0xc0, 0x3d, 0x9a, 0xb9, 0x09, 0x50, 0x3f, - 0x09, 0x87, 0xa4, 0x3f, 0x7a, 0x33, 0xef, 0xf0, 0xfb, 0x77, 0x02, 0x7d, - 0x92, 0xaf, 0x73, 0xaa, 0xcc, 0x3f, 
0x66, 0x56, 0xd0, 0x21, 0xd1, 0xe8, - 0x0e, 0x47, 0x03, 0x5e, 0x3b, 0xe9, 0xa2, 0xe3, 0x83, 0x0b, 0x73, 0xd3, - 0xaa, 0x94, 0x80, 0xef, 0x7c, 0xdf, 0xde, 0x86, 0xc3, 0xa9, 0x62, 0x34, - 0x76, 0xee, 0x4d, 0x15, 0x73, 0x7b, 0xd7, 0x6d, 0xd4, 0x21, 0x05, 0xd4, - 0xcf, 0xf3, 0x54, 0xdc, 0x49, 0x5f, 0x5a, 0x2a, 0x37, 0x19, 0x89, 0x61, - 0x1d, 0x95, 0x17, 0x8b, 0x09, 0x95, 0x5d, 0x9f, 0xde, 0x86, 0x03, 0x93, - 0x76, 0xec, 0x54, 0xec, 0x13, 0xc3, 0xf9, 0x38, 0x8f, 0xa9, 0x11, 0xf0, - 0x9a, 0x0e, 0x5e, 0x38, 0x69, 0xeb, 0x62, 0x41, 0x9e, 0xd0, 0x1b, 0x59, - 0x8c, 0xfd, 0x16, 0xfa, 0xd8, 0x99, 0x0d, 0x83, 0x7e, 0xba, 0x5b, 0xc6, - 0x59, 0xe1, 0xae, 0xba, 0xb9, 0xb8, 0xba, 0xa5, 0x4d, 0x20, 0x00, 0xc9, - 0x0c, 0xe1, 0x77, 0xdf, 0xc4, 0x95, 0xca, 0x7c, 0xa5, 0xef, 0x0a, 0xed, - 0x9b, 0x31, 0x06, 0xe1, 0xc9, 0xa3, 0x88, 0x0a, 0xcc, 0x3d, 0xc8, 0xb6, - 0x01, 0xe2, 0xa9, 0x29, 0x03, 0x8a, 0x28, 0xf8, 0x0d, 0x70, 0x77, 0xb9, - 0xe1, 0x1b, 0x06, 0x19, 0x86, 0xc1, 0xd3, 0xcf, 0x6b, 0x9c, 0x09, 0x70, - 0x50, 0xed, 0xb5, 0xf6, 0x69, 0xcc, 0xac, 0x30, 0x6a, 0x1f, 0x1d, 0xe6, - 0x75, 0x33, 0xab, 0x55, 0x48, 0xfa, 0x81, 0xb8, 0x06, 0x3a, 0x78, 0xee, - 0xde, 0xef, 0xe2, 0x17, 0xc4, 0x3e, 0xe5, 0x22, 0xa7, 0xd1, 0x45, 0x5b, - 0x57, 0xb0, 0xde, 0x69, 0x30, 0xd1, 0x9a, 0xd7, 0x6b, 0x0e, 0x7a, 0x30, - 0x0d, 0xb5, 0xec, 0x60, 0xa7, 0x05, 0x87, 0x42, 0x4b, 0x92, 0x1f, 0x68, - 0x8e, 0x1a, 0x90, 0x84, 0x27, 0x2a, 0xc0, 0xd2, 0xff, 0xbc, 0x8e, 0x34, - 0x53, 0x9d, 0x04, 0x50, 0xcb, 0x79, 0xd9, 0x55, 0xd5, 0x4d, 0x3c, 0xe2, - 0xb4, 0x9b, 0x57, 0x07, 0x1f, 0xce, 0xd0, 0xa7, 0x84, 0xe1, 0xb7, 0x3a, - 0xaf, 0xc5, 0x67, 0x64, 0xbc, 0x02, 0xbe, 0xb0, 0x65, 0x7e, 0xb0, 0x4c, - 0xc2, 0x2d, 0xcd, 0xf8, 0x60, 0xcb, 0xfe, 0xd1, 0x8d, 0x14, 0x5a, 0xd3, - 0x38, 0xd4, 0x71, 0x5a, 0xca, 0xbb, 0xfe, 0x0e, 0x54, 0xf9, 0xb4, 0x25, - 0xa5, 0x71, 0x13, 0x95, 0x14, 0xdc, 0x86, 0xb8, 0x21, 0xa7, 0x2e, 0x13, - 0xc6, 0x2f, 0xce, 0xe7, 0x6c, 0xb8, 0x0d, 0xc9, 0xe4, 0xc4, 0x64, 0x12, - 0x78, 0x1c, 0x95, 0x92, 0xc2, 0xec, 
0xaa, 0xd3, 0xc3, 0x3a, 0xd2, 0xe8, - 0x95, 0xf0, 0x6b, 0x03, 0x8c, 0xcf, 0x6b, 0xdb, 0x21, 0xa0, 0xcf, 0xf4, - 0x05, 0xc8, 0xe7, 0x77, 0x05, 0x55, 0x7b, 0x6b, 0xfa, 0x96, 0xf1, 0x7c, - 0x30, 0x62, 0x75, 0xbe, 0x6e, 0xea, 0xba, 0x9f, 0x40, 0x2e, 0x9a, 0x86, - 0x93, 0xcc, 0x38, 0xf7, 0xee, 0xd8, 0xbb, 0x24, 0xcd, 0x85, 0x3e, 0x85, - 0x16, 0x8c, 0x33, 0x23, 0x73, 0xe6, 0x43, 0xc4, 0x67, 0xbf, 0xef, 0x85, - 0xb1, 0x44, 0xf9, 0x55, 0x93, 0x4d, 0x0b, 0x8e, 0xc1, 0x42, 0x13, 0xc6, - 0xc8, 0x09, 0x63, 0xab, 0xb3, 0xc7, 0xc4, 0xa4, 0x8b, 0x72, 0xfb, 0xa5, - 0x99, 0xa1, 0x5d, 0x07, 0x02, 0x82, 0x56, 0x11, 0x3c, 0xc2, 0x5a, 0x55, - 0xf9, 0x3a, 0x93, 0x61, 0x89, 0x46, 0xb7, 0x6a, 0x42, 0x76, 0x1e, 0x70, - 0xde, 0xd9, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x32, 0xc1, 0x61, 0xaa, 0xdb, 0xe9, 0xae, 0x88, 0xcb, 0xf7, 0x28, 0xdd, - 0x82, 0x62, 0x61, 0x41, 0x4e, 0xbb, 0xf9, 0xb7, 0xe8, 0x81, 0x99, 0x18, - 0xe2, 0xa7, 0xb4, 0x7c, 0xb7, 0x08, 0x44, 0x6f, 0x24, 0xb3, 0xda, 0x57, - 0x62, 0x29, 0xc7, 0xa6, 0x84, 0xb1, 0x5d, 0xc5, 0x00, 0x4c, 0x30, 0x16, - 0xf0, 0x0a, 0x74, 0x73, 0xec, 0xaf, 0xb5, 0xde, 0xb0, 0xa7, 0x75, 0x22, - 0x8f, 0x9e, 0x43, 0x01, 0x68, 0xae, 0x91, 0xeb, 0x46, 0x52, 0x3f, 0x2c, - 0x4e, 0xc5, 0xd0, 0xc8, 0x15, 0xea, 0x99, 0xc2, 0x37, 0x5b, 0x68, 0xb5, - 0xce, 0x41, 0x92, 0xbf, 0xd6, 0xdb, 0x85, 0xad, 0x08, 0xd1, 0x11, 0x93, - 0xe8, 0xd4, 0x78, 0x43, 0x3b, 0x7d, 0xcb, 0x42, 0x84, 0xf3, 0x61, 0x88, - 0x9e, 0x6a, 0x73, 0xb9, 0x78, 0x17, 0x9a, 0x9f, 0xfb, 0x97, 0xcb, 0xd6, - 0xb5, 0x3f, 0x00, 0x41, 0xb0, 0x30, 0x2f, 0x6f, 0x89, 0xdd, 0xfa, 0x13, - 0xd1, 0x07, 0xbe, 0x2f, 0xea, 0x91, 0x62, 0xaa, 0xed, 0xcb, 0xfd, 0x07, - 0x82, 0xbb, 0x3f, 0xf4, 0xa6, 0x94, 0x66, 0x71, 0x20, 0x61, 0xac, 0x84, - 0x04, 0x70, 0xf2, 0xd3, 0xdf, 0xac, 0x44, 0xfd, 0x47, 0x26, 0x81, 0x64, - 0xb3, 0xa6, 0x90, 0x2b, 0xd2, 0x2c, 0xd0, 0x77, 0x81, 0x53, 0x45, 0x78, - 0x5f, 0x30, 0x77, 0x91, 0x83, 0x13, 0x33, 0xd1, 0x91, 0xa6, 0x35, 0x21, - 0xcb, 0x26, 0x54, 0x0a, 0xf7, 0x70, 
0x5e, 0xdb, 0xd8, 0x92, 0xc7, 0xdf, - 0xf9, 0x2a, 0x46, 0x91, 0x22, 0x3b, 0xe6, 0xe1, 0x91, 0xeb, 0xa6, 0x78, - 0x81, 0x57, 0xf3, 0x04, 0xdf, 0x34, 0x55, 0x74, 0x0a, 0xfe, 0xf2, 0xbd, - 0xb3, 0xeb, 0xa3, 0x8e, 0x71, 0x15, 0xa9, 0x2f, 0x53, 0xe2, 0xa1, 0x45, - 0xdf, 0xe8, 0x29, 0x40, 0xf1, 0x4b, 0x23, 0xdb, 0x8e, 0xee, 0x19, 0xa8, - 0xd4, 0x15, 0x90, 0x8c, 0x04, 0x46, 0x81, 0x49, 0x92, 0xe5, 0xe1, 0xfe, - 0x99, 0x06, 0xfc, 0x3e, 0x43, 0x58, 0x3b, 0x19, 0x7f, 0xd2, 0x13, 0x65, - 0xc2, 0x64, 0x27, 0x6d, 0x93, 0x6a, 0xcf, 0x48, 0x2a, 0x3d, 0xdd, 0x79, - 0x9f, 0x05, 0x32, 0xeb, 0xfd, 0xb4, 0xd2, 0x1d, 0x16, 0x61, 0x3d, 0x17, - 0x4c, 0xb8, 0xad, 0x63, 0x0e, 0x6b, 0x8a, 0x4a, 0x34, 0x4c, 0xb5, 0x3c, - 0x0f, 0x05, 0x28, 0x8c, 0x8b, 0xdf, 0xf4, 0xa0, 0x49, 0xbf, 0x34, 0x6c, - 0x6a, 0x5f, 0x40, 0x95, 0x48, 0x4b, 0x93, 0x1e, 0x61, 0x6d, 0x58, 0xc3, - 0x86, 0x98, 0x70, 0x11, 0x4e, 0x44, 0x65, 0xc1, 0x0d, 0xea, 0x2f, 0xda, - 0x38, 0x16, 0xbd, 0xd4, 0x7b, 0x3e, 0x31, 0xee, 0x42, 0x4c, 0xdc, 0xe9, - 0x8b, 0x1f, 0xa9, 0xcf, 0xab, 0x60, 0xb5, 0xb1, 0xd2, 0xf2, 0x6a, 0xe9, - 0xbc, 0xcc, 0xcb, 0x60, 0x4a, 0xca, 0x70, 0x79, 0x64, 0x9d, 0x07, 0x1e, - 0xdb, 0xef, 0x34, 0xaf, 0x17, 0x93, 0x6b, 0x60, 0x73, 0x2d, 0x8c, 0x08, - 0x27, 0x1e, 0x46, 0x9f, 0xcb, 0x33, 0xdd, 0x76, 0xef, 0x17, 0x58, 0x9a, - 0x5f, 0x82, 0x78, 0x0f, 0xbf, 0xe7, 0x0f, 0x3a, 0x1e, 0xa8, 0x30, 0xbf, - 0xff, 0xc7, 0xc7, 0x82, 0x8b, 0xc3, 0x65, 0x04, 0xfd, 0x45, 0xc9, 0x88, - 0x99, 0x8e, 0x44, 0xc5, 0x23, 0x1e, 0xbf, 0xf1, 0x95, 0x70, 0x35, 0xe6, - 0x56, 0x4a, 0x53, 0xb2, 0xac, 0x0c, 0xfd, 0xf5, 0x61, 0x26, 0x5b, 0x70, - 0xd6, 0x4c, 0xfc, 0x0f, 0xcc, 0x53, 0x6e, 0x25, 0xca, 0x1d, 0x0c, 0x56, - 0xf7, 0x9c, 0x95, 0xf6, 0x3c, 0x08, 0x0c, 0x64, 0xb1, 0x1c, 0x5c, 0xe6, - 0x25, 0xa4, 0xa3, 0xb7, 0xaf, 0x8b, 0xbc, 0xe1, 0x68, 0xdf, 0x10, 0xab, - 0xbb, 0xd5, 0x30, 0x64, 0x42, 0xf6, 0xe6, 0x9a, 0xb5, 0x59, 0x12, 0x76, - 0x92, 0xac, 0x29, 0xe9, 0x45, 0xdb, 0x2e, 0x62, 0x22, 0x58, 0x24, 0x89, - 0xc8, 0x6a, 0x2a, 0xa7, 0x3f, 0x04, 
0x53, 0x4e, 0x07, 0x41, 0x4e, 0x5f, - 0x95, 0x5f, 0x6e, 0x14, 0x5b, 0xa7, 0xa7, 0xd3, 0x5a, 0xa2, 0x95, 0x4a, - 0xc8, 0xe9, 0x3c, 0x5a, 0x84, 0x50, 0xbc, 0xe1, 0x9c, 0x7a, 0x16, 0xe5, - 0xc7, 0x04, 0x9d, 0x60, 0x2e, 0x7d, 0xb3, 0x77, 0x5d, 0x86, 0x2e, 0xac, - 0x57, 0x2a, 0x31, 0x26, 0x23, 0x6e, 0xcc, 0x7f, 0xb8, 0x36, 0x29, 0xa9, - 0xa8, 0xd9, 0xc6, 0x75, 0xee, 0x16, 0x23, 0x27, 0x0f, 0xe1, 0xb0, 0x3d, - 0x91, 0x3a, 0x26, 0x4a, 0x60, 0x72, 0x14, 0xf9, 0x3c, 0x66, 0x66, 0xe8, - 0x7d, 0x4a, 0x6f, 0x7e, 0x63, 0x58, 0x6a, 0x28, 0x78, 0x50, 0xef, 0x3b, - 0x9d, 0xeb, 0xb6, 0x4b, 0x5d, 0x55, 0x80, 0x84, 0x97, 0x9b, 0x74, 0x4b, - 0x5c, 0x09, 0x1d, 0xe7, 0x57, 0xfc, 0x40, 0x3f, 0xa9, 0xbd, 0xdf, 0x61, - 0x2a, 0x89, 0x62, 0x51, 0xfc, 0x24, 0xee, 0xee, 0x97, 0x10, 0xca, 0xb6, - 0x0e, 0x8e, 0x71, 0x67, 0x2a, 0x79, 0x4f, 0xc4, 0xe6, 0x3e, 0x27, 0xc2, - 0x9b, 0x85, 0xfd, 0xde, 0xfb, 0x58, 0x75, 0xf3, 0x1c, 0x31, 0xa2, 0x56, - 0x3e, 0xdc, 0x24, 0xf4, 0x4f, 0xcb, 0x5a, 0x1a, 0x77, 0x5c, 0x28, 0xd1, - 0x5a, 0x55, 0xa9, 0x8c, 0xb5, 0xdd, 0x77, 0x93, 0x58, 0xd8, 0x2f, 0x7d, - 0x5a, 0x67, 0xa1, 0x95, 0x0a, 0xd2, 0x6a, 0x93, 0xa6, 0xf0, 0x5f, 0x7f, - 0x0a, 0x29, 0xdb, 0x1d, 0x8c, 0xa7, 0x12, 0x0a, 0xf4, 0xc9, 0xcd, 0x70, - 0xd1, 0xbd, 0x48, 0xd4, 0x9a, 0xbb, 0xbb, 0x24, 0xbf, 0x52, 0x25, 0xb9, - 0x75, 0xc2, 0x17, 0x36, 0x6f, 0x4a, 0xc0, 0x53, 0x6d, 0x38, 0xfb, 0x7a, - 0x60, 0xc8, 0x5d, 0x03, 0xc1, 0x1c, 0x0c, 0x31, 0xf0, 0x59, 0xed, 0x0a, - 0x5f, 0x84, 0xf2, 0x89, 0x6c, 0xb4, 0xd5, 0x24, 0x2d, 0x2a, 0xda, 0xbe, - 0x74, 0x1d, 0x22, 0xe2, 0xc6, 0xf0, 0x9b, 0x98, 0x5a, 0x41, 0x11, 0x4c, - 0x51, 0x97, 0x16, 0xa7, 0xc9, 0xd8, 0x53, 0x12, 0x53, 0xdd, 0x22, 0xa9, - 0xf2, 0xae, 0x52, 0x49, 0x02, 0xf9, 0x5c, 0x78, 0x00, 0xa2, 0x64, 0xff, - 0x91, 0x62, 0x20, 0x6a, 0x87, 0x6a, 0x40, 0x01, 0x85, 0x30, 0xf5, 0xdd, - 0xa7, 0x64, 0x0a, 0x85, 0x8d, 0x37, 0x99, 0xcb, 0x03, 0xc8, 0x29, 0x56, - 0x7e, 0x75, 0x4f, 0xa1, 0xc3, 0x76, 0xce, 0xdb, 0xa3, 0xb4, 0x7e, 0x91, - 0x95, 0xbe, 0x53, 0x0e, 0x20, 0xc9, 
0xe7, 0x71, 0x78, 0xad, 0x3d, 0x4c, - 0xbb, 0x59, 0xb9, 0x77, 0xcf, 0x7d, 0x7b, 0xff, 0x15, 0xdb, 0x1d, 0xae, - 0x1f, 0xbe, 0x33, 0x88, 0x01, 0x04, 0x95, 0xe5, 0xe9, 0x6a, 0x1c, 0xbf, - 0xc8, 0xc3, 0x33, 0x3b, 0xd8, 0x2f, 0x75, 0x4a, 0xc3, 0x6f, 0x09, 0x88, - 0x26, 0x46, 0x90, 0x89, 0x53, 0x12, 0x27, 0xc2, 0x7d, 0x23, 0x6b, 0xc4, - 0xe3, 0x0a, 0x0f, 0xc2, 0x86, 0x6d, 0x20, 0x35, 0x82, 0x33, 0xec, 0xdd, - 0xa7, 0x6a, 0xc3, 0xa8, 0x11, 0xdc, 0x02, 0xd9, 0x05, 0x1b, 0x04, 0x75, - 0x92, 0x6c, 0x08, 0x9e, 0x38, 0x72, 0xd9, 0x7d, 0x9b, 0xbc, 0xfd, 0xca, - 0xb8, 0x06, 0x0e, 0x24, 0x89, 0x90, 0xde, 0x52, 0xe4, 0xd1, 0xcc, 0x99, - 0x87, 0x0b, 0x87, 0xbb, 0x5c, 0xa9, 0xab, 0xec, 0xb5, 0xe4, 0xdd, 0x5d, - 0xfa, 0xb1, 0x97, 0x5f, 0x61, 0xf7, 0x58, 0xd6, 0x08, 0x02, 0xf2, 0x51, - 0x7c, 0x7a, 0xe6, 0xf1, 0xcb, 0x43, 0xd0, 0x21, 0x09, 0xb8, 0x82, 0xa9, - 0x52, 0xd9, 0xa8, 0x7f, 0x2b, 0xe1, 0x0f, 0x31, 0xbc, 0x16, 0xa2, 0xce, - 0x35, 0x55, 0x2e, 0xd6, 0xda, 0x38, 0xd9, 0xc2, 0x5e, 0xca, 0x27, 0xd9, - 0xa6, 0xd6, 0x4b, 0xa2, 0x73, 0xc4, 0xce, 0x66, 0x30, 0x60, 0xa2, 0x01, - 0xfa, 0xc1, 0xd6, 0xc8, 0xea, 0xdd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x70, 0xe2, 0x62, 0x68, 0xff, 0x60, 0x67, 0x64, - 0x88, 0xdd, 0x81, 0x79, 0x82, 0xf5, 0x46, 0xf9, 0x7e, 0x0e, 0xa9, 0x26, - 0xf6, 0xcf, 0x5d, 0xef, 0x10, 0x11, 0xe1, 0x71, 0x72, 0x77, 0xcf, 0x02, - 0x7b, 0xf1, 0x6e, 0xc4, 0xb4, 0xfa, 0x2a, 0x12, 0xfe, 0x7e, 0x3c, 0x66, - 0xef, 0x41, 0x98, 0x3a, 0x1f, 0xa9, 0x14, 0x8f, 0x46, 0x22, 0xa0, 0xc2, - 0xee, 0x93, 0x25, 0x34, 0xf2, 0xb7, 0x6d, 0x0a, 0x36, 0xde, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xd4, 0x17, 0x62, 0x25, - 0xfd, 0x5b, 0x75, 0xeb, 0xec, 0x06, 0xc9, 0x39, 0x86, 0x6d, 0xc5, 0x60, - 0x2d, 0x33, 0x3d, 0xce, 0x6a, 0x9f, 0x07, 0x3b, 0xb9, 0x70, 0x0f, 0xc7, - 0x13, 0x46, 0x35, 0x46, 0x26, 0xe4, 0xbc, 0x6e, 0x54, 0x89, 0x29, 0xd5, - 0xa4, 0x94, 0xa0, 0x3a, 0x7a, 0x61, 0xcf, 0xd1, 0x48, 0x27, 0x7a, 0x72, - 0x95, 0xde, 0x93, 0xd1, 0x19, 0x1f, 
0xc9, 0xc8, 0x8f, 0x0d, 0xce, 0x34, - 0x03, 0x39, 0x0a, 0x92, 0x16, 0x09, 0xc4, 0x49, 0xf9, 0x30, 0x2e, 0x19, - 0xd1, 0x69, 0x7e, 0x78, 0x00, 0x25, 0x30, 0x6f, 0x6b, 0xe1, 0xbe, 0xad, - 0xb2, 0x05, 0xde, 0xc7, 0xc2, 0xf7, 0xd5, 0xa7, 0x4d, 0x03, 0x6f, 0x6b, - 0xcd, 0xcb, 0x42, 0xfa, 0x88, 0x16, 0xd5, 0xa6, 0x60, 0x08, 0xd4, 0xa5, - 0x5b, 0x3b, 0x7b, 0xa2, 0xca, 0xa3, 0xa2, 0x5d, 0x63, 0x7f, 0xc0, 0x37, - 0xc5, 0x7e, 0x99, 0x04, 0x5d, 0x9a, 0xb9, 0xa5, 0xac, 0xd1, 0xe2, 0x5d, - 0xb2, 0x2b, 0x7e, 0xbb, 0xb9, 0x66, 0x13, 0xa7, 0x30, 0xbf, 0x80, 0x0c, - 0x2b, 0x8d, 0x45, 0xe1, 0x8d, 0x96, 0x25, 0x27, 0x47, 0x3d, 0x21, 0x7d, - 0x1c, 0x42, 0xac, 0x31, 0x26, 0x47, 0x59, 0xb3, 0x44, 0x85, 0xf2, 0x8e, - 0x7d, 0x01, 0x96, 0x6d, 0xb2, 0x64, 0xc3, 0xfc, 0xa7, 0x82, 0x06, 0x4a, - 0x87, 0x75, 0x9b, 0x99, 0x47, 0x7e, 0xa6, 0x4d, 0x2c, 0x36, 0xff, 0xac, - 0x2b, 0x77, 0x96, 0x52, 0x14, 0x8d, 0x07, 0x0d, 0x28, 0x9d, 0x84, 0xa2, - 0xda, 0xd6, 0x45, 0x3a, 0xd4, 0xe6, 0xb7, 0x9a, 0xf3, 0x34, 0xe3, 0xda, - 0x39, 0xdf, 0x35, 0x9c, 0xe4, 0x87, 0x55, 0xc8, 0x43, 0xd0, 0x61, 0x46, - 0x52, 0x2f, 0x75, 0x63, 0xbb, 0x98, 0x97, 0xeb, 0xfb, 0x15, 0xaf, 0x8e, - 0x96, 0xdc, 0xff, 0x0a, 0x90, 0xda, 0x09, 0x63, 0x28, 0x7b, 0x92, 0x73, - 0x0b, 0xd4, 0x2b, 0x72, 0x2a, 0x86, 0x32, 0xc3, 0xc1, 0x3e, 0xe4, 0x2c, - 0x07, 0x89, 0x53, 0xb7, 0xfe, 0x78, 0x6c, 0x95, 0xb4, 0x62, 0x4d, 0x4b, - 0xfe, 0x6c, 0xfc, 0x5e, 0x4e, 0xa7, 0x8c, 0x07, 0x4f, 0x85, 0x27, 0xe0, - 0x7b, 0xd9, 0x7a, 0xe5, 0x1d, 0xbc, 0x36, 0xda, 0x8e, 0x21, 0xff, 0xb3, - 0x60, 0x2c, 0x5e, 0x23, 0x0f, 0xde, 0x3f, 0xae, 0xa5, 0x3a, 0x50, 0xa9, - 0x99, 0x39, 0x45, 0xaf, 0xd3, 0x5f, 0x4a, 0x15, 0xad, 0x9c, 0x66, 0x7f, - 0x92, 0xe0, 0x02, 0x81, 0x3e, 0x06, 0x6a, 0x5e, 0xd0, 0x0c, 0x42, 0xe7, - 0xcf, 0xe2, 0xeb, 0xa3, 0xe0, 0xf7, 0x2d, 0x8a, 0x21, 0xdb, 0x64, 0x28, - 0x2a, 0xb3, 0x2b, 0xc4, 0xc9, 0xd5, 0x60, 0xaf, 0xfc, 0x15, 0xa1, 0x44, - 0x9c, 0x96, 0x04, 0x42, 0x1c, 0x55, 0x8c, 0xa5, 0xce, 0x80, 0xce, 0x75, - 0x64, 0xa9, 0xf6, 0xa5, 0x5a, 0x0f, 
0x8a, 0x4b, 0x8b, 0x72, 0xcf, 0x3e, - 0xd7, 0xeb, 0xe1, 0xd0, 0xd3, 0x2d, 0x04, 0x6c, 0x9e, 0x02, 0x75, 0x43, - 0x5c, 0xc1, 0x57, 0x66, 0xd9, 0x14, 0x5b, 0x08, 0x10, 0x44, 0x8d, 0x8e, - 0x89, 0xd1, 0x65, 0x27, 0x2a, 0x0b, 0x99, 0x6f, 0x09, 0xa6, 0x20, 0xa5, - 0x75, 0x24, 0xe4, 0xf7, 0xf5, 0xe0, 0xed, 0x79, 0x37, 0x18, 0x13, 0x1c, - 0xd9, 0xd1, 0xf5, 0x69, 0x0c, 0xa5, 0x02, 0xdf, 0x6a, 0xfd, 0x2e, 0x35, - 0x8e, 0xd0, 0x41, 0x91, 0x61, 0x0f, 0x5c, 0xdd, 0x70, 0xbf, 0x1c, 0x49, - 0xcb, 0xe9, 0xc9, 0x33, 0xc4, 0x99, 0x1e, 0x8b, 0x75, 0x48, 0xc2, 0x58, - 0xa4, 0x70, 0x1f, 0xbb, 0xcd, 0xd3, 0x0e, 0x79, 0x25, 0xbe, 0x53, 0xfa, - 0x32, 0x32, 0xf6, 0xb9, 0xf0, 0x0a, 0x52, 0x5b, 0xe0, 0x69, 0xff, 0x43, - 0xda, 0x98, 0x1f, 0xee, 0x54, 0x60, 0xf8, 0x24, 0x43, 0xc5, 0x37, 0x72, - 0xd1, 0xfc, 0x99, 0x9a, 0x3e, 0x24, 0xe0, 0xd9, 0xc2, 0x61, 0x47, 0xb3, - 0x26, 0x09, 0x85, 0x74, 0xa1, 0x2b, 0x4a, 0x70, 0xd0, 0x1b, 0x90, 0x03, - 0x25, 0xd9, 0x22, 0xc2, 0x16, 0x22, 0x3a, 0x62, 0x20, 0xd4, 0x13, 0xce, - 0xa2, 0xc7, 0x02, 0xfb, 0x9a, 0xbf, 0xf1, 0x1c, 0x80, 0x01, 0x97, 0x90, - 0x7f, 0x5a, 0x98, 0x70, 0x30, 0x61, 0x77, 0xe5, 0xd4, 0x3b, 0x03, 0x42, - 0x57, 0x31, 0x5e, 0xc6, 0x64, 0xe1, 0xf4, 0x64, 0x77, 0x21, 0x9b, 0x44, - 0x1c, 0xd9, 0x8c, 0x95, 0x8a, 0xf1, 0xcb, 0x82, 0xac, 0xc1, 0x26, 0x31, - 0xf2, 0x22, 0x41, 0xab, 0xbb, 0x23, 0xd3, 0x8d, 0xcc, 0x5c, 0x9d, 0x9b, - 0x1d, 0x9c, 0x4d, 0xf3, 0x62, 0xde, 0x15, 0x6a, 0x94, 0x8d, 0x24, 0xe7, - 0x52, 0x8d, 0x2a, 0xa4, 0x1d, 0x54, 0x5a, 0xda, 0xaf, 0xab, 0x05, 0x27, - 0x4b, 0xbb, 0xb4, 0xda, 0x0c, 0xb9, 0x20, 0xb3, 0xaf, 0x4a, 0xeb, 0x37, - 0xe5, 0x43, 0xe4, 0xc1, 0xf6, 0x9e, 0xf8, 0x6c, 0xd8, 0xa1, 0x0c, 0xf9, - 0xd1, 0x4b, 0x96, 0xa0, 0x6d, 0x38, 0x64, 0x41, 0xd3, 0x14, 0xfb, 0xad, - 0x89, 0xa9, 0xf7, 0x36, 0x01, 0x0f, 0xbe, 0x8e, 0xd7, 0x76, 0xc6, 0x70, - 0x22, 0x32, 0x8b, 0x08, 0xca, 0x95, 0xbf, 0xcf, 0x5e, 0xb8, 0xc0, 0x3f, - 0xd9, 0xaa, 0x84, 0xab, 0x30, 0x5b, 0xe3, 0x7a, 0x61, 0x32, 0xe5, 0x54, - 0x01, 0x5e, 0xb6, 0x1c, 0x9c, 0x78, 
0x52, 0x2a, 0xa7, 0xf5, 0x29, 0xa6, - 0x0f, 0x14, 0xa5, 0x3a, 0x34, 0xd4, 0xf5, 0xc2, 0xb2, 0x8d, 0x12, 0x7b, - 0x8a, 0x64, 0x00, 0xfd, 0x02, 0x0e, 0x02, 0x26, 0x5a, 0xb9, 0xeb, 0xfd, - 0x30, 0xce, 0x51, 0xec, 0x5f, 0xbc, 0xee, 0x53, 0x21, 0xec, 0x0e, 0xee, - 0xc4, 0x28, 0x1a, 0xec, 0x2a, 0x39, 0x4e, 0xe1, 0x50, 0x11, 0x3f, 0x16, - 0xdd, 0xbf, 0xaf, 0x3e, 0xbe, 0xd4, 0xfe, 0x34, 0x1e, 0x62, 0x3f, 0x5a, - 0xea, 0x05, 0xfc, 0xd5, 0x45, 0x08, 0x47, 0xce, 0x38, 0x3f, 0x75, 0x7e, - 0x0c, 0x3a, 0x2a, 0x14, 0xa7, 0x61, 0xba, 0x3a, 0xa1, 0x41, 0xa2, 0x72, - 0x19, 0xfa, 0x33, 0x43, 0xa7, 0xf4, 0x4e, 0x5b, 0xf9, 0xb1, 0x45, 0x16, - 0x57, 0x8e, 0xb1, 0xad, 0x7d, 0x88, 0xd3, 0x93, 0xa2, 0x08, 0xf3, 0x96, - 0x4d, 0x84, 0x63, 0x08, 0xfa, 0x9d, 0xf3, 0x04, 0x33, 0xbd, 0x7e, 0x7a, - 0xc7, 0x63, 0xc5, 0x31, 0x5a, 0x82, 0x33, 0x90, 0x56, 0x44, 0xe9, 0xd3, - 0xc4, 0xd4, 0x76, 0x29, 0x2f, 0xdb, 0xa3, 0x9d, 0xff, 0xd4, 0xd2, 0xb1, - 0xce, 0xf1, 0xcb, 0x7f, 0x10, 0x3b, 0x90, 0xa4, 0x1b, 0xa0, 0x9b, 0xa7, - 0xfa, 0x27, 0x40, 0x11, 0x35, 0xc9, 0x7f, 0x01, 0x97, 0x76, 0x9f, 0x33, - 0xc5, 0xd6, 0x8d, 0x20, 0x07, 0x73, 0x93, 0x0b, 0x24, 0x88, 0x4e, 0x73, - 0x68, 0x79, 0x92, 0x20, 0x2a, 0x71, 0xed, 0x22, 0x0b, 0xfb, 0x42, 0xb5, - 0xd9, 0xc3, 0xaa, 0xed, 0x45, 0x03, 0x64, 0xde, 0x6f, 0x25, 0x8e, 0x3b, - 0x9a, 0xef, 0xc5, 0x63, 0xc2, 0x7f, 0x34, 0xd0, 0x1b, 0x20, 0xa3, 0xab, - 0x9d, 0x54, 0x41, 0x0e, 0x7b, 0x2e, 0x96, 0x12, 0x75, 0x58, 0xdf, 0xd5, - 0xaa, 0x3c, 0xf2, 0x26, 0xc1, 0xf1, 0x18, 0x37, 0x56, 0xf2, 0xd2, 0x86, - 0x6f, 0xd4, 0x9f, 0x57, 0x2b, 0x32, 0xe9, 0x08, 0x94, 0x53, 0x40, 0xc5, - 0x4d, 0x77, 0x39, 0xc6, 0x4c, 0x63, 0x53, 0xf9, 0xbf, 0x35, 0x08, 0xc5, - 0x0d, 0xd0, 0x89, 0x82, 0xa7, 0x2d, 0x6a, 0xb4, 0x22, 0xb1, 0x10, 0x7f, - 0xcf, 0x2e, 0x21, 0x27, 0x9c, 0x12, 0xc6, 0x0e, 0xca, 0xd2, 0x32, 0xb1, - 0x6d, 0xfd, 0x59, 0x12, 0x23, 0x60, 0x46, 0x89, 0xe0, 0x75, 0x5e, 0xc9, - 0xf4, 0x3d, 0x8a, 0x89, 0xd4, 0x23, 0xc2, 0xbe, 0x30, 0x32, 0x4a, 0x95, - 0x42, 0xe2, 0xff, 0xff, 0x04, 0x00, 
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0xa7, 0x0b, 0x48, 0xe2, 0xeb, 0xd7, 0x12, 0x42, 0x4c, 0x71, 0xfb, 0x25, - 0x17, 0x23, 0x0e, 0x01, 0xa6, 0x21, 0xb9, 0x17, 0x6e, 0xf0, 0x24, 0x66, - 0x9e, 0x9d, 0x0f, 0x71, 0xf8, 0x5b, 0x79, 0xb0, 0x1b, 0x1f, 0xe7, 0xa2, - 0xc0, 0x17, 0x16, 0x08, 0x5e, 0x24, 0x7b, 0xf9, 0x7a, 0x1e, 0x70, 0xe2, - 0x05, 0x40, 0x16, 0x56, 0xe7, 0x79, 0xf2, 0x30, 0xa3, 0xdc, 0xe3, 0x7a, - 0x7e, 0x22, 0x88, 0xc0, 0xf7, 0xc8, 0x5c, 0x93, 0x95, 0x86, 0x02, 0x6c, - 0x73, 0x76, 0xef, 0x03, 0x2d, 0xcb, 0xa5, 0x22, 0xfe, 0x05, 0xbb, 0xe6, - 0xfd, 0x19, 0x8c, 0x8b, 0x67, 0x58, 0x81, 0x81, 0x2d, 0x36, 0xd0, 0xc1, - 0x20, 0xb2, 0x87, 0x87, 0xdb, 0xe4, 0xe5, 0xd1, 0xd1, 0xd5, 0x81, 0x34, - 0x4c, 0xd6, 0x09, 0xa2, 0x5d, 0xcc, 0x99, 0x12, 0xa5, 0x06, 0x0f, 0x06, - 0x7e, 0xbb, 0x67, 0x26, 0x69, 0x15, 0x6e, 0x5f, 0xb1, 0x8e, 0xd6, 0x34, - 0xfc, 0x4d, 0xd9, 0x03, 0xb7, 0x5a, 0xf4, 0xaa, 0x03, 0x00, 0x88, 0x6b, - 0x5a, 0xc9, 0xf2, 0xfb, 0x67, 0x72, 0xbc, 0xf7, 0xb9, 0xdc, 0x97, 0xdf, - 0x80, 0x91, 0xfa, 0x30, 0x18, 0x02, 0x89, 0xc7, 0xc9, 0x62, 0x1d, 0xc0, - 0x0b, 0xa6, 0xfe, 0x7e, 0xb9, 0xa9, 0x1f, 0x11, 0x71, 0xe1, 0xd1, 0xfe, - 0x8d, 0x90, 0x2c, 0x09, 0x82, 0x2e, 0x36, 0x79, 0xa5, 0x75, 0x54, 0xfb, - 0xd3, 0x3c, 0xb4, 0x18, 0x2f, 0x4e, 0x3f, 0x37, 0xc4, 0xf8, 0xc5, 0x59, - 0xa3, 0xfd, 0x0c, 0x62, 0x9e, 0xa8, 0x7a, 0x56, 0xc5, 0x97, 0x89, 0x35, - 0xc7, 0xb0, 0x29, 0x87, 0xbf, 0x6a, 0xdc, 0xb1, 0x2f, 0x01, 0xf4, 0x0d, - 0x7c, 0x25, 0x95, 0x39, 0x81, 0xdd, 0x1a, 0x81, 0x36, 0xc0, 0x6b, 0xbf, - 0x6b, 0x4d, 0xea, 0x23, 0xc0, 0x3e, 0x5c, 0x39, 0xe5, 0x6b, 0x59, 0xa0, - 0x50, 0x02, 0x99, 0xdf, 0x4e, 0xe3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x17, 0x88, 0xf8, 0xda, 0x3d, 0x57, 0x83, 0x63, - 0x76, 0xa0, 0x5c, 0x13, 0x1a, 0x00, 0x64, 0x30, 0x19, 0xfd, 0x2e, 0x9c, - 0x64, 0xb6, 0xda, 0x51, 0x7b, 0x55, 0xe8, 0xc4, 0x67, 0x1b, 0xda, 0xfc, - 0x4c, 0xd0, 0x27, 0x58, 0x56, 0xa1, 0x52, 0xd2, 0xb8, 0xd8, 0xd5, 0x94, - 0x69, 0xcf, 0xd0, 0xd5, 0x72, 0xeb, 
0x2b, 0x05, 0xf3, 0x12, 0xa6, 0xac, - 0xa6, 0xf7, 0x90, 0x24, 0x1f, 0x22, 0x97, 0x5e, 0x8b, 0x7c, 0x2c, 0x30, - 0x61, 0x11, 0x9b, 0xdf, 0x83, 0x2b, 0x10, 0x09, 0x42, 0x77, 0x2b, 0xd9, - 0x43, 0xb3, 0x27, 0x69, 0x75, 0xf2, 0x2e, 0x72, 0xed, 0x50, 0xea, 0xbf, - 0x7f, 0x47, 0x39, 0x9c, 0xf8, 0x1e, 0xce, 0x6f, 0xdd, 0xe8, 0x40, 0xc5, - 0x14, 0x01, 0x7e, 0xbb, 0x0f, 0x43, 0x2d, 0x36, 0x70, 0x54, 0xc6, 0xbe, - 0x69, 0x24, 0xd1, 0x65, 0x49, 0x77, 0xf0, 0xd2, 0x99, 0xb4, 0x50, 0x8d, - 0x98, 0xcb, 0xbf, 0x7a, 0x7c, 0x65, 0xd3, 0x46, 0xcf, 0x90, 0x69, 0x56, - 0x15, 0xa2, 0xae, 0x11, 0x94, 0x60, 0xf9, 0x45, 0x17, 0x54, 0x6b, 0xbd, - 0xeb, 0xd8, 0x74, 0x41, 0x5c, 0xf6, 0x49, 0x0a, 0x14, 0xce, 0x43, 0x1f, - 0x67, 0xc3, 0x6c, 0xf4, 0x01, 0xce, 0x3f, 0x85, 0xed, 0x19, 0xa1, 0xf7, - 0x1b, 0xf8, 0x46, 0x45, 0xb4, 0xe9, 0xa7, 0x1f, 0x2a, 0x65, 0x00, 0x2a, - 0xd3, 0x8b, 0x6a, 0x3b, 0xac, 0x78, 0xab, 0xf4, 0xc8, 0x62, 0x76, 0xc8, - 0x24, 0xf8, 0xf8, 0x08, 0xe0, 0x64, 0x00, 0x64, 0x74, 0x9e, 0x55, 0x2e, - 0xf8, 0xc9, 0xc8, 0x58, 0x0e, 0x1f, 0x27, 0x32, 0xfd, 0x30, 0x24, 0x68, - 0xc8, 0xa4, 0x8c, 0x1c, 0xf3, 0xa7, 0x32, 0xae, 0x84, 0x0a, 0x8a, 0x1e, - 0x11, 0xce, 0xb2, 0x02, 0xf1, 0xb3, 0x5f, 0x7d, 0x5e, 0x54, 0x8c, 0xe0, - 0xeb, 0x46, 0x6e, 0x8a, 0x5f, 0x3f, 0x71, 0x47, 0x2a, 0x8a, 0xe6, 0xf0, - 0xb0, 0x04, 0x49, 0x64, 0xb3, 0x7e, 0x16, 0x09, 0x83, 0x5f, 0x12, 0xe0, - 0x85, 0xb7, 0x36, 0xc0, 0x8a, 0xa5, 0xcd, 0xae, 0xc0, 0xb4, 0xa2, 0x62, - 0x9b, 0xfa, 0x64, 0x18, 0x16, 0x8e, 0xb6, 0x50, 0xf2, 0x9b, 0xc4, 0x7d, - 0x0c, 0x4c, 0x8b, 0x58, 0xcf, 0x9b, 0x87, 0x09, 0xb1, 0x37, 0xbb, 0xaf, - 0xa7, 0x72, 0x79, 0x81, 0x09, 0x55, 0xa1, 0x6a, 0x87, 0xb0, 0x7d, 0xc8, - 0xb0, 0xc1, 0xa4, 0xa9, 0xdf, 0xcf, 0x95, 0x77, 0x36, 0x8e, 0x2b, 0xae, - 0xeb, 0x4b, 0xf9, 0x2a, 0x83, 0x6c, 0x53, 0x3c, 0x89, 0xa6, 0x08, 0xae, - 0x00, 0x4e, 0xb8, 0xf6, 0x34, 0x7c, 0xc6, 0x76, 0x87, 0x1a, 0x02, 0xb0, - 0x89, 0xa3, 0x0f, 0x00, 0xc6, 0x7b, 0xeb, 0xf7, 0x95, 0x40, 0xc5, 0x0d, - 0x6f, 0x74, 0xd8, 0x21, 0x2f, 0x9f, 
0x24, 0xac, 0x43, 0xdb, 0x3a, 0x39, - 0x6c, 0x34, 0x59, 0x62, 0x66, 0xbc, 0x28, 0x7f, 0x8c, 0x64, 0x62, 0x8c, - 0x28, 0x6c, 0xf5, 0x79, 0x24, 0xb1, 0x00, 0x9c, 0x58, 0x6b, 0x09, 0xef, - 0xb0, 0x73, 0xcd, 0x47, 0xbb, 0x52, 0xfd, 0x26, 0x6a, 0xff, 0xb9, 0xf1, - 0xd5, 0x82, 0x59, 0x01, 0xfa, 0x87, 0x14, 0x24, 0x10, 0xb0, 0xf7, 0xdf, - 0xf9, 0x3f, 0x67, 0x19, 0xbd, 0xc7, 0x85, 0xb0, 0xad, 0x47, 0xa8, 0x4c, - 0x3e, 0xb6, 0x2e, 0x8a, 0xb3, 0xcc, 0x35, 0xa0, 0x48, 0xc7, 0x90, 0x81, - 0xb7, 0x53, 0x1c, 0x38, 0x63, 0xf2, 0x2f, 0xa0, 0x71, 0x82, 0xe2, 0x56, - 0xdb, 0x68, 0xe8, 0x5f, 0xf8, 0x42, 0xf2, 0xf6, 0xb8, 0x10, 0x6b, 0x54, - 0x21, 0xa0, 0xc1, 0xfe, 0xcb, 0xce, 0x12, 0xa2, 0x49, 0x51, 0x86, 0x53, - 0x56, 0xec, 0x33, 0xb3, 0x72, 0xce, 0xa4, 0x46, 0xe3, 0x37, 0xcb, 0xc0, - 0x95, 0xaa, 0xe2, 0xa3, 0xc5, 0xe9, 0x36, 0x40, 0xfe, 0xf7, 0xe2, 0x5a, - 0x6d, 0x58, 0x39, 0xb2, 0x41, 0x5d, 0xe2, 0x71, 0x72, 0xd0, 0xf0, 0x5c, - 0x16, 0x88, 0x95, 0x30, 0x0a, 0xfb, 0x8d, 0xda, 0x14, 0x80, 0xf4, 0x15, - 0xf2, 0xf6, 0xac, 0xf3, 0xd8, 0x8d, 0x13, 0x24, 0x2c, 0x74, 0x60, 0x6e, - 0x8c, 0xa1, 0x59, 0xcf, 0x74, 0x7c, 0x2d, 0x0b, 0xbb, 0x06, 0x5c, 0x9d, - 0xcd, 0xf3, 0x1e, 0x4a, 0xba, 0x3f, 0x9c, 0x4a, 0xc4, 0xd7, 0xf9, 0xf0, - 0xa5, 0x56, 0x7f, 0xb0, 0xa2, 0x57, 0xd0, 0xc3, 0xaa, 0xa7, 0xd0, 0x49, - 0xe2, 0x28, 0x9b, 0xc4, 0x64, 0x0c, 0xe0, 0x71, 0x9c, 0x05, 0x04, 0x95, - 0x00, 0x1f, 0x7b, 0xa9, 0xb9, 0xb3, 0x2b, 0x8f, 0x0b, 0x45, 0x1e, 0x23, - 0xaa, 0x27, 0x89, 0x4a, 0xb0, 0x7d, 0x03, 0xdf, 0xae, 0xdb, 0xcb, 0xc4, - 0xec, 0x3b, 0x02, 0xe2, 0x85, 0x3a, 0xb7, 0x25, 0xfb, 0xab, 0xca, 0xc1, - 0x33, 0x00, 0x5b, 0xd2, 0xcf, 0xb0, 0x11, 0x1d, 0x51, 0xb5, 0x5b, 0xea, - 0x94, 0xf7, 0xa0, 0x98, 0x33, 0xba, 0x58, 0xfc, 0x12, 0xea, 0xdd, 0x89, - 0xbd, 0x63, 0x03, 0xbe, 0x7e, 0x3b, 0x69, 0xc4, 0x9d, 0x57, 0x0f, 0xd6, - 0xbe, 0xea, 0x5b, 0xd0, 0x97, 0x63, 0x89, 0xb0, 0xa0, 0xc0, 0xd6, 0x39, - 0xc1, 0x69, 0x12, 0x6a, 0xfb, 0xac, 0x74, 0x7f, 0xfb, 0xf4, 0x7f, 0x38, - 0x44, 0x4c, 0x8a, 0xa2, 0x41, 0x15, 
0xc0, 0x54, 0xc0, 0xed, 0x14, 0x83, - 0xef, 0xbc, 0x9c, 0xc7, 0xdd, 0x21, 0xd6, 0xf0, 0x9b, 0x7f, 0x09, 0xd5, - 0x96, 0xe5, 0xf7, 0xc5, 0xa9, 0xb3, 0x41, 0xb0, 0x9d, 0xeb, 0x49, 0x68, - 0x9d, 0x2b, 0xea, 0x47, 0x80, 0x3b, 0x54, 0xb8, 0xf4, 0x14, 0x5e, 0xd6, - 0x66, 0x89, 0x04, 0xb3, 0x00, 0xa3, 0xa8, 0x32, 0x62, 0x2e, 0xc3, 0x15, - 0xc6, 0x93, 0x7d, 0x40, 0x32, 0xb1, 0x6b, 0x60, 0xd3, 0x52, 0xdf, 0x09, - 0x8c, 0x80, 0x2b, 0x01, 0xe7, 0x97, 0x8d, 0xbb, 0x14, 0xd6, 0x10, 0x15, - 0x64, 0x00, 0x4a, 0x2c, 0x67, 0xca, 0xd0, 0xa1, 0x37, 0x33, 0x7b, 0xa1, - 0x2a, 0x5b, 0x5b, 0x78, 0xf8, 0x2f, 0xdd, 0x76, 0xab, 0x8a, 0xc3, 0xe3, - 0x37, 0x00, 0xd1, 0x29, 0xb0, 0x96, 0x1d, 0x18, 0xbe, 0x5d, 0x32, 0x7e, - 0xb7, 0x11, 0xa9, 0x78, 0x72, 0xa2, 0x2d, 0x29, 0x1c, 0x32, 0xa4, 0xff, - 0xc7, 0xce, 0xfe, 0xaf, 0xb7, 0x17, 0x43, 0xe5, 0x2f, 0xae, 0x45, 0xd3, - 0xaf, 0x10, 0xe3, 0xd0, 0x58, 0xb6, 0xee, 0xee, 0x7a, 0xb5, 0x06, 0x70, - 0x26, 0x7e, 0x2d, 0x5b, 0xd5, 0xe1, 0x7b, 0x9a, 0x37, 0x02, 0xfc, 0x1d, - 0x08, 0x4f, 0x1a, 0xf5, 0x44, 0x63, 0xde, 0x4b, 0x14, 0x68, 0x54, 0x0b, - 0x6a, 0x22, 0x4e, 0x02, 0x65, 0xcd, 0xf4, 0x04, 0xec, 0xcc, 0x8a, 0x0b, - 0xe0, 0x59, 0xf8, 0x65, 0x25, 0x63, 0xed, 0x0f, 0xa6, 0xc5, 0x3c, 0xcb, - 0x5d, 0xc5, 0xd8, 0x9f, 0x5a, 0xd3, 0x88, 0x3d, 0xd4, 0x2c, 0xb3, 0x04, - 0xf6, 0x97, 0xc7, 0xe2, 0xfd, 0xb6, 0xf4, 0x7d, 0x0d, 0xb9, 0x75, 0x7e, - 0x9d, 0x81, 0xdc, 0xdf, 0x8e, 0x90, 0x40, 0x0c, 0x7b, 0x45, 0xfe, 0x68, - 0xfd, 0xff, 0x1c, 0xf1, 0x16, 0x09, 0x33, 0x74, 0x27, 0x7b, 0x4d, 0xd9, - 0x9b, 0x48, 0x6d, 0x84, 0xeb, 0x96, 0x8f, 0x4b, 0x82, 0x73, 0xd5, 0x69, - 0x7d, 0x14, 0x45, 0x8c, 0xb8, 0x71, 0x87, 0x70, 0x09, 0x26, 0xfc, 0x89, - 0x6f, 0x0f, 0xb6, 0xc1, 0xd6, 0xe1, 0xbf, 0xdb, 0x85, 0x8f, 0x94, 0xad, - 0x94, 0x01, 0x01, 0xbb, 0x3f, 0xc0, 0xb5, 0xff, 0xf5, 0xbb, 0x4f, 0x50, - 0x09, 0xca, 0x7d, 0x36, 0x47, 0x66, 0x9a, 0x8c, 0xee, 0x84, 0x73, 0x9a, - 0x1f, 0x49, 0x75, 0xb4, 0xab, 0x66, 0xf7, 0x3b, 0xfe, 0x81, 0x67, 0xc9, - 0xd1, 0x16, 0xde, 0x1f, 0xc2, 0x24, 
0xed, 0x6a, 0x5a, 0xe7, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0xc5, 0xd7, 0x14, 0x84, - 0xf8, 0xcf, 0x9b, 0xf4, 0xb7, 0x6f, 0x47, 0x90, 0x47, 0x30, 0x80, 0x4b, - 0x9e, 0x32, 0x25, 0xa9, 0xf1, 0x33, 0xb5, 0xde, 0xa1, 0x68, 0xf4, 0xe2, - 0x85, 0x1f, 0x07, 0x2f, 0xcc, 0x00, 0xfc, 0xaa, 0x7c, 0xa6, 0x20, 0x61, - 0x71, 0x7a, 0x48, 0xe5, 0x2e, 0x29, 0xa3, 0xfa, 0x37, 0x9a, 0x95, 0x3f, - 0xaa, 0x68, 0x93, 0xe3, 0x2e, 0xc5, 0xa2, 0x7b, 0x94, 0x5e, 0x60, 0x5f, - 0x10, 0x85, 0xf3, 0x23, 0x2d, 0x42, 0x4c, 0x13, 0x29, 0xc8, 0x8d, 0x78, - 0x6e, 0xd6, 0x8c, 0xe6, 0xfc, 0xb6, 0x2a, 0xa6, 0x3b, 0xf9, 0xab, 0x61, - 0x7c, 0x08, 0x8a, 0x3b, 0x70, 0xbe, 0x57, 0xaa, 0xda, 0x1f, 0x33, 0x4a, - 0x70, 0x17, 0x25, 0x0d, 0x3f, 0x60, 0x3d, 0xc8, 0x2e, 0xbd, 0x3b, 0x12, - 0x0b, 0x63, 0x5e, 0x3f, 0xf5, 0x6b, 0x1f, 0x0b, 0xd9, 0x33, 0x85, 0x23, - 0x71, 0x24, 0x9a, 0xb3, 0xdf, 0x5c, 0x1f, 0xef, 0x14, 0x33, 0xc8, 0x66, - 0x85, 0xb7, 0xf0, 0x56, 0x68, 0x1d, 0x51, 0x52, 0xaf, 0x80, 0x3c, 0xe2, - 0x59, 0x06, 0xf1, 0xd1, 0x9f, 0xb6, 0xc6, 0x80, 0x4e, 0x06, 0xea, 0x28, - 0xab, 0x17, 0x8f, 0x45, 0x7a, 0xf6, 0xb4, 0x93, 0xb7, 0x43, 0x9e, 0xc6, - 0xd4, 0x29, 0x00, 0x62, 0xab, 0x51, 0x7a, 0x72, 0xe5, 0xc1, 0xd4, 0x10, - 0xcd, 0xd6, 0x17, 0x54, 0xe4, 0x20, 0x84, 0x50, 0xe4, 0xf9, 0x00, 0x13, - 0xfd, 0xa6, 0x9f, 0xef, 0x19, 0xd4, 0x60, 0x2a, 0x42, 0x07, 0xcd, 0xd5, - 0xa1, 0x01, 0x6d, 0x07, 0x01, 0x32, 0x61, 0x3c, 0x65, 0x9a, 0x8f, 0x5d, - 0x33, 0xf3, 0xcb, 0x29, 0x0b, 0x8c, 0xe7, 0x3b, 0x83, 0x44, 0xb1, 0x3a, - 0x4f, 0x8e, 0x09, 0x15, 0x14, 0x69, 0x84, 0xa1, 0xbb, 0x15, 0xfd, 0xea, - 0xde, 0xbe, 0x5b, 0x6a, 0xc0, 0x95, 0x04, 0x46, 0x4d, 0x8a, 0xaa, 0xac, - 0xbc, 0x2f, 0xad, 0x12, 0x15, 0x8a, 0x53, 0x4c, 0x94, 0xb8, 0xca, 0x42, - 0x96, 0x3a, 0xf4, 0x7a, 0x18, 0x9d, 0x5b, 0x24, 0x9a, 0xce, 0xa8, 0x99, - 0xd4, 0x37, 0x32, 0xf6, 0xf2, 0xac, 0xaf, 0x3f, 0xf5, 0x3b, 0xfe, 0xda, - 0x13, 0x9a, 0xab, 0x4f, 0x55, 0xc0, 0x2c, 0x21, 0x2b, 0x65, 0x71, 0x1f, - 0xc5, 0x04, 0x32, 0xc9, 0x94, 0xe5, 
0xfa, 0x6f, 0xd8, 0x2a, 0xbc, 0x70, - 0x85, 0x55, 0xdc, 0x62, 0xb7, 0x3a, 0x20, 0x0e, 0xe7, 0x67, 0x3c, 0xfe, - 0xcb, 0x83, 0x6a, 0x15, 0x6e, 0x4a, 0x35, 0x65, 0xea, 0xc1, 0xb9, 0x4d, - 0x35, 0xf9, 0x4b, 0xcf, 0xd8, 0xfd, 0xa5, 0xff, 0xff, 0x67, 0x70, 0x04, - 0xae, 0xa2, 0xa4, 0x12, 0x4b, 0x83, 0x4f, 0xc2, 0x96, 0xf0, 0x21, 0x2b, - 0x14, 0x21, 0x73, 0x42, 0x14, 0x99, 0x07, 0xe5, 0xa9, 0x52, 0x4c, 0xeb, - 0xbe, 0xc3, 0x11, 0x2e, 0x27, 0xda, 0x69, 0x94, 0xd5, 0xf6, 0xc6, 0x77, - 0x0a, 0x00, 0x5d, 0x9a, 0x82, 0xaa, 0x21, 0xfc, 0x86, 0x9b, 0xd0, 0xc4, - 0xc4, 0x1f, 0x53, 0x41, 0x7a, 0x92, 0xab, 0x1c, 0x12, 0xf6, 0xd5, 0x48, - 0xfb, 0x29, 0x4d, 0xb4, 0xd2, 0x12, 0xee, 0xc5, 0xea, 0x18, 0x33, 0xf1, - 0x4d, 0x0a, 0x10, 0x43, 0xa5, 0x35, 0xb1, 0x63, 0xc4, 0xfb, 0x38, 0x1e, - 0xef, 0xac, 0x3f, 0x97, 0x41, 0xc6, 0x96, 0x3e, 0x60, 0x13, 0xc8, 0xe3, - 0xbe, 0x61, 0xe9, 0xb6, 0x26, 0x16, 0x14, 0xf8, 0x82, 0x0d, 0x6e, 0x75, - 0x2f, 0xd7, 0x9c, 0x3a, 0x4a, 0xda, 0xd8, 0x2b, 0x35, 0xd4, 0x20, 0x32, - 0xd4, 0x4f, 0x0f, 0xe4, 0xdc, 0xd5, 0x0f, 0xfe, 0xa6, 0x81, 0x28, 0xb4, - 0x24, 0x3e, 0xb7, 0x0f, 0xb0, 0xb2, 0x5b, 0x05, 0x76, 0xbb, 0x24, 0x49, - 0x6a, 0x01, 0x68, 0x3f, 0x03, 0x96, 0xbc, 0x0c, 0x77, 0x48, 0x5f, 0xe8, - 0x39, 0xf4, 0xb0, 0x84, 0x42, 0x0e, 0x6a, 0xb9, 0xab, 0xf2, 0x95, 0x97, - 0xa7, 0x5e, 0x29, 0x34, 0x9d, 0x50, 0xc0, 0x4b, 0x40, 0x72, 0xa1, 0x7c, - 0x79, 0x5e, 0x95, 0xbe, 0xd6, 0x17, 0x43, 0x0a, 0xc9, 0x27, 0x25, 0x43, - 0xd7, 0x99, 0xd5, 0x48, 0xd8, 0x98, 0xb5, 0x2b, 0x7f, 0xe3, 0xbd, 0x1d, - 0xc0, 0xd1, 0x04, 0xd5, 0xa4, 0xe1, 0x68, 0xbe, 0x96, 0xf1, 0x2e, 0x5e, - 0x37, 0x8d, 0x39, 0x4e, 0xe4, 0xcc, 0x5e, 0xd7, 0xdd, 0x59, 0x7e, 0xe8, - 0xae, 0x48, 0xb5, 0xec, 0x2c, 0xf7, 0x68, 0x96, 0x00, 0xe5, 0xec, 0x03, - 0x6f, 0x98, 0x3a, 0x9a, 0x4f, 0xd9, 0xf1, 0x2f, 0xfe, 0x76, 0xcf, 0x8f, - 0x0b, 0x3d, 0x8a, 0x14, 0x00, 0x83, 0xcb, 0xca, 0xe3, 0x34, 0x81, 0xb5, - 0x91, 0x64, 0x2b, 0x12, 0x24, 0x86, 0x9c, 0xae, 0x3c, 0x7f, 0x53, 0x22, - 0xd4, 0x94, 0x90, 0x44, 0x6b, 0x35, 
0xd2, 0xce, 0x8e, 0x95, 0xe2, 0xbe, - 0x46, 0x50, 0x3f, 0x3d, 0xc3, 0xcd, 0xef, 0x47, 0x99, 0xb5, 0xf2, 0xd4, - 0x6f, 0xf4, 0xfa, 0xa2, 0xfc, 0x1e, 0xe3, 0x99, 0x49, 0xfd, 0x1a, 0x6e, - 0x0d, 0xb5, 0xf1, 0xc8, 0x05, 0x22, 0x29, 0xca, 0x03, 0xb8, 0x15, 0x3b, - 0x01, 0x8a, 0x95, 0x74, 0x48, 0x93, 0x61, 0x35, 0xde, 0xeb, 0xa9, 0xc4, - 0x56, 0xa9, 0xd7, 0xde, 0x4b, 0xe5, 0x4b, 0xa1, 0x42, 0x6a, 0x5f, 0xe3, - 0xb2, 0xc7, 0xda, 0xfb, 0xc7, 0x70, 0x64, 0xe0, 0x68, 0x19, 0xc6, 0x11, - 0x77, 0x2b, 0x5f, 0xba, 0x1d, 0x58, 0x77, 0x98, 0x2c, 0x91, 0xb4, 0xd2, - 0xea, 0x1b, 0xdc, 0xe8, 0xfa, 0x82, 0xf3, 0x6e, 0xac, 0x88, 0x15, 0x16, - 0x1a, 0x53, 0xb3, 0x01, 0x94, 0x03, 0x47, 0x20, 0xdb, 0x71, 0xcb, 0x71, - 0xe8, 0x62, 0xad, 0x34, 0x2b, 0xa3, 0xa5, 0xe9, 0xa6, 0x82, 0x0e, 0x16, - 0x61, 0xbc, 0x29, 0x6b, 0xb1, 0x60, 0x67, 0x80, 0x9a, 0x9f, 0xc4, 0x82, - 0xf6, 0xb0, 0x7a, 0x16, 0x9c, 0x25, 0x04, 0xeb, 0xfd, 0xe0, 0x18, 0xd3, - 0xfc, 0xeb, 0xe1, 0x3c, 0x2b, 0x29, 0x7b, 0x32, 0x4e, 0xd3, 0x6d, 0xe1, - 0x27, 0xda, 0xc9, 0x14, 0x5c, 0x7f, 0xfa, 0x70, 0x41, 0x8e, 0xb4, 0xa3, - 0xde, 0x36, 0x92, 0x67, 0x97, 0xe2, 0xec, 0x85, 0x8b, 0x76, 0x08, 0x3c, - 0x32, 0x58, 0xd4, 0x7f, 0x6f, 0x91, 0x03, 0xdb, 0x19, 0x3e, 0xc4, 0x8b, - 0x3c, 0xb7, 0x75, 0x90, 0x71, 0x7a, 0x21, 0x9d, 0xa7, 0x77, 0xbf, 0xf5, - 0x92, 0x57, 0x46, 0x07, 0xa7, 0xbb, 0x0c, 0x42, 0xca, 0x4f, 0x5a, 0x27, - 0x45, 0x69, 0xfe, 0x6d, 0x78, 0x43, 0x77, 0xc4, 0xb4, 0x43, 0xff, 0x37, - 0x0d, 0xb7, 0xfa, 0xe9, 0x9e, 0x06, 0x70, 0x53, 0xfd, 0xf6, 0xa0, 0x28, - 0x84, 0x46, 0xcd, 0x61, 0xa2, 0x95, 0xc4, 0x1e, 0x6a, 0x13, 0xa1, 0x7f, - 0xaf, 0xe1, 0x73, 0x85, 0xb0, 0x53, 0x9c, 0x08, 0xb6, 0x1d, 0x4d, 0xb4, - 0x0b, 0xfb, 0x1f, 0x0c, 0x7b, 0x17, 0x06, 0x73, 0xa7, 0x22, 0x1f, 0xb0, - 0xd8, 0x45, 0x6e, 0xe5, 0xde, 0x48, 0xb7, 0x9f, 0x5a, 0xa8, 0xd1, 0xc3, - 0x04, 0xd1, 0x87, 0xec, 0x15, 0x3e, 0xd1, 0xc7, 0x57, 0x01, 0x46, 0x4b, - 0x28, 0xa8, 0x79, 0x5a, 0x7e, 0x0b, 0x56, 0x56, 0x28, 0xda, 0x35, 0xea, - 0x4c, 0x14, 0x81, 0xae, 0xc0, 0x0d, 
0x12, 0xfe, 0x2d, 0xb7, 0x95, 0x4d, - 0xea, 0x78, 0xb6, 0x53, 0xcf, 0xac, 0x8a, 0xfc, 0xc9, 0x07, 0x9f, 0x93, - 0xf0, 0x11, 0x86, 0x13, 0xe9, 0xca, 0x3d, 0xce, 0xb1, 0xfd, 0x1a, 0x0a, - 0x8b, 0x11, 0x82, 0x94, 0x6a, 0xae, 0xc5, 0x80, 0x6a, 0x3b, 0xa8, 0x7c, - 0xb4, 0x53, 0x4e, 0xa9, 0x04, 0x1a, 0x4f, 0xb0, 0xb9, 0x95, 0x96, 0xa5, - 0xfd, 0xce, 0xdc, 0x57, 0x00, 0x48, 0x16, 0xe2, 0x40, 0xae, 0x04, 0xf5, - 0x83, 0x60, 0x23, 0xd9, 0x8e, 0x59, 0x56, 0x20, 0x50, 0x38, 0xc4, 0xde, - 0x88, 0x9f, 0x91, 0x06, 0xdb, 0x8f, 0x84, 0xa2, 0xaf, 0x61, 0xdd, 0x48, - 0x03, 0x4f, 0xc4, 0xb8, 0xed, 0x12, 0xd2, 0x74, 0x08, 0xb9, 0x51, 0x63, - 0xb5, 0xfe, 0x09, 0x7f, 0x7b, 0x8c, 0x5e, 0xd7, 0x27, 0xe5, 0x79, 0xe6, - 0x33, 0x60, 0x54, 0xe1, 0x21, 0xda, 0xca, 0x8b, 0x81, 0xdf, 0xb6, 0xa7, - 0x2e, 0x9d, 0x0f, 0xfc, 0x05, 0x80, 0x67, 0xcb, 0xc5, 0xdf, 0xc7, 0x13, - 0xee, 0xb5, 0x40, 0x8e, 0xa7, 0x0c, 0xcb, 0xf2, 0x45, 0x15, 0x29, 0xb1, - 0xb8, 0x02, 0x23, 0x61, 0x38, 0xf1, 0x16, 0xa1, 0x0c, 0xa1, 0xc9, 0x40, - 0x8c, 0xd0, 0x48, 0x4b, 0xce, 0x9c, 0x1e, 0x53, 0x40, 0x44, 0xf6, 0x17, - 0x16, 0xc6, 0x5c, 0xb0, 0x2a, 0x29, 0x59, 0x87, 0x67, 0x85, 0xa7, 0x81, - 0x84, 0xe9, 0x4f, 0xe5, 0x4e, 0x13, 0x5a, 0x11, 0xa1, 0x24, 0x62, 0xe9, - 0x7a, 0xea, 0x51, 0xaa, 0x45, 0xf3, 0x1d, 0x2a, 0xaf, 0x01, 0x28, 0x35, - 0xda, 0xb4, 0xe7, 0xab, 0xc1, 0xb9, 0x3c, 0x45, 0xa2, 0x0b, 0x5d, 0x40, - 0x09, 0xac, 0x62, 0x16, 0xd3, 0x1f, 0x9f, 0xc7, 0x1a, 0x56, 0xb7, 0x27, - 0xd1, 0x1b, 0xe1, 0xb5, 0x82, 0x9e, 0xe8, 0xd3, 0x5c, 0x0f, 0xe8, 0x87, - 0x61, 0xc6, 0x20, 0xb7, 0x31, 0x3f, 0x0d, 0xb3, 0x0a, 0x5a, 0xce, 0x06, - 0xa5, 0xe9, 0xfd, 0xf3, 0x29, 0x1a, 0xcd, 0x86, 0x0e, 0x31, 0x29, 0xaa, - 0xb7, 0x32, 0xf1, 0x10, 0x4e, 0x92, 0x12, 0x00, 0xc0, 0xac, 0x50, 0x4b, - 0x52, 0x59, 0x51, 0x7c, 0xa8, 0x0c, 0xf7, 0xcb, 0x16, 0x73, 0x7b, 0x90, - 0xa8, 0x57, 0x79, 0xb4, 0x73, 0x53, 0xd7, 0xed, 0xba, 0x46, 0xc5, 0x06, - 0x53, 0x02, 0xc7, 0x58, 0x4c, 0x09, 0x0c, 0xa5, 0x01, 0x13, 0x18, 0x39, - 0x4b, 0x4e, 0xc2, 0x0d, 0xd6, 0xdf, 
0xaa, 0x7e, 0x46, 0xba, 0x6e, 0xcc, - 0x25, 0x42, 0xd0, 0xb3, 0x31, 0xdc, 0xdf, 0x7d, 0xf1, 0xc3, 0x73, 0xca, - 0x7a, 0xf6, 0xcb, 0x23, 0x81, 0x8d, 0xbe, 0x0b, 0xf2, 0x79, 0x8d, 0x14, - 0xa4, 0xc8, 0x36, 0x18, 0x49, 0xc8, 0x0d, 0xd7, 0xc9, 0xdd, 0x35, 0xeb, - 0xec, 0x52, 0x56, 0xae, 0xf2, 0xd2, 0x51, 0x91, 0x39, 0xbc, 0xb0, 0x49, - 0xb7, 0xf2, 0x1b, 0x64, 0x83, 0x5a, 0xa6, 0x97, 0xc2, 0x15, 0x95, 0xdc, - 0x11, 0xd2, 0x89, 0xc0, 0x6a, 0xb1, 0x44, 0x43, 0x38, 0xb6, 0x54, 0x0f, - 0xdc, 0xcb, 0xed, 0x26, 0x27, 0xd9, 0x46, 0x56, 0x4e, 0x6a, 0x54, 0x74, - 0x0f, 0x45, 0xfc, 0xb6, 0x93, 0xab, 0x3c, 0xd1, 0x86, 0x51, 0xaf, 0xa9, - 0x4a, 0xc0, 0x9c, 0x78, 0xc1, 0xb1, 0xc7, 0xf1, 0x9c, 0xd1, 0xd0, 0x32, - 0x4e, 0x4b, 0x02, 0x36, 0x68, 0x38, 0x88, 0x56, 0xc0, 0x2b, 0x12, 0x05, - 0x3b, 0xb9, 0xf6, 0xa2, 0x37, 0xe7, 0xbc, 0x81, 0xf9, 0x75, 0x51, 0x27, - 0x56, 0x0d, 0x55, 0xd1, 0x6a, 0xe0, 0xcf, 0x87, 0x0a, 0x44, 0xc6, 0x57, - 0xe1, 0x1b, 0xc0, 0x2c, 0xcf, 0xab, 0x77, 0xe9, 0x14, 0xf5, 0x34, 0x89, - 0xfb, 0xc9, 0xf2, 0x87, 0x5c, 0x75, 0xba, 0x51, 0x9a, 0x49, 0xe9, 0x23, - 0x23, 0xf4, 0xc9, 0xd1, 0x2f, 0x87, 0xf6, 0x75, 0x38, 0x97, 0x48, 0xb8, - 0x30, 0x46, 0x1d, 0x46, 0x65, 0x03, 0x10, 0xcf, 0xfb, 0x36, 0xf2, 0xb1, - 0xaf, 0x31, 0x02, 0x7b, 0x74, 0xfe, 0x9f, 0x8c, 0x73, 0x04, 0xfd, 0xb5, - 0xae, 0x2e, 0x27, 0x9c, 0xd8, 0x73, 0xbc, 0xc3, 0x4a, 0x76, 0x93, 0x66, - 0xf6, 0xb7, 0x90, 0xc4, 0x42, 0x3d, 0xcd, 0xb5, 0xf1, 0x75, 0xbf, 0xb7, - 0xdd, 0x8e, 0xb7, 0xcd, 0x90, 0x35, 0xf5, 0x95, 0x3d, 0xe4, 0x4e, 0xb0, - 0x7c, 0x5f, 0xad, 0xff, 0x75, 0x38, 0xc4, 0xc7, 0xed, 0xec, 0x70, 0xcc, - 0x9f, 0xf9, 0x77, 0xa1, 0x00, 0x2f, 0xf1, 0xa2, 0xc9, 0x74, 0xdc, 0x18, - 0x14, 0xd0, 0x2f, 0x86, 0x66, 0xa7, 0x5b, 0x39, 0x5c, 0xba, 0x0e, 0x77, - 0x16, 0x04, 0xc3, 0x02, 0x42, 0x3b, 0x66, 0x29, 0xee, 0x65, 0x00, 0xd4, - 0x22, 0x5a, 0x77, 0x74, 0xd4, 0xc3, 0xf3, 0x00, 0xdf, 0x6b, 0xc3, 0x15, - 0x89, 0x0e, 0xb1, 0xbc, 0xac, 0xe8, 0x44, 0x2f, 0x80, 0x34, 0x34, 0x8b, - 0x0c, 0x48, 0x45, 0xc2, 0x6a, 0xa3, 
0x67, 0xd7, 0x3d, 0x36, 0xf3, 0x3f, - 0xe5, 0xf0, 0x5b, 0xe8, 0xad, 0x41, 0xd5, 0x82, 0xc1, 0x28, 0xab, 0x77, - 0xe8, 0x7f, 0xb3, 0xf6, 0xd2, 0x0c, 0xe4, 0x03, 0xcf, 0xe4, 0x72, 0xdb, - 0x7b, 0x81, 0xf4, 0xf3, 0x48, 0x74, 0xe1, 0x91, 0xb8, 0xf8, 0x4c, 0x2c, - 0x60, 0x99, 0x3e, 0x1e, 0x4f, 0xaf, 0x12, 0xab, 0x52, 0xef, 0xc7, 0x60, - 0xd2, 0xfe, 0x62, 0x55, 0xc8, 0x18, 0xad, 0x60, 0xa7, 0x5d, 0xde, 0x4d, - 0xfc, 0x6d, 0xe1, 0x10, 0x7c, 0xf9, 0xa2, 0x64, 0x00, 0x16, 0x1f, 0x44, - 0x7c, 0xe2, 0x72, 0x37, 0xd9, 0x92, 0xad, 0xfc, 0x62, 0x53, 0xbe, 0xb6, - 0xe0, 0xc8, 0xe0, 0xa2, 0xef, 0x22, 0x4b, 0x70, 0x3a, 0x4f, 0xc9, 0xed, - 0x6b, 0xbc, 0x17, 0x0a, 0xcf, 0x6a, 0x2c, 0xd3, 0xd2, 0x6b, 0x02, 0x45, - 0xfa, 0x9e, 0xc2, 0x21, 0x28, 0xfc, 0x07, 0x68, 0xd6, 0xb8, 0x9f, 0x2a, - 0x0b, 0x7a, 0x0e, 0xbc, 0x4e, 0xee, 0x84, 0x38, 0xe4, 0x8e, 0x70, 0xc3, - 0xc4, 0xad, 0x74, 0x87, 0x2d, 0x16, 0x4f, 0xa1, 0xf8, 0x20, 0xf5, 0xde, - 0xa3, 0xc5, 0x0c, 0x3b, 0xde, 0x44, 0x48, 0x0f, 0x3c, 0xdc, 0x7e, 0x10, - 0x8b, 0x87, 0xc4, 0x3b, 0xb0, 0x95, 0xbf, 0x61, 0x1e, 0xad, 0x07, 0x52, - 0xfd, 0x0b, 0x84, 0xa9, 0x46, 0xb0, 0x32, 0xd5, 0x22, 0x80, 0x35, 0x26, - 0x41, 0xf8, 0x11, 0x72, 0xb1, 0x31, 0x6f, 0x5a, 0x75, 0xcc, 0x67, 0xe0, - 0xb2, 0x50, 0x89, 0xb2, 0x66, 0x6e, 0xee, 0xa0, 0x41, 0x8d, 0x00, 0x2a, - 0xa7, 0x9d, 0xa5, 0x11, 0x2b, 0x07, 0x95, 0x3a, 0x55, 0x8c, 0x67, 0xb1, - 0xe5, 0x2d, 0xd4, 0xd1, 0x3e, 0x29, 0xed, 0xa5, 0x59, 0x97, 0x7b, 0xdf, - 0x92, 0x10, 0x0b, 0x04, 0x89, 0x27, 0xa0, 0xa2, 0x93, 0x18, 0x7f, 0x47, - 0x84, 0x1c, 0xc6, 0xd6, 0x8f, 0x73, 0x81, 0xa0, 0xfa, 0xe5, 0x3e, 0xd8, - 0xbf, 0x56, 0x1a, 0x76, 0xf4, 0xc4, 0x0f, 0x7a, 0x29, 0x9d, 0x32, 0x5d, - 0x41, 0xe0, 0x07, 0xb9, 0xd3, 0x3f, 0x7e, 0xff, 0x90, 0x89, 0xce, 0xdc, - 0xf1, 0x1d, 0x54, 0xb6, 0x67, 0x7f, 0x4d, 0x71, 0x9a, 0x4a, 0x5f, 0x80, - 0x0d, 0x5c, 0x77, 0xd5, 0x50, 0x7c, 0x41, 0x56, 0x7e, 0x99, 0x0a, 0xeb, - 0x66, 0x1f, 0xd2, 0x55, 0xc3, 0xc6, 0x6c, 0xc5, 0xfc, 0x34, 0x40, 0x2c, - 0x05, 0x29, 0x05, 0x7c, 0xca, 0xe6, 
0x8d, 0xd3, 0xb0, 0xca, 0x84, 0x27, - 0x50, 0x7c, 0x6b, 0x17, 0x1b, 0x22, 0xe4, 0x7f, 0xe6, 0x44, 0x94, 0x06, - 0x4b, 0xb3, 0xb7, 0xbb, 0x98, 0x81, 0x44, 0x0b, 0xf5, 0x66, 0xcb, 0xad, - 0xf2, 0x9a, 0xe1, 0x47, 0xf3, 0x97, 0xa9, 0xb2, 0xc2, 0xca, 0xcd, 0x98, - 0x78, 0x60, 0xdc, 0x6e, 0x87, 0x55, 0x47, 0xf3, 0xae, 0x84, 0xdd, 0x9a, - 0xe9, 0x1a, 0x63, 0x83, 0xea, 0x23, 0x09, 0x67, 0x34, 0x83, 0x00, 0x6e, - 0x5e, 0x58, 0xb8, 0x89, 0x04, 0x08, 0x0a, 0x55, 0x9e, 0x78, 0xc9, 0xff, - 0xb9, 0xb5, 0x2c, 0xdd, 0x3b, 0x0c, 0x58, 0x07, 0x8b, 0xb4, 0x6a, 0xc4, - 0x64, 0xa3, 0x5e, 0x5b, 0xfe, 0x4d, 0xd0, 0x74, 0x01, 0x1b, 0xdf, 0x10, - 0x45, 0x2b, 0xd6, 0x9e, 0xa9, 0x60, 0x1f, 0xad, 0x46, 0xa1, 0x8c, 0xf8, - 0xf6, 0xa9, 0x8a, 0x27, 0xea, 0x51, 0x37, 0x84, 0xcf, 0xe5, 0xd7, 0x51, - 0xd6, 0x40, 0x39, 0x39, 0x5f, 0xf6, 0x96, 0x33, 0xd9, 0x86, 0x8d, 0x38, - 0xb6, 0x26, 0x04, 0x14, 0x07, 0x46, 0x3e, 0xd0, 0xc5, 0xf6, 0x0d, 0xa0, - 0x47, 0x2b, 0xc8, 0x73, 0x18, 0x6b, 0xd3, 0x0e, 0x18, 0xcc, 0x43, 0x98, - 0xd0, 0xcf, 0x1c, 0xe4, 0x4a, 0x41, 0x6a, 0x56, 0x2d, 0xf0, 0x93, 0x89, - 0x81, 0x6c, 0xce, 0x04, 0x1a, 0x23, 0x05, 0x91, 0x4f, 0x48, 0x44, 0x3a, - 0xaa, 0x03, 0xa5, 0x4a, 0xa9, 0x20, 0x2c, 0xbe, 0x6a, 0x81, 0xe6, 0xa9, - 0xf8, 0xf0, 0x2b, 0x29, 0xa1, 0xe0, 0xc4, 0xce, 0xf5, 0xda, 0x25, 0x70, - 0x49, 0xcc, 0xa0, 0x4b, 0x24, 0x49, 0x4f, 0x11, 0xc4, 0x3b, 0x22, 0x89, - 0x9a, 0xb4, 0xf4, 0xcd, 0xa3, 0xee, 0xb0, 0x76, 0x13, 0xc4, 0xbb, 0xaf, - 0x03, 0x7f, 0x27, 0xf3, 0x38, 0xbc, 0xde, 0x7c, 0x0c, 0x39, 0x14, 0xb7, - 0x14, 0xbb, 0x5c, 0xae, 0x89, 0xf8, 0xf7, 0xd6, 0x00, 0x78, 0xf4, 0xb0, - 0x52, 0x16, 0xf5, 0x54, 0xc5, 0x93, 0xf7, 0x6d, 0x0d, 0xe8, 0x58, 0xe2, - 0xa1, 0xa7, 0xdc, 0x49, 0xdb, 0xc8, 0x79, 0xbc, 0xc3, 0x97, 0x7b, 0x6c, - 0x82, 0x7b, 0xbe, 0xe9, 0x79, 0xac, 0x4a, 0xa4, 0x7c, 0x49, 0x83, 0x58, - 0x3a, 0xe4, 0xf5, 0x68, 0x5c, 0xb7, 0x7f, 0x2d, 0xfe, 0x6b, 0x96, 0xc7, - 0x8b, 0x67, 0xb5, 0xd0, 0xa1, 0x0a, 0x16, 0x62, 0x64, 0x53, 0xea, 0x29, - 0x80, 0x93, 0xf9, 0xd6, 0xa0, 0xc5, 
0x1b, 0x3a, 0x1e, 0xab, 0x51, 0x88, - 0xe0, 0x9e, 0xd4, 0xf6, 0xbf, 0x70, 0x2d, 0x29, 0x2e, 0x08, 0xa9, 0x31, - 0x78, 0x0a, 0x15, 0x30, 0x9f, 0x2e, 0xc8, 0x41, 0x65, 0x8e, 0x97, 0x51, - 0x5e, 0x73, 0x46, 0x42, 0x74, 0x84, 0xfd, 0x9b, 0x4a, 0x8a, 0x68, 0x28, - 0x45, 0xd0, 0x5d, 0x65, 0x08, 0xb3, 0xf5, 0x40, 0x8a, 0x29, 0x8e, 0x70, - 0x02, 0x49, 0x6a, 0x01, 0xd6, 0x41, 0x4a, 0xf8, 0x15, 0xa3, 0x70, 0x59, - 0xe9, 0xa2, 0xe2, 0x76, 0x8c, 0x60, 0x33, 0xb3, 0xfa, 0x8b, 0xb4, 0x90, - 0x6f, 0x92, 0xc8, 0x21, 0x59, 0xc0, 0x3a, 0x30, 0x46, 0xeb, 0x49, 0xd8, - 0x85, 0x63, 0x5a, 0x23, 0x87, 0xe1, 0xa7, 0xc0, 0x1a, 0xb0, 0xc7, 0xc4, - 0x40, 0x4d, 0x11, 0x9c, 0xe3, 0xd4, 0x6b, 0xef, 0x68, 0xc8, 0x2c, 0x31, - 0xcd, 0x3e, 0xee, 0x55, 0x10, 0x67, 0x77, 0x7b, 0x30, 0xc1, 0xd0, 0x23, - 0x6c, 0x65, 0x6f, 0xfb, 0x2e, 0x62, 0x33, 0x42, 0x63, 0xdc, 0xca, 0x86, - 0xf1, 0x0e, 0xb3, 0xb0, 0x69, 0x11, 0x65, 0xe1, 0x6e, 0x6c, 0x03, 0x49, - 0x79, 0xe8, 0xf1, 0x2e, 0x8d, 0x94, 0xc8, 0xa8, 0x98, 0x2d, 0x3f, 0xfe, - 0xbd, 0x2d, 0x75, 0x45, 0xd1, 0x7a, 0x09, 0xf8, 0x90, 0x49, 0xbd, 0x4a, - 0x3b, 0xa4, 0xa3, 0x26, 0xb8, 0x62, 0x66, 0x97, 0xd9, 0xc1, 0xca, 0x12, - 0x49, 0xe1, 0x27, 0x93, 0x4f, 0x60, 0xfa, 0xb3, 0x4f, 0x4c, 0xdb, 0x87, - 0x6c, 0x3b, 0x50, 0x47, 0xe2, 0xd8, 0x5b, 0x13, 0x99, 0xf0, 0x2b, 0xbb, - 0x32, 0x33, 0xfd, 0x7d, 0x15, 0x0f, 0x2c, 0xee, 0x85, 0x83, 0xc0, 0x53, - 0x79, 0x3e, 0x51, 0xfe, 0x7c, 0x06, 0x73, 0x49, 0x49, 0x4f, 0x5a, 0x22, - 0x36, 0x8f, 0x30, 0x8a, 0xef, 0x84, 0xd6, 0x15, 0x26, 0x48, 0xe7, 0x1e, - 0xb1, 0xaa, 0x82, 0xd0, 0xc7, 0x0b, 0x97, 0x7b, 0x6c, 0x2d, 0x49, 0x7e, - 0x6d, 0xe7, 0xa3, 0x05, 0x80, 0xd7, 0x42, 0xa9, 0xc6, 0x66, 0x98, 0x30, - 0xe3, 0x8a, 0x79, 0x86, 0x9c, 0x2b, 0xbc, 0x4a, 0xe6, 0x0d, 0xc5, 0xe5, - 0x1a, 0x92, 0xd9, 0xef, 0x63, 0x52, 0x03, 0x88, 0x36, 0xc5, 0x83, 0x65, - 0xf8, 0xf1, 0x87, 0xce, 0x43, 0xfe, 0x89, 0x58, 0x07, 0x6a, 0xad, 0x85, - 0x37, 0x0f, 0xdf, 0x9e, 0xa5, 0x62, 0xa9, 0xd2, 0x41, 0x3f, 0x7f, 0xb7, - 0xf1, 0xe2, 0x58, 0xb5, 0xda, 0xdf, 
0xd1, 0xba, 0x36, 0x2c, 0xe7, 0x43, - 0x31, 0x07, 0xc5, 0xf5, 0x79, 0xc9, 0x31, 0xd7, 0x1d, 0x97, 0x57, 0x9a, - 0x8e, 0x3f, 0xac, 0x00, 0x49, 0x00, 0x2f, 0xad, 0xac, 0xe7, 0x65, 0x7c, - 0xbf, 0xec, 0x85, 0x57, 0xe6, 0xcc, 0x07, 0x34, 0x02, 0x36, 0xa8, 0x6a, - 0x9f, 0x3a, 0x9a, 0x2f, 0x34, 0x93, 0x1f, 0x7d, 0x38, 0x54, 0xe3, 0x54, - 0x54, 0xee, 0x84, 0x55, 0xe1, 0x0d, 0xc1, 0x08, 0x3e, 0x33, 0x9e, 0x2a, - 0xc3, 0x6a, 0x83, 0xc4, 0x75, 0xed, 0xbc, 0x5f, 0xd9, 0x04, 0xd7, 0x77, - 0x91, 0xb1, 0xa0, 0xf2, 0xef, 0x81, 0xb0, 0x8b, 0x53, 0x5f, 0x71, 0xec, - 0xa5, 0x0b, 0xbe, 0xf2, 0x92, 0x7e, 0x0a, 0x34, 0xeb, 0x5d, 0x65, 0xc7, - 0xa9, 0x44, 0x10, 0xfb, 0xd3, 0xef, 0xe1, 0xbc, 0x06, 0x65, 0x68, 0x22, - 0xfb, 0x43, 0x2c, 0xcf, 0x8e, 0x6a, 0x28, 0xdb, 0x0b, 0xf4, 0xaf, 0x01, - 0x65, 0x97, 0xd6, 0xe5, 0x91, 0x20, 0x13, 0x2c, 0xb1, 0xc2, 0xd3, 0xc3, - 0x76, 0x90, 0xf8, 0xcd, 0x00, 0xde, 0x93, 0xf8, 0x4e, 0xcc, 0xdc, 0xca, - 0x9a, 0xf0, 0xbd, 0x9b, 0xd6, 0x57, 0xb1, 0x13, 0xd9, 0xe0, 0xe1, 0x9e, - 0x21, 0x74, 0xa9, 0x76, 0xc0, 0x0c, 0xad, 0x4f, 0x5d, 0xfe, 0x23, 0x32, - 0x5a, 0x10, 0x75, 0x5b, 0x05, 0xdf, 0xdc, 0x5b, 0x94, 0xcb, 0xe1, 0x9f, - 0x13, 0x51, 0xf5, 0x50, 0x36, 0x3b, 0xf2, 0x90, 0x9c, 0x9a, 0xc8, 0x10, - 0x88, 0xa9, 0xec, 0x22, 0x1e, 0x96, 0x70, 0xe8, 0x9e, 0x69, 0xc1, 0x22, - 0xd9, 0x14, 0x15, 0x2e, 0xbc, 0x03, 0x96, 0x9e, 0x1d, 0x00, 0x10, 0x16, - 0x4f, 0x56, 0xf0, 0x29, 0x47, 0x0a, 0x45, 0x34, 0x27, 0x21, 0x3b, 0x67, - 0x33, 0xf9, 0xdd, 0x29, 0x3a, 0xf2, 0xe4, 0x56, 0x34, 0x46, 0xbe, 0xd8, - 0x42, 0x29, 0x11, 0x7f, 0x30, 0xc1, 0xbe, 0xa5, 0xc8, 0x9d, 0x7b, 0x2e, - 0x4e, 0xcf, 0xba, 0x91, 0xb4, 0xbf, 0x0a, 0x04, 0x00, 0x49, 0x83, 0x6b, - 0x46, 0x5f, 0x3b, 0xfa, 0xf7, 0x40, 0x8d, 0x85, 0x47, 0x14, 0x58, 0xb3, - 0xa5, 0x66, 0x30, 0xfd, 0x4a, 0x80, 0xa4, 0x61, 0x3b, 0x7c, 0xb4, 0xcc, - 0x34, 0x8c, 0xc6, 0xb6, 0x10, 0xa9, 0x76, 0xc9, 0x11, 0xd7, 0x8a, 0x51, - 0x86, 0x17, 0x89, 0x28, 0xab, 0xd5, 0x03, 0x88, 0x74, 0x5b, 0x81, 0xbd, - 0x3a, 0x57, 0xfe, 0x66, 0x25, 0xd0, 
0x92, 0x15, 0x84, 0x02, 0x0f, 0x51, - 0xa8, 0x58, 0xcf, 0x77, 0x65, 0x10, 0x61, 0xe8, 0xe6, 0xab, 0xb1, 0xba, - 0x3b, 0x08, 0xd6, 0xba, 0x5f, 0xf5, 0x74, 0xc5, 0x07, 0x60, 0xfd, 0xd3, - 0xc8, 0x52, 0x4e, 0xdb, 0xc3, 0xe3, 0x6d, 0x81, 0x20, 0x51, 0x01, 0x9a, - 0x5e, 0x32, 0x4e, 0x80, 0x5a, 0xcb, 0x83, 0xd7, 0xa4, 0xd9, 0xfb, 0xed, - 0x3d, 0x80, 0xa1, 0x83, 0x81, 0x91, 0xc0, 0x0b, 0xff, 0x67, 0xd8, 0x8b, - 0xd0, 0x12, 0x0b, 0xd4, 0x2b, 0x8e, 0x0d, 0x0f, 0xfc, 0xc7, 0xb3, 0xf1, - 0xe3, 0xf3, 0x5e, 0x0c, 0xb6, 0x6b, 0x9d, 0xdc, 0x22, 0x70, 0x31, 0x54, - 0xe8, 0x41, 0xfe, 0xa1, 0xe1, 0x4f, 0xfa, 0x81, 0xfb, 0xae, 0x72, 0x16, - 0xb8, 0x87, 0xc9, 0x31, 0x9d, 0x42, 0x47, 0x4a, 0x20, 0xae, 0x63, 0x16, - 0x0d, 0xfa, 0xf1, 0x27, 0x19, 0x47, 0xee, 0x45, 0x84, 0x29, 0x9a, 0xb6, - 0x42, 0xef, 0xbd, 0x15, 0xa8, 0x34, 0x33, 0x38, 0x9c, 0x9d, 0xbb, 0x5c, - 0x03, 0xf3, 0xcf, 0xcf, 0x6d, 0x2e, 0xd5, 0x88, 0xf8, 0xdd, 0xfc, 0xc0, - 0x4a, 0xdb, 0x69, 0xd9, 0x62, 0x89, 0x24, 0x46, 0xee, 0xa4, 0xb9, 0x95, - 0xe6, 0xaf, 0x7d, 0x53, 0xec, 0x41, 0xae, 0x70, 0xfe, 0x4f, 0x31, 0xe3, - 0xa2, 0x59, 0x2c, 0xa1, 0x53, 0x8b, 0xb6, 0x3b, 0x39, 0xc1, 0xa4, 0xa7, - 0x9e, 0xaa, 0x00, 0x60, 0x9a, 0x5f, 0x56, 0x51, 0xf3, 0x7b, 0x28, 0x84, - 0x36, 0x1a, 0xc1, 0x2d, 0xc8, 0xed, 0xf8, 0x48, 0x48, 0x1d, 0x39, 0x4d, - 0x3d, 0xce, 0x30, 0x90, 0x29, 0x33, 0x6f, 0x9a, 0xce, 0x58, 0xe7, 0x88, - 0xac, 0x59, 0xce, 0x85, 0x5a, 0x52, 0x2b, 0x6c, 0xb7, 0xe9, 0x2e, 0xa9, - 0xd9, 0x9a, 0xea, 0x1c, 0x47, 0xb2, 0x59, 0xff, 0x73, 0x76, 0x21, 0x40, - 0xe1, 0xde, 0x32, 0xb8, 0x73, 0x3d, 0xa5, 0x44, 0x66, 0x79, 0xa1, 0xfe, - 0xaf, 0xf6, 0x8a, 0x97, 0x09, 0x5c, 0x8b, 0x64, 0x38, 0x9f, 0xe1, 0x59, - 0x38, 0x18, 0xe9, 0xc0, 0xd6, 0xa2, 0xac, 0x74, 0xa9, 0xfd, 0x4a, 0x0d, - 0xf6, 0x47, 0x00, 0x2b, 0x09, 0x46, 0x38, 0x1c, 0xa4, 0x9f, 0x63, 0x20, - 0x18, 0x75, 0x5a, 0xb8, 0xc4, 0xbc, 0xd6, 0x6b, 0xc8, 0x14, 0x72, 0x03, - 0xe4, 0x05, 0xd4, 0x4e, 0x66, 0x20, 0x42, 0xa2, 0x8f, 0x96, 0xe7, 0xaf, - 0xd3, 0xfb, 0xa8, 0x88, 0x9b, 0xe3, 
0xaa, 0xcd, 0xab, 0xce, 0x8f, 0x07, - 0x6d, 0xef, 0x98, 0xce, 0xdb, 0x42, 0x5b, 0xf4, 0x61, 0x57, 0x62, 0x27, - 0x8a, 0x53, 0x5e, 0xf8, 0x3e, 0xf6, 0x7f, 0xde, 0x5e, 0x3b, 0x1b, 0x13, - 0x2e, 0x30, 0x46, 0x4b, 0x6b, 0xb7, 0xbb, 0x33, 0x31, 0xc0, 0xfa, 0x40, - 0xab, 0x68, 0x72, 0xe3, 0x92, 0x30, 0x47, 0xd6, 0x30, 0x60, 0x42, 0x5b, - 0x88, 0x8d, 0xa6, 0x56, 0xe4, 0xac, 0x33, 0x2e, 0xca, 0x05, 0x1f, 0x60, - 0xaf, 0xde, 0x7f, 0xa9, 0xda, 0x3f, 0xa8, 0x21, 0xf6, 0xfc, 0x98, 0x7d, - 0xc4, 0x1e, 0xb0, 0xa9, 0x56, 0x2d, 0x8d, 0xea, 0x03, 0x51, 0x48, 0xac, - 0xe8, 0x22, 0xc7, 0x8b, 0xef, 0x91, 0x0e, 0xcf, 0x0c, 0xe9, 0x38, 0x43, - 0x99, 0xa8, 0x98, 0x4f, 0xfa, 0xe3, 0x03, 0xa6, 0x4f, 0xd4, 0x0d, 0x98, - 0x5b, 0x50, 0x28, 0xd7, 0xe7, 0x46, 0xd7, 0xad, 0x43, 0xb8, 0x56, 0x2a, - 0x2f, 0x7c, 0x39, 0x67, 0xf4, 0x62, 0x0e, 0xc0, 0xa8, 0x87, 0xb5, 0x81, - 0xe2, 0x13, 0x9f, 0xe4, 0xdd, 0x72, 0xf2, 0x07, 0xca, 0xac, 0x6d, 0xb2, - 0x96, 0x53, 0x5a, 0x8f, 0x66, 0x3c, 0xb4, 0xc1, 0x4f, 0x9a, 0x82, 0x55, - 0xcf, 0x0e, 0x27, 0x5f, 0xc7, 0xd2, 0x28, 0x27, 0x7f, 0x22, 0x6e, 0xa5, - 0xe7, 0x32, 0x56, 0x51, 0x18, 0xe0, 0x85, 0x6d, 0x1f, 0xfc, 0x25, 0x08, - 0x18, 0x60, 0x57, 0xfc, 0x66, 0x94, 0x2c, 0x4c, 0xbe, 0x00, 0xab, 0x9e, - 0x73, 0x9b, 0x06, 0xd3, 0xb5, 0x24, 0xa8, 0x8f, 0xb1, 0x33, 0x99, 0x4c, - 0xb4, 0x13, 0x07, 0xcd, 0x04, 0xdd, 0x77, 0xdc, 0xee, 0x96, 0x02, 0x59, - 0xe8, 0x22, 0x07, 0x16, 0x2e, 0x41, 0xc9, 0xc4, 0x59, 0x70, 0x37, 0x0f, - 0x14, 0xc9, 0xcf, 0x90, 0x57, 0xc2, 0x0d, 0xa3, 0xd7, 0x66, 0xb6, 0x7d, - 0x10, 0xd4, 0xfc, 0x18, 0x66, 0xad, 0xea, 0x5e, 0x64, 0x6c, 0x12, 0x66, - 0x3d, 0x96, 0xa5, 0xa8, 0x9c, 0x49, 0x5c, 0xd4, 0x8d, 0x1c, 0xc3, 0x38, - 0xfe, 0x53, 0xc2, 0x71, 0xd1, 0xc6, 0x41, 0xe2, 0xb9, 0x17, 0x74, 0x6e, - 0xcc, 0xf8, 0x72, 0x28, 0x38, 0x4e, 0x54, 0x9b, 0x0e, 0xa3, 0x3a, 0x43, - 0x5c, 0xd5, 0x83, 0x06, 0xbb, 0x46, 0x16, 0x6e, 0xe3, 0x8a, 0xd5, 0x1e, - 0x7f, 0x88, 0x62, 0xac, 0x35, 0x89, 0xfb, 0xbe, 0x96, 0x1d, 0x87, 0x37, - 0xb7, 0x91, 0x63, 0xae, 0x77, 0x7b, 
0x66, 0x60, 0xc1, 0x3e, 0x80, 0x56, - 0xb1, 0xc8, 0x0d, 0x16, 0xde, 0x38, 0x82, 0x66, 0x99, 0x2b, 0x35, 0xd8, - 0xb4, 0x5b, 0x4b, 0x3e, 0x93, 0x96, 0x59, 0xf8, 0x96, 0x7e, 0x7b, 0x27, - 0xf4, 0x62, 0xb7, 0xda, 0x89, 0xa7, 0x34, 0x47, 0xed, 0xb3, 0x42, 0x20, - 0xeb, 0xcd, 0xf6, 0xa3, 0x9f, 0xf7, 0x48, 0x91, 0x17, 0xd2, 0x21, 0xed, - 0x5a, 0x22, 0x39, 0xc9, 0x76, 0x95, 0x36, 0xd9, 0x97, 0x0f, 0x19, 0xce, - 0xd3, 0xbc, 0x74, 0x7d, 0x53, 0x37, 0x3b, 0x4a, 0x97, 0xb7, 0xf8, 0x7e, - 0xdd, 0x4c, 0x5f, 0xae, 0x5c, 0x0b, 0xab, 0x4c, 0x34, 0xa1, 0x7e, 0x34, - 0x35, 0xf4, 0xfc, 0x92, 0xab, 0x2e, 0x6a, 0x15, 0xce, 0x84, 0xae, 0x70, - 0xae, 0x85, 0x21, 0xe6, 0x41, 0x13, 0x31, 0xe0, 0x8f, 0xab, 0x82, 0xe3, - 0x09, 0xaf, 0xa4, 0x7c, 0xb4, 0xb9, 0xb7, 0xc0, 0x67, 0x08, 0xc9, 0x9d, - 0xcd, 0x0b, 0x3c, 0xa0, 0x0c, 0xde, 0x49, 0x2f, 0x40, 0x19, 0x95, 0x64, - 0xb9, 0x7c, 0x2a, 0x72, 0xdd, 0xa2, 0x92, 0x0a, 0x21, 0xeb, 0x8c, 0xc3, - 0x6d, 0x52, 0xe7, 0x05, 0x50, 0x01, 0x55, 0x19, 0x2f, 0xbd, 0x1b, 0x72, - 0x73, 0xfe, 0x82, 0x9f, 0xbf, 0xa0, 0xfe, 0x19, 0x7c, 0x42, 0x6d, 0x76, - 0x32, 0x47, 0x36, 0x15, 0x2e, 0xde, 0xe8, 0xe6, 0xca, 0x07, 0xa3, 0x6b, - 0x40, 0x99, 0x96, 0xcd, 0x19, 0xea, 0x7e, 0xc9, 0x87, 0x9d, 0x3d, 0xa0, - 0x82, 0x88, 0xe7, 0xe4, 0x34, 0x9f, 0xa5, 0x27, 0xdf, 0xae, 0x03, 0x37, - 0xa8, 0x35, 0x64, 0x02, 0x09, 0x09, 0x9e, 0xec, 0x38, 0x0a, 0xff, 0x79, - 0x8c, 0x9a, 0x87, 0x66, 0xcd, 0xe4, 0xf4, 0x9d, 0xa9, 0x07, 0x96, 0x36, - 0xae, 0x2e, 0x4e, 0xc5, 0xe9, 0x86, 0xb2, 0x8e, 0x71, 0x5d, 0xe8, 0xee, - 0x84, 0xf3, 0x30, 0x2a, 0x58, 0x1a, 0x80, 0xb8, 0xaa, 0xb8, 0x1d, 0xc4, - 0xae, 0x59, 0x91, 0xf3, 0x16, 0x9b, 0xa3, 0x8a, 0xa3, 0x26, 0xb2, 0x0a, - 0xe5, 0x58, 0xb7, 0x96, 0x87, 0xfb, 0x00, 0xe4, 0x50, 0x7c, 0xb1, 0x77, - 0x3a, 0x18, 0xc2, 0xe3, 0xc1, 0x12, 0xa6, 0x0d, 0x06, 0xeb, 0x80, 0x6c, - 0x5a, 0xee, 0x34, 0xcc, 0x1c, 0x87, 0x35, 0x46, 0x1d, 0x05, 0x83, 0xd8, - 0x91, 0x22, 0xaa, 0xf6, 0xad, 0x87, 0xab, 0x76, 0x18, 0x79, 0xe2, 0x09, - 0xc3, 0xa3, 0x15, 0x67, 0x3a, 0x7c, 
0x0f, 0xa0, 0x4c, 0x7b, 0xfc, 0xfc, - 0xdd, 0x5c, 0xe4, 0x86, 0x58, 0x13, 0xb8, 0x97, 0xae, 0x8c, 0x75, 0xc8, - 0x02, 0x1e, 0x33, 0x45, 0xa9, 0x54, 0x09, 0x15, 0x53, 0x4f, 0x28, 0x47, - 0x4d, 0x5f, 0xd0, 0xc7, 0x09, 0xbd, 0x93, 0xb0, 0x08, 0x79, 0x05, 0xbc, - 0xbc, 0xaf, 0x2c, 0xbd, 0xbb, 0x21, 0xd1, 0x60, 0xb8, 0x81, 0x4c, 0x6c, - 0x5e, 0x45, 0x39, 0xa3, 0x31, 0x54, 0xb7, 0x82, 0xef, 0x86, 0xe4, 0x5e, - 0xca, 0xd6, 0xb8, 0x31, 0xa2, 0x4c, 0x84, 0x5b, 0xac, 0xe5, 0x29, 0xbf, - 0xbf, 0x89, 0xb4, 0x4c, 0xd3, 0x69, 0x66, 0x50, 0xeb, 0xda, 0x7d, 0x00, - 0xbb, 0x45, 0x0f, 0xe1, 0xd1, 0x30, 0x1a, 0xc6, 0x94, 0x66, 0xdc, 0x01, - 0x75, 0xce, 0xf8, 0xfc, 0xd9, 0xce, 0xcf, 0x1f, 0x9e, 0x5a, 0x55, 0xa4, - 0x3e, 0xe6, 0x51, 0xc7, 0x74, 0x40, 0x82, 0x09, 0xea, 0xa0, 0xf5, 0xb2, - 0x70, 0x9f, 0x0e, 0xfb, 0x46, 0x8a, 0x69, 0xbf, 0x07, 0x92, 0xdc, 0x74, - 0x03, 0x70, 0xc6, 0x44, 0x81, 0x66, 0x40, 0xc7, 0xf5, 0xb8, 0xf0, 0x45, - 0x0f, 0xca, 0xd8, 0xb0, 0x9e, 0x48, 0x94, 0xff, 0x85, 0xcb, 0x7b, 0xec, - 0x67, 0x5d, 0xfe, 0xe9, 0x13, 0xd1, 0x67, 0x95, 0xd9, 0x35, 0x9e, 0x8a, - 0x53, 0x4d, 0x6b, 0x9d, 0x42, 0x53, 0xb1, 0x6b, 0x51, 0x1e, 0x35, 0x40, - 0x81, 0x92, 0x91, 0x5f, 0x1f, 0x8e, 0xbe, 0x37, 0xd3, 0x85, 0xab, 0x85, - 0x37, 0x1c, 0x0f, 0xae, 0xd9, 0xf7, 0xa2, 0x75, 0x3d, 0xd9, 0xd7, 0x2a, - 0x80, 0xb0, 0x4c, 0x14, 0x04, 0x40, 0xc5, 0xba, 0x0e, 0xbe, 0xab, 0xcc, - 0x38, 0x35, 0x62, 0x6c, 0xa5, 0xce, 0x49, 0x15, 0x2a, 0x10, 0xb5, 0x6a, - 0xd2, 0x3b, 0xd2, 0x6a, 0xad, 0x2e, 0x34, 0x46, 0x8b, 0x78, 0x57, 0x6e, - 0xc4, 0xde, 0x65, 0x68, 0x05, 0x8f, 0xd6, 0x6e, 0x34, 0xb9, 0xaa, 0x80, - 0x77, 0xff, 0x6c, 0x1a, 0x37, 0x87, 0xdd, 0x33, 0x13, 0x33, 0xa7, 0xa9, - 0x3a, 0x90, 0x32, 0x7b, 0x9b, 0x21, 0x31, 0xc8, 0xf5, 0x4c, 0xa6, 0x73, - 0x42, 0x79, 0x46, 0x14, 0x1b, 0xef, 0xf4, 0x78, 0xd9, 0x7e, 0x6f, 0x31, - 0xaa, 0x59, 0x97, 0x34, 0xe5, 0xe6, 0x67, 0xf3, 0x86, 0xf5, 0x61, 0xe7, - 0x51, 0x6d, 0xce, 0xb3, 0xdc, 0x86, 0xc7, 0x55, 0x43, 0xfa, 0x38, 0x78, - 0xb0, 0x8d, 0x03, 0x9c, 0xe4, 0x6c, 
0xca, 0x73, 0x94, 0xa1, 0x0c, 0xb8, - 0x11, 0xda, 0x0c, 0x0b, 0x18, 0x1b, 0xd0, 0x99, 0xe7, 0xa9, 0x0d, 0xc3, - 0x36, 0xd7, 0x8c, 0x16, 0xad, 0x16, 0x1f, 0xb2, 0x3c, 0x07, 0x32, 0x11, - 0x6c, 0xd2, 0x8f, 0x33, 0x37, 0x5c, 0x3e, 0x4f, 0x7a, 0x76, 0xf7, 0x85, - 0xcc, 0x68, 0x1a, 0xf9, 0x26, 0x74, 0x42, 0xc9, 0xea, 0x21, 0x7e, 0x74, - 0x3c, 0x4f, 0xde, 0xfb, 0xd7, 0x83, 0x62, 0x12, 0xc7, 0x4f, 0xfc, 0x47, - 0x18, 0x9d, 0xc5, 0xf5, 0xe9, 0xd7, 0xaa, 0x76, 0x20, 0x99, 0x79, 0xae, - 0x9b, 0x7a, 0xde, 0x8b, 0x95, 0xc2, 0xa5, 0xa3, 0x6a, 0x30, 0x9b, 0x99, - 0x63, 0x34, 0x7c, 0xd1, 0x53, 0xa1, 0x6c, 0xd6, 0xed, 0x7d, 0x8c, 0xba, - 0xc8, 0x21, 0xf3, 0xe1, 0x31, 0x55, 0x3d, 0x88, 0x87, 0x04, 0xc7, 0xc9, - 0x65, 0x0c, 0x53, 0x1e, 0xd4, 0xd9, 0xaa, 0xda, 0xc2, 0x14, 0x88, 0xf2, - 0x07, 0x2c, 0x12, 0x4d, 0x79, 0x54, 0xaa, 0xd9, 0x47, 0x95, 0xf9, 0x7e, - 0x26, 0x89, 0x4b, 0x63, 0x7e, 0x44, 0x06, 0x0e, 0xe2, 0x8d, 0x9a, 0x0a, - 0xc3, 0xee, 0x55, 0x13, 0x55, 0x04, 0xcc, 0xb5, 0x2e, 0xa0, 0x0d, 0xec, - 0x76, 0x84, 0xc1, 0x1e, 0xdd, 0xe6, 0xfa, 0x54, 0x6e, 0x38, 0x30, 0x6f, - 0xcc, 0xa4, 0x8d, 0x76, 0x1e, 0xa3, 0x8e, 0x2c, 0x5e, 0x37, 0xeb, 0x0b, - 0xf4, 0xb5, 0x80, 0xde, 0x58, 0x13, 0x5a, 0x52, 0xdc, 0x65, 0x99, 0x1a, - 0x1b, 0x75, 0x0c, 0xbd, 0x83, 0xe8, 0x90, 0x8e, 0xa9, 0xbf, 0x42, 0x22, - 0xe1, 0x3a, 0x31, 0x4e, 0x54, 0xad, 0xd4, 0x6f, 0x80, 0xb4, 0xb5, 0x82, - 0x05, 0x20, 0xd7, 0x38, 0xd7, 0xeb, 0x25, 0x33, 0xe9, 0x4b, 0xc3, 0x5e, - 0xd1, 0x11, 0xb0, 0xd9, 0x8e, 0x90, 0x48, 0x2a, 0xe3, 0xa0, 0x60, 0x16, - 0x70, 0xe3, 0xd1, 0x45, 0x11, 0x64, 0x91, 0x69, 0x87, 0x1c, 0xbb, 0x91, - 0xc4, 0x43, 0x12, 0x62, 0x99, 0x69, 0xe5, 0x96, 0x01, 0x15, 0xdb, 0xdf, - 0x05, 0x55, 0x34, 0xbb, 0xd6, 0x76, 0x89, 0xcd, 0xb5, 0x4f, 0x2e, 0xa7, - 0x6e, 0x15, 0xc9, 0xc0, 0x8e, 0xa8, 0x63, 0x79, 0x12, 0xfb, 0x7e, 0x69, - 0x8f, 0x52, 0x5e, 0xe7, 0x76, 0x16, 0x28, 0x76, 0xca, 0xcb, 0xd8, 0x0e, - 0x4a, 0x93, 0x9d, 0x16, 0x68, 0x98, 0xf8, 0xc3, 0x39, 0xb2, 0x2d, 0xea, - 0xba, 0x72, 0x16, 0x33, 0xb7, 0xec, 
0x61, 0x9e, 0x94, 0x32, 0x01, 0x22, - 0xde, 0x66, 0xfd, 0x68, 0xfa, 0xcf, 0xf2, 0x52, 0x4f, 0x02, 0xe8, 0x25, - 0xd3, 0xa3, 0x5b, 0x29, 0xae, 0xe9, 0x62, 0xfa, 0xd6, 0x1a, 0x50, 0x80, - 0x95, 0x96, 0xdf, 0x00, 0xfc, 0x23, 0xf1, 0x95, 0xef, 0xbb, 0xf5, 0x23, - 0x9d, 0x6b, 0xd6, 0xed, 0xb4, 0xe2, 0x4a, 0xf6, 0xb8, 0x20, 0x83, 0x6b, - 0x45, 0x92, 0x29, 0x5a, 0x02, 0xe9, 0xf7, 0x8e, 0x5c, 0x02, 0xde, 0xb4, - 0x9a, 0xdf, 0x18, 0x10, 0x17, 0x7f, 0xd8, 0x2e, 0x17, 0xc0, 0xf0, 0x6b, - 0x3b, 0x88, 0x09, 0x58, 0xf2, 0x18, 0x22, 0x09, 0x80, 0x4a, 0xe0, 0x51, - 0x6f, 0x7a, 0x70, 0x09, 0x1f, 0xe5, 0xfa, 0xa9, 0x4d, 0x24, 0x1f, 0x18, - 0x1c, 0x74, 0xcd, 0x87, 0x04, 0xfd, 0x85, 0x33, 0x4c, 0x28, 0xbd, 0xa3, - 0x66, 0x6c, 0x99, 0x7e, 0x50, 0x5e, 0xb5, 0x22, 0x33, 0x92, 0xd4, 0xd8, - 0x82, 0x4e, 0x38, 0xbe, 0xcb, 0x3d, 0x5f, 0x19, 0xd1, 0x0f, 0x8b, 0xa1, - 0x78, 0x08, 0x1c, 0x10, 0x0b, 0x77, 0xa7, 0x39, 0x2e, 0x91, 0x83, 0xee, - 0x1d, 0x36, 0xd8, 0x77, 0x87, 0x8a, 0x38, 0x45, 0x3c, 0xbd, 0xb9, 0x88, - 0xbb, 0x1b, 0x20, 0xd1, 0x95, 0xb9, 0x8f, 0x03, 0x46, 0xfa, 0xab, 0x70, - 0x68, 0x26, 0xd9, 0xb1, 0x25, 0x52, 0x5a, 0x77, 0x2d, 0x92, 0xc2, 0x1d, - 0xb6, 0x6e, 0xec, 0x67, 0xef, 0x34, 0xe2, 0x64, 0xb3, 0xa0, 0xae, 0x0c, - 0xd9, 0x36, 0xa1, 0xc7, 0xd8, 0xbf, 0x7a, 0x43, 0xbf, 0xc0, 0xc6, 0x90, - 0x60, 0x6a, 0x23, 0xc0, 0x6a, 0x5d, 0x62, 0x18, 0xac, 0xc1, 0x20, 0x35, - 0x17, 0xba, 0x4e, 0x54, 0xb7, 0xec, 0xd4, 0xad, 0x99, 0x94, 0xa4, 0xda, - 0x57, 0xe7, 0x46, 0xed, 0x47, 0xd1, 0xb4, 0xa2, 0x3e, 0x0f, 0x4a, 0xb6, - 0xa6, 0x68, 0x3e, 0x94, 0xb9, 0x18, 0x30, 0xe0, 0x75, 0x08, 0xe8, 0xf3, - 0x21, 0x79, 0x26, 0x68, 0x6a, 0x65, 0xb6, 0xbe, 0x03, 0x98, 0x8f, 0x04, - 0xad, 0x1e, 0xb0, 0x54, 0xd2, 0x28, 0xdd, 0x4a, 0xe9, 0xf3, 0xa0, 0x06, - 0xbf, 0x0b, 0x2a, 0xee, 0xf8, 0x03, 0x7e, 0x1d, 0x37, 0xc1, 0x32, 0xd1, - 0x41, 0xf4, 0x9b, 0xc5, 0x02, 0x10, 0x6f, 0x55, 0x5a, 0xec, 0x5b, 0xe7, - 0x61, 0x05, 0x17, 0xf0, 0xf8, 0xc6, 0x89, 0xe8, 0xad, 0x32, 0x57, 0x14, - 0xe5, 0xf8, 0xf5, 0x88, 0xd9, 0x73, 
0x17, 0x10, 0xa7, 0xc3, 0xf8, 0x78, - 0x0b, 0x66, 0xab, 0x63, 0x4f, 0x96, 0x5d, 0xdf, 0x36, 0x83, 0xc4, 0x6f, - 0x20, 0xbd, 0xcb, 0x4c, 0xd2, 0xfa, 0x35, 0x87, 0xd8, 0xb6, 0xbb, 0xcc, - 0xb6, 0xd2, 0x85, 0x03, 0x6a, 0xea, 0xbb, 0x6d, 0x2f, 0xa2, 0x06, 0xc0, - 0xd6, 0x68, 0xd9, 0x7f, 0xd6, 0xa2, 0x3b, 0x08, 0x6a, 0x98, 0x26, 0x6d, - 0x9a, 0x2b, 0x68, 0x51, 0x78, 0xde, 0xa6, 0x96, 0x50, 0x7b, 0xfc, 0x03, - 0x43, 0xf8, 0x21, 0x01, 0x9d, 0xe2, 0x89, 0x65, 0x47, 0xae, 0x9c, 0x45, - 0x5e, 0xa5, 0xce, 0x97, 0xb3, 0xe6, 0xf6, 0xd4, 0x5a, 0xe8, 0x6b, 0x87, - 0xd6, 0xdf, 0xfb, 0x1f, 0xaf, 0xfb, 0xaf, 0x19, 0xa5, 0xfd, 0xba, 0xe0, - 0x22, 0x2f, 0x91, 0x97, 0xdf, 0xae, 0xe9, 0x39, 0xb1, 0xe4, 0xd3, 0x10, - 0xcb, 0xb3, 0x03, 0xb5, 0x0b, 0xf0, 0xd9, 0x70, 0x1e, 0x9c, 0x63, 0x6f, - 0x3a, 0xcf, 0x3c, 0x1b, 0x86, 0xa3, 0xad, 0x1a, 0xe7, 0x4c, 0x09, 0xd0, - 0x80, 0xf6, 0x8b, 0x72, 0x96, 0x53, 0x7e, 0x66, 0xfb, 0x7c, 0x7c, 0x8a, - 0xb0, 0x60, 0xa6, 0x4c, 0x20, 0xc4, 0x63, 0x69, 0x6a, 0xc3, 0x53, 0xf8, - 0x9a, 0x28, 0x30, 0x9d, 0x6f, 0x0e, 0x1b, 0xb2, 0x2c, 0xe6, 0x94, 0x9f, - 0xfc, 0xc0, 0x8d, 0x71, 0xbe, 0x37, 0xa6, 0xc9, 0xbd, 0x3c, 0x4a, 0xf3, - 0xc4, 0xb3, 0x88, 0x4c, 0x45, 0x26, 0x4e, 0x2f, 0x83, 0x16, 0x70, 0xb6, - 0xc7, 0xb2, 0x36, 0xf0, 0x0c, 0x67, 0xd2, 0x0a, 0xd3, 0xd9, 0x7c, 0x35, - 0x29, 0xac, 0xd4, 0x9c, 0x6d, 0xfc, 0xec, 0x58, 0x92, 0xf0, 0xba, 0x32, - 0x00, 0xae, 0xb1, 0xeb, 0x4d, 0x8c, 0x1a, 0x20, 0xe7, 0x5c, 0xfc, 0x9a, - 0x4d, 0x51, 0x24, 0x7b, 0x52, 0xeb, 0x13, 0x3d, 0xb4, 0xab, 0xda, 0xb3, - 0x74, 0x39, 0xd2, 0xf8, 0x2d, 0xef, 0x9b, 0x0f, 0xae, 0xf5, 0x3c, 0x99, - 0x34, 0xbe, 0x15, 0x5c, 0x9f, 0x5d, 0xae, 0xf4, 0x72, 0xc2, 0xac, 0x06, - 0xbe, 0xad, 0xe4, 0x68, 0xea, 0xd5, 0xa1, 0xdc, 0xdb, 0xf4, 0x61, 0x51, - 0xf5, 0x1a, 0x62, 0x15, 0xfd, 0x00, 0x51, 0x35, 0x53, 0x6c, 0x39, 0x3e, - 0xdb, 0x60, 0x0a, 0x52, 0xc1, 0x52, 0x3c, 0xd7, 0xab, 0x73, 0xea, 0x1e, - 0x38, 0x38, 0x65, 0x35, 0x35, 0x2b, 0x28, 0x04, 0x5c, 0x82, 0xea, 0x4a, - 0x9e, 0x96, 0x72, 0xa4, 0x8e, 0x42, 
0xfd, 0x55, 0xa8, 0x66, 0x7a, 0x40, - 0xc9, 0xf2, 0xc2, 0x1e, 0x5d, 0x09, 0x90, 0x32, 0x18, 0xdb, 0x11, 0x4c, - 0x6c, 0x9c, 0x27, 0x62, 0x0a, 0xe6, 0xc1, 0xdf, 0xf2, 0x6a, 0x8c, 0x26, - 0xb4, 0xfb, 0xda, 0xa9, 0x08, 0x10, 0x3a, 0xf0, 0xe1, 0x64, 0xe5, 0x03, - 0x81, 0x7d, 0x15, 0x74, 0xa1, 0x8d, 0x10, 0xc8, 0xbb, 0x6a, 0x7c, 0x60, - 0xa1, 0x09, 0x35, 0x19, 0x2d, 0x70, 0xb5, 0x36, 0xc8, 0x8b, 0x66, 0x5f, - 0xe0, 0xe7, 0xea, 0x70, 0x2f, 0x5d, 0x3f, 0xae, 0x5e, 0x25, 0x84, 0xdd, - 0x9b, 0x69, 0x44, 0x37, 0x7c, 0x6b, 0x9e, 0x81, 0x18, 0x36, 0x4b, 0xff, - 0x86, 0x44, 0x2a, 0x39, 0x66, 0x7f, 0x71, 0x43, 0xe7, 0x65, 0xfe, 0xfd, - 0x34, 0xb9, 0xd9, 0x5a, 0x00, 0xd1, 0x41, 0x43, 0xc7, 0xbc, 0x65, 0x68, - 0xb7, 0x73, 0xff, 0x19, 0xd3, 0xed, 0x15, 0xa4, 0x67, 0xa1, 0x53, 0x0e, - 0xa6, 0xfb, 0x25, 0xce, 0x9d, 0x5b, 0x73, 0x08, 0xf3, 0x3b, 0x69, 0xe4, - 0x94, 0x9b, 0x94, 0x03, 0xb3, 0x8a, 0x2e, 0x07, 0x0c, 0xef, 0x18, 0x4c, - 0x2b, 0x1c, 0x83, 0x9f, 0x25, 0x20, 0x29, 0x72, 0x11, 0xa0, 0xaa, 0xed, - 0x0c, 0xf9, 0xce, 0x94, 0x0d, 0x7a, 0xb6, 0xb3, 0xa4, 0x57, 0xd6, 0x61, - 0xca, 0x1a, 0x0e, 0x89, 0x6d, 0x99, 0x4d, 0x06, 0xcd, 0x83, 0x7e, 0x09, - 0x14, 0x5b, 0xe7, 0x4c, 0x72, 0xa8, 0x98, 0xc8, 0x27, 0xf3, 0x70, 0x89, - 0x87, 0x11, 0xbb, 0x98, 0x82, 0x77, 0x9d, 0xaa, 0x95, 0x8c, 0xc1, 0xf8, - 0x39, 0x27, 0xd5, 0x64, 0x59, 0x6a, 0x8c, 0xbe, 0xe2, 0xe1, 0xd1, 0x6b, - 0xe3, 0xaf, 0x30, 0x6f, 0xf4, 0x9e, 0x35, 0x0b, 0x10, 0x24, 0x77, 0xd8, - 0xa4, 0x30, 0x2e, 0xf7, 0x97, 0xfd, 0xef, 0x1e, 0x9e, 0xf2, 0xbd, 0xf2, - 0x41, 0x73, 0x19, 0xe6, 0x7b, 0x7f, 0x74, 0x11, 0x91, 0x38, 0xc5, 0xac, - 0xd5, 0xb0, 0x48, 0xc4, 0xe9, 0x41, 0xd4, 0x50, 0x76, 0x13, 0xbf, 0xec, - 0xe8, 0x3a, 0xa8, 0x84, 0x42, 0x98, 0x12, 0x64, 0x95, 0x85, 0x79, 0x29, - 0xea, 0x3a, 0xf9, 0xa4, 0x5c, 0x9c, 0x35, 0x01, 0x68, 0x71, 0xb9, 0x5b, - 0xbe, 0xaa, 0x76, 0x9e, 0x63, 0x1c, 0xc1, 0x83, 0x94, 0xc6, 0x89, 0x2b, - 0x1d, 0x00, 0x43, 0x74, 0x00, 0x41, 0x93, 0x58, 0x52, 0xf9, 0x13, 0xfe, - 0x9f, 0x7a, 0xb7, 0x3d, 0x6b, 0x70, 
0x4e, 0x4f, 0x8f, 0xf4, 0x9c, 0xe4, - 0x97, 0x62, 0xaf, 0x69, 0x45, 0xec, 0xf4, 0x53, 0x71, 0xdc, 0xc7, 0x8d, - 0x6f, 0xb2, 0x9d, 0xec, 0x43, 0xdd, 0xc0, 0xe5, 0xd1, 0x6c, 0x1a, 0x82, - 0x19, 0xf6, 0x18, 0xd3, 0x59, 0x0e, 0x07, 0x81, 0x5a, 0x23, 0x10, 0x8b, - 0xaa, 0x0b, 0x99, 0xc8, 0x34, 0xc2, 0xd0, 0xa9, 0x69, 0x7f, 0x54, 0xe3, - 0xc4, 0xa0, 0xe7, 0x4b, 0x31, 0x90, 0xe7, 0x3b, 0x45, 0x9b, 0x7f, 0xae, - 0xd2, 0xab, 0x22, 0xb9, 0xfc, 0x07, 0x39, 0x4b, 0x45, 0x83, 0x8d, 0x41, - 0x7a, 0x52, 0xb2, 0xae, 0x71, 0x78, 0x17, 0x63, 0xfa, 0xbe, 0x59, 0xca, - 0xf0, 0xfd, 0x68, 0xe5, 0xc4, 0x9a, 0x74, 0x3d, 0xec, 0xd4, 0x8b, 0xa1, - 0x2c, 0x31, 0x4d, 0x73, 0xfd, 0x5c, 0x1e, 0xeb, 0x5f, 0xf6, 0x42, 0x0d, - 0x79, 0x5f, 0x64, 0x10, 0xae, 0xb2, 0xf6, 0x9e, 0xa8, 0xab, 0xa5, 0x2b, - 0x9a, 0xcf, 0x25, 0xfa, 0xa2, 0xb3, 0xdc, 0x30, 0x3d, 0x08, 0x4e, 0xbb, - 0x7b, 0x0c, 0x28, 0x34, 0x9d, 0xda, 0xc4, 0x94, 0xa4, 0xf4, 0x1e, 0x78, - 0x8b, 0xa9, 0xd3, 0xa7, 0x1c, 0x2a, 0x27, 0x14, 0xa0, 0x44, 0x1a, 0x9a, - 0x87, 0x72, 0xa5, 0x6d, 0x69, 0x46, 0xe5, 0xc1, 0x4f, 0x29, 0x87, 0xc0, - 0xa7, 0xa8, 0x96, 0xde, 0xa9, 0x63, 0x08, 0xd8, 0x4a, 0xa1, 0x25, 0x43, - 0x76, 0x41, 0xf7, 0x9f, 0x17, 0xe3, 0xe1, 0x4b, 0xc6, 0x2b, 0x79, 0xea, - 0xd5, 0xa7, 0x72, 0x16, 0x0a, 0x8c, 0xcd, 0x49, 0x70, 0x75, 0xd4, 0x59, - 0x4a, 0x19, 0x7b, 0x31, 0x02, 0x7a, 0x3a, 0x20, 0x15, 0x62, 0x7e, 0x4e, - 0x6f, 0xac, 0xd0, 0xd1, 0x29, 0xbd, 0x2d, 0xa1, 0xc6, 0x3e, 0xa6, 0x1a, - 0x26, 0x18, 0x96, 0x98, 0x12, 0x56, 0x37, 0xbf, 0xb4, 0x91, 0x57, 0xe8, - 0xda, 0x61, 0x7c, 0x2f, 0x3e, 0xd4, 0x51, 0xfe, 0xe8, 0x5b, 0x00, 0x30, - 0x08, 0xf6, 0x4e, 0x69, 0xa8, 0x1a, 0x2b, 0x82, 0x41, 0x85, 0xa9, 0xd9, - 0x3c, 0xc8, 0x02, 0x91, 0x99, 0xd4, 0xa2, 0xfd, 0x9d, 0x1b, 0x08, 0xfc, - 0x41, 0x3e, 0x10, 0x6b, 0x80, 0x74, 0x3d, 0x72, 0x61, 0x97, 0xdd, 0x96, - 0xec, 0xf4, 0xd6, 0x6d, 0x68, 0x02, 0x6e, 0xbb, 0x55, 0x9d, 0x6f, 0x11, - 0xde, 0xd1, 0xad, 0x6d, 0x42, 0x96, 0x2c, 0x42, 0x1e, 0xa9, 0x19, 0x42, - 0x22, 0x38, 0x38, 0x18, 0x3c, 0x4b, 
0xc1, 0x9c, 0x0f, 0xe1, 0x34, 0x61, - 0x06, 0x77, 0x54, 0x04, 0xe0, 0x87, 0x94, 0x5c, 0xc9, 0xa1, 0x35, 0x55, - 0x3d, 0x4a, 0xf2, 0x4f, 0x05, 0x11, 0x98, 0x6f, 0x3c, 0x85, 0x84, 0xe6, - 0xf8, 0x71, 0x8a, 0xdf, 0xe9, 0x9a, 0xe3, 0x70, 0xd6, 0x36, 0xd6, 0xc8, - 0x66, 0x3e, 0xba, 0x7c, 0x0a, 0x23, 0x0a, 0xd0, 0xb6, 0x66, 0x68, 0xa8, - 0xdf, 0x37, 0x17, 0xfb, 0xdd, 0x9c, 0x8b, 0xc7, 0x8e, 0xc4, 0x4f, 0x40, - 0x08, 0x23, 0x58, 0x15, 0xa2, 0xba, 0xef, 0xdf, 0x67, 0xcd, 0x1f, 0xb6, - 0xc4, 0xea, 0xce, 0x81, 0x38, 0x58, 0x92, 0x57, 0xcf, 0x83, 0x47, 0x29, - 0x9f, 0xde, 0x9b, 0xde, 0x01, 0xfe, 0x68, 0x91, 0x67, 0x06, 0x9d, 0x31, - 0xd0, 0xb9, 0xc3, 0xbb, 0xc3, 0x6b, 0xa0, 0x04, 0x1e, 0x34, 0xd5, 0x38, - 0xd4, 0xac, 0x70, 0xae, 0xab, 0xb2, 0xbd, 0x4b, 0xa0, 0xad, 0x2b, 0x82, - 0xaf, 0x8c, 0x90, 0x4d, 0xd3, 0xca, 0x71, 0x35, 0x75, 0x89, 0xe5, 0x42, - 0x91, 0x46, 0x8d, 0x18, 0x04, 0x7a, 0xb9, 0xaa, 0x3b, 0xe7, 0x1e, 0x8c, - 0x4e, 0xf9, 0x6e, 0x74, 0xaa, 0x2e, 0x36, 0x86, 0xfb, 0xef, 0x9c, 0xd7, - 0xba, 0x5e, 0x2e, 0x3c, 0x40, 0xce, 0x8b, 0x2b, 0x94, 0x55, 0xf2, 0xd4, - 0x7d, 0xbf, 0x8c, 0x8a, 0xa8, 0x59, 0x84, 0x6f, 0x32, 0x95, 0xc5, 0xcc, - 0xad, 0xee, 0x30, 0x23, 0x7c, 0x54, 0xea, 0x60, 0xb8, 0x88, 0x12, 0x45, - 0x03, 0xbc, 0xe3, 0x92, 0x9f, 0xa8, 0x5b, 0x07, 0x97, 0x53, 0x0d, 0xe1, - 0xe3, 0x3d, 0xdf, 0xf2, 0x2a, 0x12, 0xee, 0xdf, 0x73, 0x8d, 0x41, 0xf4, - 0xe4, 0x2c, 0xb4, 0xd4, 0x9e, 0xfe, 0xf2, 0xe6, 0xa0, 0x9e, 0x2a, 0x3a, - 0x36, 0x26, 0x7e, 0xd9, 0xe1, 0x22, 0xee, 0x0b, 0x5b, 0x48, 0xd2, 0xa9, - 0x55, 0xab, 0x50, 0x7c, 0xf6, 0xc8, 0x56, 0x31, 0xbb, 0x51, 0xe9, 0x31, - 0x4d, 0xaa, 0x13, 0x3a, 0x99, 0x9f, 0x8c, 0x59, 0x6a, 0xc9, 0xf1, 0x0a, - 0x89, 0xcc, 0x39, 0x98, 0xbd, 0xc3, 0x93, 0x97, 0x28, 0xe5, 0x73, 0x94, - 0xf2, 0x0a, 0x7a, 0x09, 0x38, 0x0b, 0xab, 0xd8, 0x49, 0x98, 0x14, 0x34, - 0x32, 0x9d, 0xef, 0x9d, 0x47, 0xdb, 0x82, 0xb9, 0x84, 0xd6, 0xd7, 0x9f, - 0xf7, 0xdf, 0x79, 0x5b, 0xe8, 0x92, 0x44, 0x31, 0x5d, 0x42, 0x80, 0x90, - 0x8d, 0x36, 0xa2, 0x39, 0x02, 0x64, 
0x21, 0xa2, 0xb8, 0xfc, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xc8, 0xeb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0xd8, 0x03, 0x00, 0x00, 0xdc, 0x03, 0x00, 0x00, - 0xe0, 0x03, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x04, 0x03, 0x00, 0x00, 0xac, 0x02, 0x00, 0x00, - 0x74, 0x02, 0x00, 0x00, 0x2c, 0x02, 0x00, 0x00, 0xf4, 0x01, 0x00, 0x00, - 0xac, 0x01, 0x00, 0x00, 0x74, 0x01, 0x00, 0x00, 0x2c, 0x01, 0x00, 0x00, - 0xe4, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x9e, 0xfc, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x5e, 0xfd, 0xff, 0xff, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x96, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x88, 0xfd, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x31, 0x00, 0x00, 0x00, 0xca, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x78, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x00, - 0x0e, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 
0x00, 0x00, 0x09, 0x00, 0x00, 0x00, - 0xbc, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x52, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x21, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x96, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x88, 0xfe, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x78, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x0e, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x42, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xf0, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x11, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x86, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x78, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xba, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x68, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x14, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 
0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, - 0x34, 0x10, 0x00, 0x00, 0xd4, 0x0f, 0x00, 0x00, 0x7c, 0x0f, 0x00, 0x00, - 0x38, 0x0f, 0x00, 0x00, 0xdc, 0x0e, 0x00, 0x00, 0x74, 0x0e, 0x00, 0x00, - 0x2c, 0x0e, 0x00, 0x00, 0xe8, 0x0d, 0x00, 0x00, 0x90, 0x0d, 0x00, 0x00, - 0x48, 0x0d, 0x00, 0x00, 0x04, 0x0d, 0x00, 0x00, 0xc0, 0x0c, 0x00, 0x00, - 0x64, 0x0c, 0x00, 0x00, 0x0c, 0x0c, 0x00, 0x00, 0xc4, 0x0b, 0x00, 0x00, - 0x80, 0x0b, 0x00, 0x00, 0x28, 0x0b, 0x00, 0x00, 0xe0, 0x0a, 0x00, 0x00, - 0x9c, 0x0a, 0x00, 0x00, 0x58, 0x0a, 0x00, 0x00, 0xfc, 0x09, 0x00, 0x00, - 0xa4, 0x09, 0x00, 0x00, 0x5c, 0x09, 0x00, 0x00, 0x18, 0x09, 0x00, 0x00, - 0xc0, 0x08, 0x00, 0x00, 0x78, 0x08, 0x00, 0x00, 0x34, 0x08, 0x00, 0x00, - 0xf0, 0x07, 0x00, 0x00, 0x94, 0x07, 0x00, 0x00, 0x3c, 0x07, 0x00, 0x00, - 0xf4, 0x06, 0x00, 0x00, 0xb0, 0x06, 0x00, 0x00, 0x58, 0x06, 0x00, 0x00, - 0x10, 0x06, 0x00, 0x00, 0xcc, 0x05, 0x00, 0x00, 0x88, 0x05, 0x00, 0x00, - 0x2c, 0x05, 0x00, 0x00, 0xd4, 0x04, 0x00, 0x00, 0x8c, 0x04, 0x00, 0x00, - 0x48, 0x04, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x08, 0x03, 0x00, 0x00, 0xc4, 0x02, 0x00, 0x00, - 0x80, 0x02, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0xcc, 0x01, 0x00, 0x00, - 0x84, 0x01, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0xfc, 0xf0, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x38, 0x02, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0xf2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf1, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0a, 0xd7, 0x23, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x32, 0xf1, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x40, 0x00, 0x00, 0x00, - 0x24, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x37, 0x01, 0x00, 0x00, 0x00, - 0xc2, 0xff, 0x7f, 0x3f, 0x01, 0x00, 0x00, 0x00, 0xd2, 0x6f, 0x75, 0x36, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x8a, 0xf1, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x06, 0x16, 0x49, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x87, 0x19, 0xb1, 0x40, 0x01, 0x00, 0x00, 0x00, 0x58, 0x80, 0xdf, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x3a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xf2, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x5d, 0xd1, 0xce, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x7a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xf2, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x23, 0x20, 0xb6, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x62, 0xf2, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xa2, 0x5a, 0x91, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x47, 0xc9, 0x90, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x00, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xac, 0xf2, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x81, 0xb7, 0xf1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9e, 0xb5, 0x71, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x20, 0x70, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x6a, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x5c, 0xf3, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x97, 0x35, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xaa, 0xf3, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0x9c, 0xf3, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0xf5, 0x1f, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xea, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf3, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xc7, 0xea, 0x1a, 0x3c, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xd2, 0xf3, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0xb2, 0x78, 0x3f, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x39, 0xb9, 0x3e, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70, 0xf5, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x89, 0x25, 0xf2, 0x39, 0x01, 0x00, 0x00, 0x00, - 0xde, 0xdc, 0x1d, 0x41, 0x01, 0x00, 0x00, 0x00, 0xa5, 0x23, 0x72, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0xda, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xf4, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x42, 0xe0, 
0x90, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x1a, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x1a, 0x2a, 0x19, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x5a, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xe9, 0x36, 0xdd, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x42, 0xf5, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0xdd, 0x43, 0x7e, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x99, 0x45, 0x7d, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xe0, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x8c, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x5c, 0xfd, 0xa9, 0x39, 0x01, 0x00, 0x00, 0x00, 0x1e, 0xaa, 0x87, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x08, 0xfc, 0x29, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x4a, 0xf6, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x55, 0xf7, 0x52, 0x35, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x8a, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x7c, 0xf6, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd0, 0xda, 0x1e, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xca, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0xbc, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x8e, 0x0b, 0xa8, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xb2, 0xf6, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0xa4, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x12, 0x1c, 0x6e, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0xdd, 0x4a, 0x00, 0x41, 0x01, 0x00, 0x00, 0x00, 0x31, 0xc6, 0xd9, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x62, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x54, 0xf7, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x9d, 
0x16, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xa2, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xf7, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x34, 0xab, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x8a, 0xf7, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x2e, 0x36, 0xe1, 0x3c, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0x54, 0xe0, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x28, 0xf9, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xd4, 0xf7, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xe1, 0xd0, 0xa2, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9b, 0xcf, 0x22, 0x41, 0x01, 0x00, 0x00, 0x00, - 0xea, 0x23, 0x12, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x92, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x84, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x99, 0xd3, 0xf7, 0x34, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf8, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0xc4, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0xd5, 0xc2, 0x3a, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x12, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0x04, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x8f, 0x84, 0xa2, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xfa, 0xf8, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xec, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x64, 0xeb, 0x8e, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x3b, 0xf3, 0x17, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xb7, 0xc5, 0x04, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xaa, 0xf9, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x9c, 0xf9, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x92, 0xa8, 0x98, 0x39, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xea, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x58, 0x76, 
0xb9, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf9, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x43, 0xb8, 0x52, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x8b, 0xe5, 0x51, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70, 0xfb, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xa1, 0xf0, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0xa0, 0x70, 0x41, 0x01, 0x00, 0x00, 0x00, 0x87, 0x08, 0x65, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, - 0xda, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xfa, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xcc, 0x98, 0x41, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xed, 0xf5, 0xcd, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x5a, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0b, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x9d, 0xca, 0xd4, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x42, 0xfb, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x58, 0xce, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x49, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x06, 0x52, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xf2, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0xe4, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x9b, 0x9c, 0xe1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x32, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x24, 0xfc, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0xb6, 0xc3, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfc, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x0c, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 
0x00, 0x00, 0x94, 0x8d, 0x93, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x06, 0xfa, 0x92, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xb8, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x64, 0xfc, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x7a, 0xf6, 0x5f, 0x3a, 0x01, 0x00, 0x00, 0x00, 0xba, 0xf4, 0xdf, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xf4, 0x7c, 0xcf, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x22, 0xfd, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x14, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x46, 0x2f, 0xc4, 0x35, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x54, 0xfd, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x8f, 0x3f, 0xe0, 0x3a, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0xa2, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x25, 0xd7, 0xa9, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x8a, 0xfd, 0xff, 0xff, 0x0c, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xc4, 0xf4, 0x39, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xf4, 0x1f, 0xe3, 0x41, 0x01, 0x00, 0x00, 0x00, 0xaa, 0x55, 0x8f, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x3a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xfe, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x4b, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x7a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xfe, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xd7, 0xdf, 0xc3, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfe, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x68, 0xa8, 0x04, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xc0, 0x23, 0x04, 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0xbc, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x3b, 0xda, 0x75, 0x3b, 0x01, 0x00, 0x00, 0x00, 0x4f, 0xd8, 0xf5, 0x42, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x2a, 0x61, 0xc2, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x7a, 0xff, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x6c, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xcf, 0x37, 0x69, 0x37, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xba, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0xac, 0xff, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x14, 0xd8, 0x72, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x30, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd4, 0x42, 0x16, 0x3c, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x10, 0x00, 0x0c, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x09, 0x4c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x41, 0x5b, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x66, 0x66, 0x5a, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, - 0x0f, 0x00, 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, - 0xa4, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, - 0x5c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x0b, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x96, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x72, - 0x9e, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, 0xa6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xae, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xb6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xbe, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x1b, 0xc6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xd6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xde, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xfa, 0xff, 0xff, 0xff, - 0x00, 0x1b, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x09, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x1b}; - -const unsigned int g_keyword_scrambled_model_data_length = 34520; diff --git 
a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h b/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h deleted file mode 100644 index ce34426..0000000 --- a/examples/keyword_benchmark/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ -#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ - -extern const unsigned char g_keyword_scrambled_model_data[]; -extern const unsigned int g_keyword_scrambled_model_data_length; - -#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ diff --git a/examples/magic_wand/CMakeLists.txt b/examples/magic_wand/CMakeLists.txt deleted file mode 100644 index 634275a..0000000 --- a/examples/magic_wand/CMakeLists.txt +++ /dev/null @@ -1,185 +0,0 @@ - -cmake_minimum_required(VERSION 3.12) - -project(magic_wand C CXX ASM) -set(CMAKE_C_STANDARD 11) -set(CMAKE_CXX_STANDARD 11) - - -add_executable(gesture_predictor_test "") - -target_include_directories(gesture_predictor_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - gesture_predictor_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(gesture_predictor_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/gesture_predictor.cpp - ${CMAKE_CURRENT_LIST_DIR}/gesture_predictor_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/constants.h - ${CMAKE_CURRENT_LIST_DIR}/gesture_predictor.h -) - -target_link_libraries( - gesture_predictor_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(gesture_predictor_test) - - -add_executable(gesture_output_handler_test "") - -target_include_directories(gesture_output_handler_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - gesture_output_handler_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(gesture_output_handler_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/output_handler.cpp - ${CMAKE_CURRENT_LIST_DIR}/output_handler_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/output_handler.h -) - -target_link_libraries( - gesture_output_handler_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(gesture_output_handler_test) - - -add_executable(magic_wand "") - -target_include_directories(magic_wand - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - magic_wand - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(magic_wand - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/accelerometer_handler.cpp - ${CMAKE_CURRENT_LIST_DIR}/gesture_predictor.cpp - ${CMAKE_CURRENT_LIST_DIR}/magic_wand_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/main.cpp - ${CMAKE_CURRENT_LIST_DIR}/main_functions.cpp - ${CMAKE_CURRENT_LIST_DIR}/output_handler.cpp - ${CMAKE_CURRENT_LIST_DIR}/accelerometer_handler.h - ${CMAKE_CURRENT_LIST_DIR}/constants.h - ${CMAKE_CURRENT_LIST_DIR}/gesture_predictor.h - ${CMAKE_CURRENT_LIST_DIR}/magic_wand_model_data.h - ${CMAKE_CURRENT_LIST_DIR}/main_functions.h - ${CMAKE_CURRENT_LIST_DIR}/output_handler.h -) - -target_link_libraries( - magic_wand - pico-tflmicro - hardware_pwm -) - -pico_add_extra_outputs(magic_wand) - - -add_executable(magic_wand_test "") - -target_include_directories(magic_wand_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - magic_wand_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(magic_wand_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/magic_wand_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/magic_wand_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/ring_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/slope_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/magic_wand_model_data.h - ${CMAKE_CURRENT_LIST_DIR}/ring_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/slope_micro_features_data.h -) - -target_link_libraries( - magic_wand_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(magic_wand_test) - - -add_executable(gesture_accelerometer_handler_test "") - -target_include_directories(gesture_accelerometer_handler_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - gesture_accelerometer_handler_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(gesture_accelerometer_handler_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/accelerometer_handler.cpp - ${CMAKE_CURRENT_LIST_DIR}/accelerometer_handler_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/accelerometer_handler.h -) - -target_link_libraries( - gesture_accelerometer_handler_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(gesture_accelerometer_handler_test) - diff --git a/examples/magic_wand/accelerometer_handler.cpp b/examples/magic_wand/accelerometer_handler.cpp deleted file mode 100644 index e588ed8..0000000 --- a/examples/magic_wand/accelerometer_handler.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "accelerometer_handler.h" - -int begin_index = 0; - -TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) { - return kTfLiteOk; -} - -bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input, - int length) { - begin_index += 3; - // Reset begin_index to simulate behavior of loop buffer - if (begin_index >= 600) begin_index = 0; - // Only return true after the function was called 100 times, simulating the - // desired behavior of a real implementation (which does not return data until - // a sufficient amount is available) - if (begin_index > 300) { - for (int i = 0; i < length; ++i) input[i] = 0; - return true; - } else { - return false; - } -} diff --git a/examples/magic_wand/accelerometer_handler.h b/examples/magic_wand/accelerometer_handler.h deleted file mode 100644 index 5174cc0..0000000 --- a/examples/magic_wand/accelerometer_handler.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ - -#define kChannelNumber 3 - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -extern int begin_index; -extern TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter); -extern bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, - float* input, int length); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_ACCELEROMETER_HANDLER_H_ diff --git a/examples/magic_wand/accelerometer_handler_test.cpp b/examples/magic_wand/accelerometer_handler_test.cpp deleted file mode 100644 index 4d049cb..0000000 --- a/examples/magic_wand/accelerometer_handler_test.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "accelerometer_handler.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestSetup) { - tflite::MicroErrorReporter micro_error_reporter; - TfLiteStatus setup_status = SetupAccelerometer(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, setup_status); -} - -TF_LITE_MICRO_TEST(TestAccelerometer) { - float input[384] = {0.0}; - tflite::MicroErrorReporter micro_error_reporter; - // Test that the function returns false before insufficient data is available - bool inference_flag = ReadAccelerometer(µ_error_reporter, input, 384); - TF_LITE_MICRO_EXPECT_EQ(inference_flag, false); - - // Test that the function returns true once sufficient data is available to - // fill the model's input buffer (128 sets of values) - for (int i = 1; i <= 128; i++) { - inference_flag = ReadAccelerometer(µ_error_reporter, input, 384); - } - TF_LITE_MICRO_EXPECT_EQ(inference_flag, true); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/magic_wand/constants.h b/examples/magic_wand/constants.h deleted file mode 100644 index 3f0da6c..0000000 --- a/examples/magic_wand/constants.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ - -// The expected accelerometer data sample frequency -const float kTargetHz = 25; - -// What gestures are supported. -constexpr int kGestureCount = 4; -constexpr int kWingGesture = 0; -constexpr int kRingGesture = 1; -constexpr int kSlopeGesture = 2; -constexpr int kNoGesture = 3; - -// These control the sensitivity of the detection algorithm. If you're seeing -// too many false positives or not enough true positives, you can try tweaking -// these thresholds. Often, increasing the size of the training set will give -// more robust results though, so consider retraining if you are seeing poor -// predictions. -constexpr float kDetectionThreshold = 0.8f; -constexpr int kPredictionHistoryLength = 5; -constexpr int kPredictionSuppressionDuration = 25; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_CONSTANTS_H_ diff --git a/examples/magic_wand/gesture_predictor.cpp b/examples/magic_wand/gesture_predictor.cpp deleted file mode 100644 index 2313092..0000000 --- a/examples/magic_wand/gesture_predictor.cpp +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "gesture_predictor.h" - -#include "constants.h" - -namespace { -// State for the averaging algorithm we're using. -float prediction_history[kGestureCount][kPredictionHistoryLength] = {}; -int prediction_history_index = 0; -int prediction_suppression_count = 0; -} // namespace - -// Return the result of the last prediction -// 0: wing("W"), 1: ring("O"), 2: slope("angle"), 3: unknown -int PredictGesture(float* output) { - // Record the latest predictions in our rolling history buffer. - for (int i = 0; i < kGestureCount; ++i) { - prediction_history[i][prediction_history_index] = output[i]; - } - // Figure out which slot to put the next predictions into. - ++prediction_history_index; - if (prediction_history_index >= kPredictionHistoryLength) { - prediction_history_index = 0; - } - - // Average the last n predictions for each gesture, and find which has the - // highest score. - int max_predict_index = -1; - float max_predict_score = 0.0f; - for (int i = 0; i < kGestureCount; i++) { - float prediction_sum = 0.0f; - for (int j = 0; j < kPredictionHistoryLength; ++j) { - prediction_sum += prediction_history[i][j]; - } - const float prediction_average = prediction_sum / kPredictionHistoryLength; - if ((max_predict_index == -1) || (prediction_average > max_predict_score)) { - max_predict_index = i; - max_predict_score = prediction_average; - } - } - - // If there's been a recent prediction, don't trigger a new one too soon. - if (prediction_suppression_count > 0) { - --prediction_suppression_count; - } - // If we're predicting no gesture, or the average score is too low, or there's - // been a gesture recognised too recently, return no gesture. 
- if ((max_predict_index == kNoGesture) || - (max_predict_score < kDetectionThreshold) || - (prediction_suppression_count > 0)) { - return kNoGesture; - } else { - // Reset the suppression counter so we don't come up with another prediction - // too soon. - prediction_suppression_count = kPredictionSuppressionDuration; - return max_predict_index; - } -} diff --git a/examples/magic_wand/gesture_predictor.h b/examples/magic_wand/gesture_predictor.h deleted file mode 100644 index 713cb56..0000000 --- a/examples/magic_wand/gesture_predictor.h +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ - -extern int PredictGesture(float* output); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_GESTURE_PREDICTOR_H_ diff --git a/examples/magic_wand/gesture_predictor_test.cpp b/examples/magic_wand/gesture_predictor_test.cpp deleted file mode 100644 index 0eb9635..0000000 --- a/examples/magic_wand/gesture_predictor_test.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "gesture_predictor.h" - -#include "constants.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(SuccessfulPrediction) { - // Use the threshold from the 0th gesture. - float probabilities[kGestureCount] = {kDetectionThreshold, 0.0, 0.0, 0.0}; - int prediction; - // Loop just too few times to trigger a prediction. - for (int i = 0; i < kPredictionHistoryLength - 1; i++) { - prediction = PredictGesture(probabilities); - TF_LITE_MICRO_EXPECT_EQ(prediction, kNoGesture); - } - // Call once more, triggering a prediction - // for category 0. - prediction = PredictGesture(probabilities); - TF_LITE_MICRO_EXPECT_EQ(prediction, 0); -} - -TF_LITE_MICRO_TEST(FailPartWayThere) { - // Use the threshold from the 0th gesture. - float probabilities[kGestureCount] = {kDetectionThreshold, 0.0, 0.0, 0.0}; - int prediction; - // Loop just too few times to trigger a prediction. - for (int i = 0; i <= kPredictionHistoryLength - 1; i++) { - prediction = PredictGesture(probabilities); - TF_LITE_MICRO_EXPECT_EQ(prediction, kNoGesture); - } - // Call with a different prediction, triggering a failure. - probabilities[0] = 0.0; - probabilities[2] = 1.0; - prediction = PredictGesture(probabilities); - TF_LITE_MICRO_EXPECT_EQ(prediction, kNoGesture); -} - -TF_LITE_MICRO_TEST(InsufficientProbability) { - // Just below the detection threshold. 
- float probabilities[kGestureCount] = {kDetectionThreshold - 0.1f, 0.0, 0.0, - 0.0}; - int prediction; - // Loop the exact right number of times - for (int i = 0; i <= kPredictionHistoryLength; i++) { - prediction = PredictGesture(probabilities); - TF_LITE_MICRO_EXPECT_EQ(prediction, kNoGesture); - } -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/magic_wand/magic_wand_model_data.cpp b/examples/magic_wand/magic_wand_model_data.cpp deleted file mode 100644 index ef2c452..0000000 --- a/examples/magic_wand/magic_wand_model_data.cpp +++ /dev/null @@ -1,1659 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Automatically created from a TensorFlow Lite flatbuffer using the command: -// xxd -i magic_wand_model.tflite > magic_wand_model_data.cc -// See the README for a full description of the creation process. - -#include "magic_wand_model_data.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. 
-alignas(8) const unsigned char g_magic_wand_model_data[] = { - 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x12, 0x00, - 0x1c, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, - 0x00, 0x00, 0x18, 0x00, 0x12, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x24, 0x4c, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x14, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, - 0x0f, 0x00, 0x00, 0x00, 0x54, 0x4f, 0x43, 0x4f, 0x20, 0x43, 0x6f, 0x6e, - 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x00, 0x12, 0x00, 0x00, 0x00, - 0xb0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, - 0x68, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, - 0x50, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x6e, 0xb6, 0xff, 0xff, 0x68, 0x00, 0x00, 0x00, 0x30, 0xb6, 0xff, 0xff, - 0x34, 0xb6, 0xff, 0xff, 0x7e, 0xb6, 0xff, 0xff, 0xf0, 0x01, 0x00, 0x00, - 0x86, 0xb6, 0xff, 0xff, 0xc8, 0x03, 0x00, 0x00, 0x48, 0xb6, 0xff, 0xff, - 0x4c, 0xb6, 0xff, 0xff, 0x50, 0xb6, 0xff, 0xff, 0x54, 0xb6, 0xff, 0xff, - 0x58, 0xb6, 0xff, 0xff, 0x5c, 0xb6, 0xff, 0xff, 0xa6, 0xb6, 0xff, 0xff, - 0xc0, 0x0d, 0x00, 0x00, 0xae, 0xb6, 0xff, 0xff, 0x00, 0x46, 0x00, 0x00, - 0xb6, 0xb6, 0xff, 0xff, 0x60, 0x46, 0x00, 0x00, 0xbe, 0xb6, 0xff, 0xff, - 0xe0, 0x46, 0x00, 0x00, 0xc6, 0xb6, 0xff, 0xff, 0x48, 0x47, 0x00, 0x00, - 0xce, 0xb6, 0xff, 0xff, 0x98, 0x48, 0x00, 0x00, 0x90, 0xb6, 0xff, 0xff, - 0x05, 0x00, 0x00, 0x00, 0x31, 0x2e, 0x35, 0x2e, 0x30, 0x00, 0x00, 0x00, - 0x54, 0xf4, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, - 0x13, 0x00, 0x00, 0x00, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 
0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, - 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x88, 0x48, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x9c, 0x0c, 0x00, 0x00, 0x14, 0x0b, 0x00, 0x00, - 0xd4, 0x00, 0x00, 0x00, 0x24, 0x45, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, - 0xa4, 0x02, 0x00, 0x00, 0x80, 0x45, 0x00, 0x00, 0x9c, 0x0b, 0x00, 0x00, - 0xb0, 0x0c, 0x00, 0x00, 0xc4, 0x47, 0x00, 0x00, 0x50, 0x0b, 0x00, 0x00, - 0x2c, 0x0c, 0x00, 0x00, 0x48, 0x46, 0x00, 0x00, 0xec, 0x45, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0xc8, 0x0b, 0x00, 0x00, 0x66, 0xb8, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x2a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x69, - 0x6e, 0x67, 0x32, 0x64, 0x2f, 0x4d, 0x61, 0x78, 0x50, 0x6f, 0x6f, 0x6c, - 0x00, 0x00, 0x00, 0x00, 0x88, 0xb7, 0xff, 0xff, 0xba, 0xb8, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x52, 0x65, - 0x6c, 0x75, 0x00, 0x00, 0xd0, 0xb7, 0xff, 0xff, 0x02, 0xb9, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x44, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x27, 0x00, 
0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x43, 0x6f, - 0x6e, 0x76, 0x32, 0x44, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x00, 0x28, 0xb8, 0xff, 0xff, - 0x80, 0x01, 0x00, 0x00, 0x1c, 0x6a, 0xf6, 0xbd, 0xaa, 0x16, 0xfd, 0x3c, - 0xf6, 0xd9, 0x20, 0x3e, 0x64, 0xf8, 0xdd, 0x3d, 0x07, 0xc0, 0x82, 0xbe, - 0x9e, 0xa3, 0x38, 0xbd, 0x13, 0x41, 0xa1, 0x3d, 0xb0, 0x81, 0x90, 0xbd, - 0xc7, 0xdd, 0xbc, 0xbb, 0x87, 0x9c, 0x24, 0xbe, 0x72, 0x08, 0x6a, 0xbd, - 0x10, 0x1b, 0x61, 0x3e, 0x79, 0x49, 0x18, 0xbe, 0xda, 0x09, 0x88, 0xbe, - 0x2d, 0x70, 0x4d, 0x3d, 0x5c, 0x4a, 0x9e, 0xbd, 0x0f, 0xf1, 0x46, 0x3e, - 0x1c, 0xbd, 0x02, 0xbf, 0x56, 0xbc, 0x07, 0x3e, 0x63, 0x92, 0x39, 0xbe, - 0x4e, 0xde, 0x84, 0x3e, 0x64, 0x38, 0x88, 0xbd, 0xa0, 0x32, 0xc3, 0xbd, - 0x0f, 0x94, 0xb7, 0xbe, 0xd6, 0x11, 0x27, 0xbc, 0xcc, 0x7e, 0xf3, 0x3d, - 0xf3, 0x4d, 0xaa, 0x3d, 0xbc, 0x8a, 0x28, 0x3e, 0xa2, 0xb5, 0xda, 0xbd, - 0x92, 0x1a, 0xb6, 0xbd, 0x9a, 0x49, 0xb1, 0x3d, 0xfc, 0x93, 0x1c, 0x3d, - 0x74, 0xa1, 0xa1, 0xbd, 0xc7, 0x48, 0x1d, 0xbe, 0x3a, 0x53, 0xb2, 0x3b, - 0x92, 0x51, 0xa5, 0xbd, 0x6a, 0xc4, 0x3c, 0xbd, 0xdb, 0x61, 0x6d, 0xbd, - 0x78, 0x9f, 0x03, 0xbe, 0x40, 0x1f, 0x30, 0xbd, 0x17, 0xde, 0xad, 0x3d, - 0xd7, 0xee, 0x74, 0xbd, 0xb6, 0x5c, 0xc2, 0x3d, 0x1c, 0x89, 0x65, 0xbe, - 0xfd, 0xc4, 0x48, 0x3e, 0xb2, 0x29, 0x13, 0x3d, 0xcc, 0x56, 0x13, 0x3d, - 0xf8, 0xce, 0x1b, 0xbc, 0xb5, 0x4b, 0xe8, 0xbc, 0x48, 0x05, 0x5c, 0xbe, - 0xaf, 0xfa, 0x0d, 0x3e, 0x74, 0x84, 0xa4, 0x3d, 0x4c, 0x84, 0x04, 0x3e, - 0x09, 0x7a, 0xba, 0x3c, 0xb3, 0xa6, 0x07, 0x3e, 0x7d, 0xe5, 0xe5, 0x3d, - 0x7e, 0xb9, 0xa5, 0x3c, 0x4e, 0x70, 0x49, 0x3e, 0x39, 0xfe, 0x12, 0xbe, - 0xfa, 0x8b, 0x01, 0xbe, 0xb9, 0x8e, 0xe6, 0xbc, 0xc8, 0x2f, 0xb3, 0xbd, - 0x1b, 0x2b, 0x9e, 0xbd, 0xe7, 0x7f, 0x0e, 0x3d, 0x3e, 0xa3, 0x2a, 0x3d, - 0xa1, 0x73, 0x31, 0x3d, 0xc8, 0xc7, 0x03, 0xbd, 0x07, 0x71, 0xaf, 0xbd, - 0xb2, 0x6b, 
0x2b, 0xbe, 0x06, 0xc2, 0x1f, 0xbe, 0x3b, 0xbf, 0x30, 0xbe, - 0x7e, 0x51, 0x22, 0x3e, 0x5a, 0xa7, 0x92, 0x3d, 0xb8, 0x60, 0x35, 0xbe, - 0xa7, 0xdf, 0x8f, 0x3d, 0xbc, 0xfc, 0x42, 0x3e, 0x42, 0x86, 0x7d, 0xbc, - 0x3a, 0xd0, 0xd8, 0x3c, 0xea, 0x45, 0x40, 0xbc, 0x04, 0xd3, 0x9d, 0xb7, - 0xe3, 0xdf, 0xae, 0xbd, 0x80, 0x5e, 0x59, 0xbe, 0x88, 0x15, 0xc0, 0xbd, - 0xea, 0x86, 0xaa, 0xbd, 0x3b, 0x4a, 0x64, 0x3d, 0x89, 0x25, 0x42, 0xbe, - 0xc2, 0x29, 0x93, 0xbe, 0x62, 0x85, 0x00, 0x3e, 0xf1, 0x0e, 0xda, 0xbd, - 0x48, 0x09, 0xb8, 0xbe, 0xad, 0xe2, 0x4d, 0xbe, 0x69, 0x26, 0x99, 0xbe, - 0x86, 0x3c, 0xcd, 0xbe, 0x05, 0xe6, 0x4e, 0xbd, 0xdb, 0x8f, 0xfb, 0x3d, - 0xc6, 0xf5, 0x97, 0x3e, 0xde, 0xba, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, - 0x32, 0x44, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x00, 0x00, 0x00, 0x08, 0xba, 0xff, 0xff, - 0x00, 0x08, 0x00, 0x00, 0x40, 0x7a, 0x10, 0xbf, 0x9b, 0xf1, 0xcc, 0xbe, - 0x78, 0x88, 0x1d, 0xbf, 0xc2, 0xb7, 0x09, 0xbf, 0xbd, 0xd6, 0xaf, 0xbe, - 0xf5, 0x29, 0xed, 0xbe, 0xad, 0x18, 0x70, 0xbe, 0x3d, 0x91, 0x40, 0x3b, - 0x2f, 0xcf, 0x55, 0xbe, 0xc6, 0x0b, 0xed, 0x3e, 0xbe, 0x7a, 0x80, 0xbe, - 0x3c, 0x3c, 0xb1, 0x3a, 0xab, 0xea, 0xa6, 0x3d, 0x88, 0x57, 0x55, 0xbe, - 0x0c, 0x62, 0xdc, 0xbe, 0x07, 0x6b, 0xbe, 0x3e, 0x9e, 0x0d, 0x28, 0x3d, - 0xd9, 0xd2, 0xb1, 0x3d, 0x4c, 0x45, 0xbd, 0xbd, 0x3c, 0xe0, 0x1f, 0x3d, - 0x76, 0xb5, 0x48, 0xbd, 0x2b, 0xe8, 0x3a, 0xbe, 0xd3, 0x21, 0x57, 0xbe, - 0xdc, 0x21, 0x70, 0x3e, 0x00, 0x71, 0xec, 0x3d, 0x9c, 0xeb, 0x61, 0x3d, - 0x0d, 0x8a, 0x9c, 0x3d, 0x2a, 0x35, 0x03, 0x3d, 0x1b, 0x1c, 0x28, 0x3b, - 0xa6, 0xf9, 
0xa9, 0xbd, 0x20, 0xfb, 0x2f, 0xbe, 0x8c, 0xcb, 0x04, 0x3d, - 0x43, 0x62, 0x64, 0x3d, 0xf8, 0x65, 0xd6, 0x3d, 0xf3, 0xbe, 0x3a, 0xbd, - 0xaf, 0x2b, 0x53, 0x3d, 0x99, 0x58, 0xf8, 0xbd, 0x3a, 0xbe, 0x43, 0xbd, - 0xd0, 0xe7, 0xc0, 0xbc, 0xa4, 0x8d, 0xf3, 0x3d, 0xa2, 0xd7, 0x9c, 0xbc, - 0x87, 0x1b, 0xb0, 0x3e, 0x18, 0x20, 0x88, 0xbd, 0xee, 0x9e, 0xc5, 0x3e, - 0xc1, 0x35, 0xaf, 0x3c, 0xc0, 0x97, 0x39, 0xbd, 0xa5, 0x6f, 0x04, 0xbd, - 0x7b, 0x03, 0x1c, 0xbe, 0x0f, 0x75, 0xbb, 0xbd, 0xdf, 0x1b, 0x54, 0xbd, - 0xfb, 0xbf, 0xcc, 0xbd, 0x1b, 0x01, 0x10, 0xbd, 0x20, 0x67, 0xb4, 0xbc, - 0xdf, 0xa6, 0x40, 0x3c, 0x74, 0xb4, 0x28, 0x3d, 0x65, 0xe7, 0xc3, 0xbd, - 0x8d, 0x38, 0x91, 0xbd, 0x8f, 0xba, 0x45, 0xbc, 0x69, 0xc7, 0x49, 0xbc, - 0xa3, 0xcb, 0xf8, 0x3c, 0x69, 0xed, 0x2d, 0x3e, 0x0d, 0xf4, 0xc6, 0xbc, - 0xd6, 0xe7, 0xfe, 0xbd, 0xfa, 0xa0, 0x86, 0xbd, 0x30, 0x83, 0xf0, 0xbd, - 0xf5, 0xb9, 0xd1, 0xbd, 0x1c, 0x39, 0x90, 0xbe, 0x74, 0x62, 0xd8, 0xbd, - 0x13, 0x28, 0x07, 0xbe, 0xbf, 0xbd, 0xbf, 0xbd, 0x15, 0xcf, 0xa8, 0xbb, - 0x9c, 0x1a, 0x8f, 0xbd, 0x91, 0x23, 0x93, 0x3d, 0x79, 0x63, 0xc4, 0xbd, - 0xec, 0xdd, 0x72, 0xbe, 0xd2, 0xd8, 0xac, 0xbe, 0xbf, 0x1e, 0x21, 0xbe, - 0xae, 0xab, 0x20, 0xbd, 0x8f, 0xe9, 0x90, 0x3d, 0xbb, 0x47, 0xdd, 0x3d, - 0xb1, 0x93, 0x37, 0xbd, 0xeb, 0xcf, 0x45, 0xbc, 0x33, 0x8f, 0x15, 0xbe, - 0xeb, 0x19, 0x9f, 0xbe, 0x66, 0x21, 0xe3, 0xbd, 0x53, 0x82, 0x60, 0x3c, - 0x11, 0xdc, 0xf1, 0xbc, 0x0c, 0x01, 0x1e, 0x3d, 0xdb, 0xd5, 0x1d, 0x3f, - 0x58, 0xa3, 0x61, 0x3d, 0x0a, 0x2b, 0x16, 0xbe, 0x01, 0x9d, 0x50, 0xbe, - 0xac, 0xac, 0x63, 0x3e, 0x76, 0xdb, 0x8a, 0xbc, 0x57, 0xec, 0x8f, 0xbc, - 0xad, 0x20, 0xd6, 0x3d, 0xc2, 0x63, 0x89, 0x3d, 0xc3, 0x1e, 0xe9, 0x3e, - 0xa8, 0x41, 0x9e, 0xbd, 0xac, 0x2c, 0x2b, 0xbe, 0x98, 0x73, 0xbf, 0x3d, - 0x7a, 0x22, 0x54, 0xbd, 0x44, 0xaf, 0x2c, 0xbe, 0x05, 0x45, 0xd9, 0xbc, - 0x74, 0xaa, 0x20, 0x3e, 0x6e, 0x1e, 0x95, 0x3e, 0x54, 0x20, 0x12, 0xbc, - 0xbe, 0x20, 0xcf, 0x3d, 0xa6, 0x02, 0x28, 0xbe, 0xd1, 0xfe, 0xe8, 0xbd, - 0x1f, 0x2a, 
0x83, 0xbe, 0x33, 0x44, 0xf3, 0x3d, 0xff, 0x48, 0xda, 0xbd, - 0x8d, 0x1e, 0x79, 0x3e, 0xdb, 0xee, 0xb2, 0x3d, 0xdb, 0xde, 0x5b, 0xbe, - 0x55, 0x57, 0x49, 0xbe, 0x62, 0x4b, 0x29, 0xbf, 0x93, 0xbf, 0x23, 0xbf, - 0xea, 0xa3, 0x71, 0x3d, 0xa5, 0x50, 0x00, 0xbe, 0xb6, 0xd1, 0xc0, 0xbf, - 0x1d, 0x22, 0x7d, 0xbd, 0x00, 0x09, 0x81, 0x3d, 0xde, 0x28, 0x75, 0xbe, - 0x03, 0x1b, 0x2e, 0xbf, 0x3c, 0x3a, 0x3b, 0xbf, 0x52, 0x1a, 0xe6, 0xbd, - 0x9d, 0xe9, 0xea, 0xbd, 0x49, 0x71, 0x2c, 0x3d, 0xe8, 0x8b, 0x55, 0xbb, - 0x6c, 0x97, 0x24, 0xbe, 0x44, 0xe8, 0xe0, 0xba, 0x6d, 0x45, 0x4b, 0x3f, - 0x7f, 0x26, 0x38, 0x3e, 0xab, 0x04, 0x6e, 0xbe, 0x70, 0x59, 0x0e, 0xbe, - 0xd6, 0xfb, 0x7a, 0x3d, 0x45, 0x72, 0xa2, 0xbd, 0xb5, 0x6f, 0x2e, 0xbe, - 0xda, 0xf5, 0x07, 0x3e, 0xe0, 0x2b, 0xac, 0xbd, 0xaf, 0x35, 0xf6, 0xbd, - 0xd0, 0x2b, 0xac, 0xbd, 0x26, 0x2d, 0x11, 0xbe, 0x7e, 0xfa, 0x87, 0x3d, - 0x3a, 0xb7, 0xf6, 0xbd, 0xb1, 0xd0, 0xe2, 0xbc, 0xc8, 0xa2, 0x86, 0xbd, - 0x19, 0xf5, 0xb1, 0xbd, 0xf6, 0x65, 0x4d, 0xbe, 0x23, 0x63, 0x47, 0x3e, - 0xb7, 0x26, 0xd3, 0xbd, 0x57, 0xf4, 0x12, 0xbf, 0x93, 0xd4, 0x39, 0xbe, - 0x77, 0xf2, 0x62, 0x3d, 0xf6, 0x3d, 0xc3, 0xbe, 0xb6, 0xf5, 0x2b, 0xbe, - 0xbe, 0x8a, 0x76, 0xbe, 0xb1, 0x39, 0x63, 0x3e, 0xde, 0xbe, 0x3c, 0xbe, - 0xd4, 0x01, 0x94, 0xbe, 0x19, 0x1a, 0x97, 0xbb, 0xcb, 0x83, 0x4e, 0xbe, - 0x50, 0x19, 0x94, 0xbd, 0xf8, 0x8a, 0x95, 0xbe, 0xc8, 0xab, 0x86, 0xbe, - 0x18, 0x57, 0x6d, 0x3e, 0x87, 0xad, 0x8b, 0x3c, 0x72, 0x7b, 0x8d, 0x3e, - 0x54, 0x39, 0x95, 0x3d, 0x1d, 0xfa, 0x4b, 0xbe, 0x97, 0xd2, 0x7a, 0xbe, - 0x68, 0x4a, 0xcb, 0xbe, 0xf0, 0x10, 0x04, 0xbf, 0x2b, 0xb5, 0x82, 0x3e, - 0xf8, 0x71, 0x1a, 0x3e, 0x29, 0xf0, 0x29, 0x3d, 0x74, 0x5a, 0x1a, 0x3e, - 0x58, 0x75, 0xd1, 0xbd, 0x38, 0x6c, 0x99, 0x3e, 0x6c, 0xd4, 0x63, 0xbe, - 0xc3, 0x51, 0x90, 0xbe, 0xcf, 0xff, 0xae, 0xbe, 0xfe, 0xf1, 0x00, 0x3d, - 0x52, 0x64, 0x90, 0xbd, 0x02, 0x1a, 0xce, 0xbd, 0x86, 0x74, 0x00, 0x3d, - 0x82, 0x40, 0x04, 0x3e, 0x38, 0x03, 0x82, 0x3e, 0x8f, 0x1c, 0xf4, 0x3e, - 0x6f, 0x04, 
0x68, 0xbe, 0x00, 0x12, 0xe3, 0x3d, 0x01, 0xf6, 0xb5, 0x3d, - 0xd9, 0x99, 0x36, 0x3d, 0x40, 0xad, 0xde, 0x3e, 0xaf, 0x74, 0xc1, 0x3d, - 0xb7, 0x8e, 0x6f, 0xbe, 0xb3, 0xa6, 0x32, 0xbe, 0xff, 0xca, 0x23, 0x3e, - 0x0c, 0xf1, 0x42, 0x3e, 0xe3, 0x85, 0x2c, 0x3d, 0xca, 0xc8, 0xb7, 0xba, - 0x1a, 0x94, 0x53, 0xbd, 0x9a, 0x33, 0xaa, 0x3d, 0x9c, 0x7c, 0x79, 0xbe, - 0x84, 0xb2, 0x71, 0xbe, 0x48, 0xc1, 0x2b, 0xbd, 0xf4, 0x89, 0x6c, 0xbd, - 0x1f, 0xd2, 0xf2, 0xbd, 0xd2, 0x4f, 0x28, 0xbd, 0xb4, 0xbb, 0xb3, 0xbd, - 0x6f, 0x96, 0xab, 0xbc, 0x23, 0x9d, 0x82, 0xbe, 0xe6, 0x6b, 0x59, 0xbd, - 0x27, 0x09, 0x03, 0xbe, 0x42, 0xa6, 0xac, 0xbd, 0xb6, 0x12, 0x20, 0x3d, - 0x0a, 0x63, 0x24, 0xbe, 0x75, 0x27, 0x28, 0xbe, 0xa5, 0x62, 0x2b, 0xbe, - 0x1f, 0x48, 0x06, 0xbe, 0x7e, 0xd0, 0xb2, 0xbd, 0xa9, 0xd6, 0x80, 0x3a, - 0xff, 0x7a, 0x11, 0xbe, 0x76, 0x5f, 0x41, 0x3e, 0x17, 0xa9, 0xfa, 0xbd, - 0x5b, 0xd1, 0x71, 0xbd, 0xf3, 0x23, 0xaf, 0xbd, 0x63, 0x24, 0xe0, 0xbc, - 0xc6, 0x62, 0x9d, 0x3e, 0xd6, 0x19, 0x47, 0xbe, 0x92, 0x69, 0xf1, 0xbd, - 0x8a, 0x67, 0x82, 0x3d, 0x17, 0x33, 0x69, 0x3d, 0x1a, 0x91, 0x25, 0xbe, - 0xf1, 0xab, 0xae, 0x3d, 0x3a, 0x21, 0xc1, 0x3e, 0xd8, 0xc4, 0x5d, 0xbd, - 0xc7, 0x58, 0xa6, 0xbe, 0xc6, 0xb0, 0xed, 0x3b, 0x75, 0xd6, 0xa2, 0x3c, - 0x64, 0xa8, 0x1d, 0xbe, 0xe5, 0x1f, 0x3a, 0xbe, 0x7b, 0x03, 0x39, 0xbd, - 0x14, 0xa2, 0x81, 0x3d, 0xdb, 0xfd, 0xb2, 0xbc, 0xca, 0x96, 0x9a, 0xbe, - 0x7c, 0xcc, 0xc9, 0x3c, 0xb8, 0x7d, 0x88, 0x3d, 0x36, 0x39, 0x0b, 0xbd, - 0x5e, 0x1f, 0x3c, 0xbe, 0x27, 0x36, 0x83, 0x3c, 0x38, 0xa1, 0x23, 0xbd, - 0xba, 0xfa, 0xf6, 0x3b, 0x8d, 0xa9, 0xc3, 0xbe, 0x50, 0x34, 0xf0, 0xbd, - 0x92, 0x0f, 0xb3, 0xbd, 0xd9, 0xad, 0x5e, 0xbe, 0xc1, 0x27, 0xb2, 0x3c, - 0x6a, 0x29, 0x07, 0xbe, 0x0f, 0xb5, 0x26, 0xbe, 0xc8, 0xf9, 0x27, 0x3e, - 0x2a, 0x97, 0xa4, 0x3e, 0xe1, 0x45, 0x53, 0x3e, 0xec, 0xd7, 0xa0, 0x3d, - 0xfd, 0x1a, 0x8c, 0xbe, 0x1d, 0x4c, 0xd6, 0xbd, 0x4a, 0x78, 0x63, 0xbe, - 0x18, 0xa4, 0xd9, 0xbc, 0x5a, 0xaa, 0x37, 0x3d, 0xff, 0xe8, 0x3b, 0xbe, - 0x6b, 0x8c, 
0x67, 0x3e, 0x13, 0xec, 0x12, 0xbc, 0xae, 0xcc, 0xab, 0xbc, - 0x2e, 0x9b, 0x72, 0xbd, 0x46, 0x3f, 0xb4, 0x3e, 0xdb, 0xba, 0xd3, 0xbd, - 0x7b, 0xdb, 0x86, 0xbe, 0x6a, 0x66, 0xd9, 0xbe, 0x8c, 0x5c, 0x80, 0x3d, - 0x60, 0x64, 0x4d, 0xbe, 0x4d, 0x91, 0x58, 0x3e, 0xa9, 0xfc, 0x0e, 0xbe, - 0x32, 0xc8, 0xce, 0x3e, 0xa8, 0xc8, 0xb3, 0xbe, 0x4d, 0x07, 0xae, 0xbe, - 0xbc, 0xa3, 0x2c, 0xbf, 0x57, 0x9c, 0x21, 0xbe, 0x0e, 0x6d, 0x6e, 0xbe, - 0x30, 0xa6, 0x15, 0xbf, 0xd6, 0x76, 0x01, 0xbf, 0x80, 0x3e, 0xab, 0xbe, - 0xbe, 0x98, 0x2d, 0xbe, 0xe2, 0x02, 0x48, 0xbe, 0xc8, 0x4b, 0x96, 0xbe, - 0x48, 0xaa, 0x2e, 0x3e, 0xa2, 0x19, 0x01, 0x3f, 0xa8, 0xec, 0x8f, 0xbe, - 0x15, 0xd2, 0x24, 0x3e, 0x5c, 0x80, 0xc2, 0xbc, 0xf0, 0x78, 0x29, 0xbe, - 0xfe, 0x1d, 0x63, 0xbe, 0x32, 0xf1, 0x22, 0xbd, 0x35, 0x8c, 0x1d, 0x3e, - 0xb9, 0x22, 0xc2, 0x3e, 0xde, 0x75, 0xc0, 0xbe, 0x27, 0x71, 0x73, 0xbb, - 0x37, 0x41, 0xde, 0x3d, 0x0a, 0x71, 0xfe, 0xbd, 0x9e, 0x66, 0xf6, 0xbd, - 0x2b, 0x93, 0x07, 0xbc, 0x75, 0x1e, 0x90, 0x3d, 0x01, 0x49, 0x59, 0x3e, - 0x0a, 0xb2, 0xbe, 0xbe, 0xd4, 0x65, 0x9f, 0xbc, 0x43, 0x20, 0xdd, 0x3d, - 0xef, 0x01, 0x74, 0xbd, 0xb2, 0xa4, 0xd5, 0x3b, 0xa4, 0x30, 0xf9, 0xbc, - 0xb8, 0x15, 0x68, 0xb8, 0x58, 0xa2, 0xa7, 0xbe, 0x5a, 0x25, 0xa5, 0x3d, - 0x2d, 0x86, 0xb2, 0xbe, 0xc9, 0x31, 0xc2, 0x3e, 0xd2, 0x61, 0x28, 0x3d, - 0xa6, 0xfe, 0xba, 0x3d, 0x4b, 0x6c, 0xf6, 0xbd, 0xaa, 0x14, 0xdc, 0xbc, - 0xf6, 0x7d, 0xdc, 0xbd, 0xce, 0xb6, 0x75, 0xbd, 0x0b, 0xa5, 0xa8, 0xbe, - 0x9b, 0xb5, 0x4a, 0x3e, 0xfc, 0xfa, 0x98, 0x3d, 0x27, 0xd6, 0x39, 0x3d, - 0x1a, 0xbf, 0x67, 0x3d, 0x3f, 0x04, 0x04, 0xbc, 0x07, 0x56, 0x42, 0xbd, - 0xd8, 0xe6, 0x52, 0xbe, 0x72, 0xff, 0xc7, 0xbd, 0xd8, 0x5b, 0xba, 0xbd, - 0xe9, 0xb9, 0xc8, 0xbd, 0xe2, 0x54, 0x05, 0xbe, 0xb5, 0x8f, 0xf2, 0x3e, - 0x74, 0xe9, 0x68, 0xbd, 0x6f, 0x16, 0xcd, 0xbe, 0x2a, 0x22, 0x40, 0x3c, - 0xfc, 0x03, 0xf2, 0x3d, 0x91, 0x74, 0xaa, 0x3d, 0x7d, 0xb1, 0x1f, 0xbe, - 0x95, 0xc1, 0x14, 0xbe, 0xbb, 0xe5, 0x89, 0xbe, 0xae, 0xff, 0x5a, 0x3d, - 0x31, 0x79, 
0x07, 0xbe, 0x07, 0xfb, 0xba, 0x3e, 0x4e, 0xd0, 0x86, 0xbd, - 0x68, 0x36, 0x29, 0x3e, 0xec, 0x14, 0xc7, 0x3d, 0xef, 0xf6, 0x06, 0x3b, - 0x76, 0xa0, 0xe8, 0x3c, 0x97, 0x57, 0xac, 0x3c, 0xec, 0x02, 0x8d, 0xbe, - 0x43, 0xaf, 0x42, 0x3d, 0x13, 0x39, 0x0e, 0x3d, 0xf4, 0xed, 0x5a, 0x3e, - 0xbb, 0xf1, 0x18, 0xbe, 0x71, 0x1f, 0xc8, 0xbb, 0x6d, 0x8c, 0x0b, 0xbe, - 0xfe, 0x5d, 0xc3, 0xbd, 0x2c, 0xae, 0x87, 0xbe, 0x58, 0x74, 0x3e, 0x3d, - 0x14, 0x52, 0x13, 0xbe, 0x41, 0x11, 0x55, 0xbe, 0x43, 0x03, 0x0e, 0x3e, - 0xf8, 0x4c, 0x2e, 0x3e, 0x09, 0x6a, 0xea, 0xbd, 0xec, 0xe1, 0xc3, 0xbd, - 0xd5, 0xdf, 0x2a, 0xbe, 0x2e, 0xc1, 0xd9, 0xbd, 0xc1, 0x5b, 0x72, 0xbe, - 0x73, 0xe9, 0x0f, 0xbe, 0xab, 0xc3, 0x0c, 0x3e, 0x85, 0xd1, 0x5e, 0x3e, - 0x08, 0x70, 0x0d, 0xbe, 0x6f, 0xb7, 0x01, 0x3d, 0x0c, 0x0f, 0x86, 0xbd, - 0x0d, 0x23, 0x56, 0x3e, 0x16, 0x6f, 0x10, 0xbc, 0x4f, 0x98, 0x42, 0xbf, - 0x85, 0x4e, 0x44, 0xbe, 0xf0, 0x20, 0x0b, 0xbe, 0x5b, 0xa3, 0x0f, 0xbc, - 0xbd, 0x33, 0x45, 0xbd, 0x84, 0xfb, 0x48, 0xbd, 0x11, 0x99, 0x8c, 0x3c, - 0x41, 0x1e, 0x08, 0x3e, 0xe3, 0x3e, 0x6c, 0xbf, 0x97, 0x2b, 0x0c, 0xbe, - 0x94, 0xec, 0x23, 0xbb, 0x8f, 0x35, 0x4f, 0x3c, 0xea, 0xec, 0x0c, 0xbd, - 0x04, 0x13, 0x3d, 0xbe, 0x13, 0x76, 0x23, 0x3e, 0x37, 0x0d, 0x99, 0x3c, - 0xd4, 0xa3, 0xf4, 0xbe, 0x18, 0x6a, 0x6c, 0xbe, 0x3d, 0x3c, 0xf6, 0xbd, - 0xf8, 0x51, 0xaf, 0xbc, 0x1f, 0x6e, 0x8a, 0xbc, 0x55, 0xc5, 0x8c, 0xbe, - 0x9e, 0x9c, 0x79, 0xbd, 0x13, 0x14, 0xb7, 0xbd, 0x89, 0xcd, 0x1a, 0xbe, - 0x79, 0x14, 0x2e, 0x3e, 0xdd, 0xa2, 0x71, 0x3e, 0xad, 0x71, 0xbe, 0xbc, - 0xa3, 0xc9, 0x22, 0x3f, 0x66, 0x4b, 0x0f, 0x3d, 0x45, 0x1c, 0x29, 0xbe, - 0xf6, 0x79, 0x93, 0xbe, 0x71, 0x18, 0xb6, 0x3d, 0xcc, 0xcb, 0x9d, 0x3c, - 0xa1, 0xbb, 0xfd, 0xbc, 0xc9, 0x75, 0x05, 0x3e, 0x77, 0x4b, 0xad, 0xbd, - 0x81, 0x1d, 0x5c, 0x3e, 0x2d, 0xcc, 0x24, 0xbd, 0x3a, 0xce, 0x36, 0xbe, - 0xb8, 0x37, 0x27, 0xbe, 0xe6, 0x3e, 0x75, 0x3b, 0xb7, 0xb4, 0x2c, 0xbd, - 0x1f, 0x05, 0x47, 0x3c, 0x81, 0x1d, 0x33, 0x3e, 0x8a, 0xfd, 0x4f, 0x3e, - 0xaf, 0x7c, 
0x3b, 0x3d, 0x00, 0xa0, 0xda, 0xbd, 0x39, 0xd1, 0x20, 0xbf, - 0xc9, 0x78, 0xf3, 0xbd, 0x9d, 0x01, 0xa3, 0xbe, 0x42, 0x44, 0xbb, 0xbc, - 0x5a, 0xc1, 0xd4, 0xbd, 0xfd, 0xe7, 0x3c, 0xbf, 0x46, 0x37, 0x85, 0x3d, - 0x79, 0x4e, 0xbc, 0x3d, 0xa4, 0xcd, 0x7f, 0xbf, 0x1d, 0xca, 0x69, 0xbf, - 0x97, 0xeb, 0x69, 0xbf, 0xaa, 0xc9, 0x9f, 0x3c, 0xb4, 0x82, 0x9d, 0x3e, - 0xf1, 0x94, 0x77, 0x3e, 0xf2, 0x74, 0x84, 0xbe, 0x88, 0x66, 0x9c, 0xbe, - 0xdf, 0x4e, 0xf1, 0xbd, 0xa2, 0x9e, 0x31, 0x3e, 0x8b, 0xc9, 0x49, 0x3d, - 0x5a, 0x63, 0x5c, 0x3e, 0xf9, 0xa5, 0x4e, 0x3d, 0x95, 0x3f, 0x8d, 0x3d, - 0x1c, 0xe0, 0x68, 0xbe, 0xb6, 0xe1, 0x7c, 0xbe, 0x82, 0x2b, 0x63, 0xbe, - 0x76, 0x6c, 0x02, 0xbe, 0xfe, 0x30, 0x36, 0xbe, 0x8f, 0x5f, 0x36, 0x3d, - 0x17, 0x52, 0x15, 0x3c, 0x1e, 0xc8, 0x88, 0xbf, 0x0a, 0xa1, 0x5d, 0x3d, - 0xe8, 0x31, 0x71, 0x3e, 0xd2, 0x45, 0x01, 0xbc, 0x41, 0x3c, 0x27, 0xbe, - 0xbb, 0xa9, 0x4d, 0xbc, 0x0f, 0xde, 0x9d, 0x3c, 0xbf, 0x35, 0xc3, 0xbd, - 0x5b, 0x0e, 0x70, 0xbf, 0xe9, 0xf4, 0xd5, 0x3b, 0x60, 0x9b, 0xec, 0x3d, - 0x8b, 0x75, 0x23, 0xbc, 0x17, 0x03, 0x84, 0xbe, 0x99, 0x04, 0xd0, 0x3c, - 0xdd, 0x01, 0x08, 0xbe, 0x82, 0xd5, 0x75, 0xbd, 0x05, 0xaa, 0xec, 0x3c, - 0xb9, 0x4d, 0x45, 0x3d, 0xa3, 0x11, 0x69, 0xbb, 0xa3, 0xb0, 0x50, 0x3e, - 0x7a, 0x5f, 0xaa, 0xbd, 0x6a, 0x73, 0xbe, 0xbd, 0x91, 0x25, 0xa9, 0xbd, - 0x0f, 0x8e, 0xe0, 0xbd, 0x50, 0x51, 0x8f, 0x3c, 0xf4, 0x7d, 0xb9, 0x3d, - 0xa2, 0x11, 0x50, 0x3d, 0x3a, 0xb5, 0x32, 0x3e, 0xe1, 0x28, 0x87, 0x3e, - 0x44, 0x83, 0x09, 0x3e, 0xc3, 0x5f, 0x0a, 0xbe, 0xc4, 0xb8, 0x0f, 0xbe, - 0xaa, 0xb2, 0xab, 0xbd, 0x93, 0x40, 0x5c, 0xbd, 0x35, 0xf0, 0x19, 0xbd, - 0x4a, 0xa8, 0x02, 0x3b, 0x3c, 0x51, 0x1a, 0x3c, 0xbe, 0x2d, 0xdd, 0xbb, - 0x55, 0x5d, 0xc3, 0x3d, 0x10, 0x6f, 0x7c, 0xbd, 0x62, 0xf7, 0x45, 0xbe, - 0xd5, 0xda, 0xe1, 0x3d, 0x25, 0xd5, 0x13, 0x3e, 0xf0, 0xd6, 0xea, 0xbd, - 0x62, 0x2b, 0x56, 0xbd, 0x80, 0x0c, 0xb1, 0x3d, 0x19, 0xbe, 0xa5, 0x3d, - 0x3e, 0xc3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x1c, 0x00, 
0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x63, 0x6f, 0x6e, 0x76, - 0x32, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x7f, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x9e, 0xc3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x52, 0x65, 0x6c, 0x75, 0x00, 0x00, 0x00, - 0xac, 0xc2, 0xff, 0xff, 0xde, 0xc3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x52, 0x65, 0x6c, 0x75, - 0x00, 0x00, 0x00, 0x00, 0xf8, 0xc2, 0xff, 0xff, 0x2a, 0xc4, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x22, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x69, - 0x6e, 0x67, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x4d, 0x61, 0x78, 0x50, 0x6f, - 0x6f, 0x6c, 0x00, 0x00, 0x4c, 0xc3, 0xff, 0xff, 0x7e, 0xc4, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x30, 0x00, 
0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, 0x65, 0x6e, 0x73, 0x65, - 0x5f, 0x31, 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x00, 0x00, - 0x90, 0xc3, 0xff, 0xff, 0xc2, 0xc4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x00, 0x00, 0x00, 0x00, 0xc4, 0xc3, 0xff, 0xff, 0xf6, 0xc4, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xe0, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, 0x65, 0x6e, 0x73, 0x65, - 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, 0x6c, 0x2f, 0x52, 0x65, 0x61, 0x64, - 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x73, 0x65, 0x00, 0x00, 0x00, 0x00, - 0x20, 0xc4, 0xff, 0xff, 0x00, 0x38, 0x00, 0x00, 0x91, 0x78, 0x9e, 0x3d, - 0x02, 0x6e, 0x72, 0x3e, 0xaf, 0x5f, 0x65, 0xbc, 0x83, 0x89, 0xa5, 0x3e, - 0x99, 0x1e, 0xf5, 0x3d, 0xb4, 0x02, 0x92, 0x3d, 0xaf, 0x31, 0x96, 0x3d, - 0x44, 0x77, 0xa2, 0xbe, 0xf0, 0x3c, 0x73, 0xbe, 0x0f, 0xec, 0x35, 0x3f, - 0x47, 0xd1, 0x20, 0xbc, 0xae, 0x8d, 0x48, 0xbe, 0xce, 0xcc, 0x3d, 0x3e, - 0xad, 0x49, 0x78, 0x3e, 0x11, 0x2e, 0x82, 0xbd, 0xa7, 0xf3, 0x7e, 0x3d, - 0x7b, 0xea, 0x7a, 0x3d, 0xd1, 0xe5, 0x1f, 0x3e, 0x92, 0x8c, 0x7a, 0x3d, - 0xe8, 0x22, 0x46, 0xbe, 0xe4, 0x5c, 0x24, 0x3e, 0xa2, 0x0d, 0x6b, 0x3c, - 0xfb, 0x04, 0x21, 0xbd, 0x1c, 0x6e, 0xd1, 0xbe, 0xd5, 0xc6, 0xd9, 0xbc, - 0xb6, 0xe8, 0xdf, 0x3d, 0xd8, 0x73, 0x09, 0x3d, 0xcb, 0x45, 0xb1, 0xbe, - 0xda, 0x6a, 0x0e, 0x3d, 0x40, 0xbe, 0xef, 0xbc, 0xe4, 0xbb, 0xcb, 0xbd, - 0xf6, 0x35, 
0x30, 0x3d, 0x25, 0x3a, 0x56, 0xbe, 0x1f, 0x35, 0x0a, 0x3d, - 0x95, 0x31, 0x21, 0x3d, 0xde, 0xaa, 0x54, 0xbe, 0x8d, 0x0a, 0x6b, 0x3e, - 0xd5, 0x70, 0x02, 0xbd, 0xdc, 0x18, 0xaa, 0x3c, 0x2a, 0x0c, 0x79, 0xbe, - 0xee, 0xc5, 0x04, 0x3b, 0x2c, 0xb9, 0xbe, 0x3d, 0x0f, 0x55, 0x82, 0xbc, - 0x94, 0xf6, 0x00, 0xbf, 0x0a, 0xa2, 0x02, 0xbe, 0xa3, 0x2b, 0x58, 0xbd, - 0x09, 0x4f, 0xd3, 0xbd, 0x57, 0x98, 0x36, 0xbe, 0xcd, 0xed, 0x81, 0xbe, - 0x78, 0x4d, 0x3b, 0xbd, 0xa1, 0xf9, 0xdc, 0xbd, 0x18, 0xc4, 0x29, 0xbd, - 0xf5, 0x6d, 0xb2, 0x3e, 0x43, 0x7b, 0x53, 0x3e, 0x2b, 0x6a, 0x69, 0x3c, - 0xec, 0x2e, 0x13, 0xbf, 0x6a, 0x0d, 0x2c, 0xbe, 0x3d, 0xe3, 0x32, 0x3e, - 0xf4, 0x41, 0x39, 0x3d, 0x48, 0xd3, 0x49, 0xbe, 0x7f, 0x25, 0x9a, 0xbe, - 0xd3, 0x36, 0x0b, 0xbe, 0xa5, 0xa3, 0x89, 0xbd, 0x09, 0x30, 0xe5, 0xbd, - 0x13, 0x17, 0x83, 0xbe, 0x1a, 0x4c, 0xc4, 0xbc, 0x81, 0x1e, 0x67, 0xbe, - 0x82, 0x77, 0xdf, 0xbd, 0x02, 0x7d, 0x33, 0x3e, 0xd3, 0x35, 0x02, 0x3e, - 0x8a, 0xc0, 0x90, 0x3c, 0x8b, 0xd0, 0x95, 0xbe, 0x2a, 0x67, 0x6f, 0xbe, - 0xf3, 0xf4, 0x20, 0x3e, 0x01, 0x28, 0xba, 0x3c, 0x55, 0x65, 0xaa, 0xbd, - 0x76, 0x1d, 0x90, 0xbd, 0xa5, 0x37, 0xce, 0x3d, 0x8f, 0xd7, 0x80, 0xbd, - 0x02, 0xea, 0x7d, 0xbc, 0xd1, 0xff, 0xe6, 0xbd, 0x96, 0xb5, 0xa0, 0x3d, - 0xea, 0xb0, 0x90, 0xbe, 0x9e, 0xed, 0x99, 0xbd, 0x2e, 0xee, 0xd9, 0x3d, - 0xe9, 0xf4, 0xb5, 0x3e, 0xa2, 0xb4, 0xfe, 0x3d, 0xe5, 0x4b, 0x30, 0x3d, - 0x07, 0xf3, 0x58, 0xbe, 0x29, 0xa8, 0x2a, 0x3b, 0xf6, 0x0c, 0x40, 0x3e, - 0xb9, 0x87, 0xc0, 0xbc, 0xf3, 0x12, 0x67, 0x3d, 0xd7, 0x33, 0x82, 0xbd, - 0xba, 0x47, 0x15, 0xbd, 0x64, 0xca, 0x29, 0x3e, 0xca, 0x70, 0x5d, 0x3e, - 0x9c, 0xa8, 0xcb, 0x3d, 0xbe, 0xe3, 0xaf, 0xbd, 0xaf, 0x93, 0x2c, 0xbe, - 0x07, 0x36, 0xb5, 0x3e, 0xfc, 0xff, 0x34, 0x3e, 0x71, 0x9a, 0xbb, 0xbd, - 0xa8, 0x92, 0x4b, 0x3e, 0xb1, 0x22, 0x29, 0xbe, 0xce, 0x5d, 0x3e, 0x3e, - 0xda, 0xca, 0x42, 0x3e, 0x20, 0x1a, 0x58, 0x3d, 0x5c, 0x0a, 0x0c, 0x3d, - 0xe2, 0xff, 0xf0, 0x3d, 0x79, 0xfd, 0x0c, 0x3e, 0x69, 0x5f, 0x03, 0x3e, - 0x66, 0xd3, 
0x2e, 0xbb, 0x3a, 0x63, 0x64, 0x3c, 0x10, 0x2f, 0x48, 0xbe, - 0xc5, 0xa7, 0x47, 0xbe, 0xda, 0x5a, 0x97, 0x3e, 0xc1, 0x6e, 0xcd, 0xbc, - 0x9a, 0x9c, 0x51, 0xbd, 0x31, 0x27, 0xdd, 0x3d, 0x0b, 0xb2, 0x80, 0xbd, - 0xbf, 0x75, 0xa7, 0xbc, 0xd5, 0x65, 0x2f, 0x3e, 0xc4, 0x0d, 0x1b, 0x3e, - 0xcf, 0x7f, 0xf2, 0x3d, 0x73, 0xc7, 0xf2, 0x3d, 0x69, 0x2e, 0x98, 0xbb, - 0xa8, 0x5b, 0xa8, 0x3d, 0xfd, 0xb0, 0xbf, 0xbd, 0xa3, 0x49, 0xfc, 0xbd, - 0xad, 0xf5, 0x02, 0xbe, 0x60, 0x1e, 0x26, 0xbe, 0x1d, 0x96, 0x3d, 0x3e, - 0xf7, 0x23, 0x2c, 0x3e, 0x44, 0x1b, 0x86, 0x3d, 0x88, 0x56, 0x48, 0xbd, - 0xad, 0xf6, 0xee, 0x3c, 0x0d, 0x81, 0x13, 0x3d, 0xd0, 0x76, 0x09, 0x3e, - 0x49, 0x83, 0x83, 0xbd, 0x50, 0xd6, 0x79, 0xbe, 0x8c, 0x17, 0x4f, 0x3d, - 0xec, 0xe5, 0x90, 0x3d, 0x1e, 0x19, 0x4f, 0x3d, 0x1f, 0x3c, 0x9f, 0xbd, - 0xe5, 0x47, 0x4b, 0xbe, 0x33, 0xf0, 0x14, 0xbe, 0x58, 0xbf, 0x21, 0x3d, - 0xd2, 0x8c, 0x42, 0x3e, 0x31, 0xe6, 0x9a, 0x3d, 0xf9, 0x4e, 0xab, 0xbd, - 0x6f, 0x46, 0x1f, 0xbe, 0x9e, 0xf1, 0x21, 0x3d, 0x04, 0x72, 0xfb, 0x3d, - 0x29, 0xca, 0x24, 0x3e, 0x32, 0x01, 0xa1, 0xbe, 0x07, 0x9b, 0x45, 0xbe, - 0xf9, 0x09, 0xc5, 0x3d, 0xc9, 0x84, 0x44, 0xbd, 0xde, 0xb5, 0x68, 0xbd, - 0x0a, 0xf6, 0x3e, 0xbe, 0x78, 0x6e, 0xbc, 0xbd, 0x03, 0xf8, 0x38, 0xbd, - 0xe9, 0xf6, 0x17, 0xbd, 0x1a, 0x19, 0x3b, 0x3e, 0x43, 0xb1, 0xdb, 0x3c, - 0xc5, 0x5b, 0x1e, 0xbb, 0xcc, 0x9b, 0x00, 0xbe, 0x01, 0xe4, 0xe4, 0xba, - 0xe5, 0x8d, 0x26, 0x3e, 0x4b, 0x09, 0x0a, 0xbc, 0x50, 0x4e, 0xe0, 0xbe, - 0xe3, 0x93, 0xf3, 0xbc, 0xe8, 0xe9, 0x20, 0x3d, 0x23, 0xa7, 0xe2, 0x3c, - 0xe2, 0x05, 0xa7, 0x3d, 0xd4, 0xda, 0x29, 0xbd, 0xb3, 0x43, 0xa7, 0xbc, - 0x28, 0x61, 0x0d, 0xbd, 0x7e, 0x55, 0xa7, 0x3d, 0x5f, 0x27, 0x3f, 0x3e, - 0x12, 0x19, 0xca, 0x3b, 0xc9, 0x89, 0x0b, 0xbd, 0x57, 0x99, 0x33, 0xbd, - 0x61, 0x8f, 0xda, 0xbc, 0x6a, 0x54, 0x5a, 0x3e, 0x31, 0xeb, 0x2b, 0x3d, - 0x8c, 0x95, 0x97, 0xbe, 0x5b, 0x2d, 0x85, 0x3e, 0x49, 0x3f, 0xf4, 0xbc, - 0x20, 0xbb, 0x62, 0x3c, 0x01, 0x69, 0xae, 0xbd, 0xe1, 0x2c, 0x43, 0xbe, - 0xe9, 0x5d, 
0x84, 0x3d, 0xb3, 0x61, 0x17, 0x3e, 0x47, 0x07, 0x95, 0xbc, - 0xcd, 0x7c, 0x87, 0x3e, 0xd9, 0xb3, 0x03, 0x3d, 0x1c, 0x7e, 0x15, 0x3d, - 0xe1, 0x0b, 0xb0, 0xbd, 0x23, 0xfe, 0x94, 0x3c, 0xf0, 0x36, 0xd7, 0x3d, - 0x9e, 0x2b, 0x82, 0x3c, 0x78, 0x43, 0x9b, 0xbe, 0xf9, 0x5d, 0x0c, 0xbe, - 0x07, 0x45, 0xda, 0x3d, 0x79, 0x36, 0x19, 0x3d, 0x49, 0xff, 0xbc, 0x3c, - 0xd6, 0x6e, 0xec, 0xbe, 0x6c, 0xb2, 0xd5, 0x3c, 0x2a, 0xb0, 0x92, 0x3b, - 0x45, 0x87, 0x3a, 0x3e, 0xd3, 0xe5, 0xb8, 0xbd, 0x92, 0x1a, 0x2e, 0x3c, - 0x9b, 0x33, 0x3c, 0x3e, 0x0f, 0x3d, 0xa8, 0xbe, 0x53, 0x7c, 0xa6, 0x3d, - 0x3b, 0x9e, 0x98, 0xbe, 0x96, 0x91, 0xd6, 0x3c, 0x71, 0x5b, 0x99, 0xbe, - 0x73, 0x0b, 0x04, 0x3e, 0xfa, 0x8a, 0xc0, 0x3d, 0x7f, 0x1b, 0xdd, 0x3d, - 0xe4, 0x01, 0x84, 0xbd, 0xcf, 0x63, 0xdb, 0xbd, 0xda, 0x5d, 0x8d, 0xbd, - 0x44, 0xe1, 0x46, 0xbd, 0x65, 0x6c, 0x05, 0xbe, 0x0a, 0x83, 0xb1, 0xbd, - 0x97, 0x4a, 0x59, 0xbe, 0x77, 0x26, 0xa7, 0x3d, 0x0d, 0x22, 0xea, 0xbd, - 0x70, 0x48, 0x14, 0xbe, 0x01, 0x31, 0x04, 0x3e, 0xe0, 0x5e, 0xb8, 0xbd, - 0xd3, 0xe3, 0xee, 0xbd, 0x4e, 0x6f, 0xc4, 0xbc, 0x2f, 0xab, 0x53, 0x3d, - 0xd2, 0x79, 0x2c, 0xbe, 0xea, 0x5e, 0xdb, 0xbd, 0x02, 0x40, 0x4d, 0xbd, - 0xcf, 0x47, 0x5d, 0xbd, 0x1e, 0x48, 0x97, 0xbd, 0x7c, 0x3b, 0xca, 0x3d, - 0x75, 0x1d, 0x43, 0xbe, 0xb7, 0xab, 0x86, 0x3b, 0xfa, 0x51, 0xe2, 0x3b, - 0xcc, 0x0c, 0x1a, 0xbe, 0xda, 0x56, 0xc0, 0x3d, 0xd2, 0xa5, 0x6b, 0xbd, - 0x46, 0xe8, 0x27, 0xbe, 0x95, 0x71, 0x4e, 0xbe, 0x78, 0xda, 0xb0, 0x3d, - 0xec, 0xfd, 0x31, 0xbe, 0x5f, 0xb5, 0x44, 0xbe, 0x2b, 0x48, 0x06, 0xbe, - 0x28, 0x5c, 0xf4, 0xbd, 0x1a, 0xb1, 0xa3, 0x3c, 0x77, 0xd6, 0xef, 0xbd, - 0xec, 0xe1, 0x93, 0xbd, 0x85, 0xb2, 0xcd, 0xbd, 0xf8, 0x0b, 0x52, 0xbd, - 0x16, 0x95, 0xd7, 0x3c, 0xb2, 0x00, 0x29, 0x3c, 0x42, 0x8c, 0xb6, 0x3d, - 0xa8, 0x79, 0x1f, 0xbe, 0xa5, 0xfe, 0xe8, 0xbd, 0x28, 0x30, 0xb8, 0x3d, - 0xb8, 0x23, 0x9e, 0x3d, 0x7f, 0xe1, 0x33, 0xbe, 0x2b, 0xf8, 0x3f, 0xbe, - 0x05, 0x8c, 0x70, 0xbd, 0x30, 0x32, 0xe0, 0xbd, 0xff, 0xd3, 0x45, 0xbe, - 0x29, 0x82, 
0x33, 0xbc, 0x2b, 0x86, 0x13, 0xbe, 0x0b, 0x81, 0x07, 0xbd, - 0xb1, 0xd4, 0xa8, 0x3c, 0x42, 0xd6, 0x2d, 0xbc, 0xfc, 0x19, 0x33, 0xba, - 0xd5, 0xf7, 0x29, 0xbe, 0xff, 0xb9, 0x18, 0xbe, 0x34, 0x96, 0x36, 0xbe, - 0x8d, 0x80, 0xcc, 0xbd, 0x55, 0x1f, 0xe9, 0xbc, 0xa1, 0xdd, 0x69, 0xbe, - 0xd3, 0x86, 0xb4, 0x3c, 0x3a, 0xc2, 0x0f, 0xbe, 0xc0, 0x63, 0xcd, 0xbc, - 0xcb, 0xf8, 0xcf, 0xbd, 0x45, 0x7f, 0x5f, 0x3d, 0x95, 0x59, 0xbd, 0x3d, - 0x7b, 0x9c, 0xf0, 0xbd, 0x57, 0xaf, 0xfb, 0x3c, 0xad, 0x44, 0xaf, 0xbd, - 0xa5, 0xf3, 0xbc, 0xbd, 0xb4, 0xe1, 0x59, 0xbd, 0xa6, 0x28, 0x29, 0x3d, - 0xcb, 0x8b, 0x50, 0xbe, 0x20, 0x85, 0x95, 0xbd, 0x33, 0xcf, 0xfa, 0xbb, - 0xde, 0xfc, 0x1c, 0x3d, 0x91, 0xb6, 0x43, 0xbe, 0x54, 0x84, 0xaf, 0xbd, - 0xdc, 0xde, 0x04, 0xbe, 0x69, 0xc6, 0x19, 0xbe, 0x43, 0xcf, 0x23, 0xbd, - 0x77, 0x3b, 0x58, 0xbe, 0x50, 0x09, 0x50, 0xbd, 0x17, 0xa2, 0x2d, 0xbd, - 0xe0, 0xad, 0xb5, 0xba, 0x47, 0x9d, 0xcc, 0x3d, 0x06, 0x72, 0xe3, 0x3d, - 0x92, 0x81, 0x9f, 0x3c, 0x55, 0x1d, 0x06, 0xbe, 0xa0, 0x79, 0x9c, 0x3c, - 0xe1, 0xec, 0xe6, 0xbd, 0x63, 0x0c, 0x9a, 0xbd, 0xc1, 0x82, 0x5e, 0xbe, - 0x01, 0x4c, 0x38, 0xbe, 0x64, 0x06, 0x52, 0xbd, 0xd1, 0x54, 0x08, 0xbe, - 0x0c, 0xb8, 0xc2, 0x3d, 0x5a, 0xd2, 0xb4, 0x3d, 0x84, 0xcb, 0x24, 0xbe, - 0x80, 0xb4, 0x8f, 0x3c, 0x84, 0x69, 0x0c, 0xbe, 0x3d, 0xda, 0x05, 0xbe, - 0x4c, 0x48, 0x0c, 0x3e, 0xdc, 0x35, 0xcf, 0xbc, 0x80, 0x1b, 0x0b, 0xbe, - 0xaa, 0x3a, 0x9c, 0xbc, 0x21, 0xd4, 0x83, 0x3d, 0x26, 0x6e, 0xe8, 0x3d, - 0xe4, 0x41, 0x28, 0x3d, 0x88, 0x35, 0x1d, 0xbe, 0x2c, 0xfb, 0xb2, 0x3d, - 0xe4, 0xbb, 0x0a, 0xbe, 0x95, 0x00, 0xfe, 0xbd, 0x3d, 0x87, 0x89, 0x3c, - 0x19, 0x9f, 0xa0, 0xbc, 0xac, 0xce, 0x6f, 0xbd, 0x16, 0x48, 0x21, 0xbe, - 0xd8, 0x21, 0x13, 0xbe, 0x15, 0x49, 0xfc, 0xbd, 0x6c, 0x10, 0x31, 0x3e, - 0x93, 0x04, 0xa2, 0xbd, 0xbc, 0xce, 0xbe, 0xbd, 0x59, 0xce, 0x51, 0xbd, - 0xd6, 0xf1, 0x60, 0x3d, 0x3a, 0x92, 0x76, 0xbd, 0xb8, 0xef, 0x66, 0x3b, - 0x26, 0x2c, 0x8e, 0x3d, 0xf1, 0xff, 0x1e, 0xbe, 0xc2, 0x6f, 0x26, 0x3d, - 0xe7, 0xb2, 
0x4e, 0xbe, 0x31, 0x1e, 0xc4, 0xbd, 0x2f, 0x0a, 0x81, 0x3c, - 0xb9, 0x73, 0xea, 0xbd, 0x41, 0xdf, 0x4b, 0xbd, 0x9d, 0x47, 0x88, 0xbd, - 0xab, 0x21, 0x68, 0xbd, 0x77, 0x20, 0xf9, 0xbc, 0x40, 0x55, 0xaa, 0x3d, - 0x26, 0xe1, 0xf9, 0xbd, 0x97, 0xbe, 0xdd, 0x3d, 0x57, 0xe8, 0x91, 0xbd, - 0x00, 0xbb, 0x3e, 0xbe, 0x22, 0x51, 0x91, 0xbd, 0x6b, 0xe6, 0xa1, 0x3d, - 0x7d, 0xf1, 0xa6, 0x3d, 0xa9, 0x89, 0x86, 0x3d, 0x57, 0x91, 0xef, 0xbd, - 0xcb, 0x06, 0x01, 0xba, 0x9d, 0xc0, 0x4a, 0x3d, 0x71, 0xaf, 0x35, 0xbe, - 0x01, 0x3d, 0x16, 0xbc, 0x01, 0xa4, 0x81, 0xbd, 0xa4, 0xf6, 0x2e, 0xbd, - 0xb7, 0xe9, 0x0d, 0xbd, 0x94, 0xef, 0x26, 0xbe, 0xee, 0x31, 0x20, 0xbe, - 0x43, 0x8a, 0x30, 0x3d, 0x09, 0xa3, 0xb1, 0xbd, 0x20, 0xb8, 0x11, 0x3c, - 0x55, 0x7c, 0x1e, 0x3d, 0xbd, 0x60, 0x4f, 0xbe, 0x05, 0x06, 0xa3, 0x3c, - 0x92, 0x48, 0xfa, 0xbb, 0x6a, 0x53, 0x0a, 0xbe, 0xd5, 0x01, 0x19, 0xbc, - 0x69, 0xf6, 0x2a, 0xbe, 0xf9, 0xbe, 0x08, 0xbe, 0x4b, 0x17, 0x49, 0x3c, - 0xb4, 0x10, 0x79, 0x3d, 0x4f, 0xb1, 0xf2, 0xbc, 0xc4, 0x6b, 0x8a, 0x3c, - 0x6c, 0xa7, 0x35, 0xbe, 0xe2, 0xfb, 0xe0, 0xbd, 0xf3, 0xc0, 0x2a, 0xbd, - 0xe6, 0x47, 0xbd, 0xbd, 0xc3, 0x30, 0x66, 0xbe, 0xfb, 0x2d, 0x35, 0x3d, - 0x13, 0xd6, 0xad, 0xbd, 0x7f, 0xd2, 0x01, 0xbe, 0x9e, 0xe1, 0x57, 0xbd, - 0x8c, 0x02, 0xe2, 0x3c, 0x21, 0x90, 0x11, 0xbe, 0x56, 0x8f, 0xab, 0x3d, - 0xba, 0x5b, 0xdc, 0x3d, 0xaa, 0x5e, 0x77, 0xbe, 0x1c, 0xc9, 0x64, 0x3d, - 0xfa, 0xf6, 0xd4, 0x3b, 0x72, 0x3d, 0x4a, 0x3d, 0x8c, 0xd5, 0x34, 0xbe, - 0x32, 0x30, 0xa8, 0x3b, 0x60, 0x0c, 0x8e, 0x3c, 0x7b, 0xc7, 0x30, 0x3d, - 0x86, 0x51, 0xb9, 0xbb, 0xed, 0x50, 0x0e, 0x3e, 0xb3, 0x70, 0x8a, 0xbc, - 0xc6, 0x3a, 0x1d, 0xbe, 0x77, 0x4d, 0x09, 0xbe, 0xb7, 0x5b, 0x39, 0xbd, - 0x23, 0xc9, 0x94, 0x3d, 0x8c, 0x6b, 0x7d, 0xbd, 0xc7, 0x7e, 0x45, 0xbe, - 0xf7, 0x39, 0xb8, 0xbd, 0x22, 0x46, 0x41, 0xbe, 0x9c, 0xcc, 0x64, 0x3c, - 0x97, 0xae, 0x94, 0xbd, 0xf9, 0x00, 0x8e, 0xbd, 0x34, 0xd3, 0xae, 0xbd, - 0x95, 0x7e, 0x4c, 0x3d, 0x16, 0x3f, 0x81, 0x3d, 0x77, 0x7e, 0x9b, 0xbc, - 0x47, 0x7b, 
0x87, 0xbc, 0xb8, 0xc1, 0x14, 0xbe, 0x48, 0x64, 0xff, 0xbd, - 0x41, 0x09, 0xe2, 0xbc, 0xcb, 0x02, 0x2d, 0xbd, 0x52, 0x57, 0x26, 0xba, - 0x2b, 0x92, 0x83, 0xbd, 0x12, 0x88, 0x81, 0xbe, 0x11, 0x87, 0xe4, 0x3d, - 0xf6, 0x25, 0x51, 0xbe, 0xd5, 0x2d, 0xe9, 0xbd, 0xca, 0xc7, 0x6b, 0xbe, - 0x20, 0x33, 0x42, 0x3c, 0xfd, 0x3b, 0x54, 0xbe, 0xcc, 0x6d, 0x18, 0xbd, - 0x50, 0x31, 0x1f, 0xbe, 0x15, 0x5a, 0x48, 0x3e, 0x6a, 0xa8, 0x1e, 0x3e, - 0x1c, 0x72, 0x9d, 0xbe, 0xc2, 0xcf, 0x19, 0x3e, 0xda, 0x99, 0x3e, 0x3d, - 0x33, 0x9c, 0x84, 0xbf, 0xc3, 0xf1, 0x19, 0x3d, 0x3f, 0xf7, 0x24, 0xbd, - 0x29, 0x2a, 0xf7, 0x3e, 0x48, 0xf5, 0x48, 0xbe, 0xf4, 0xbc, 0xf4, 0xbd, - 0xed, 0x44, 0x7f, 0xbd, 0x3b, 0x94, 0x8a, 0x3e, 0xcd, 0x23, 0x5d, 0xbc, - 0x58, 0xdb, 0x8b, 0x3e, 0xe7, 0x74, 0xac, 0xbe, 0x6e, 0x53, 0x84, 0xbd, - 0x18, 0x4a, 0x4a, 0x3e, 0x96, 0x8c, 0xf1, 0x3d, 0xae, 0xc0, 0x5b, 0xbc, - 0x92, 0x87, 0x52, 0x3e, 0x51, 0xcd, 0x55, 0xbd, 0x2d, 0x96, 0x0f, 0x3d, - 0xba, 0xee, 0x95, 0xbc, 0x79, 0xf2, 0x32, 0x3e, 0x9a, 0x6c, 0xd8, 0xbe, - 0x67, 0xfd, 0x06, 0xbe, 0x47, 0x20, 0xd5, 0xbc, 0x67, 0x20, 0xf5, 0x3d, - 0xb6, 0x84, 0xbb, 0x3b, 0x20, 0x91, 0x3a, 0x3e, 0x86, 0x8c, 0x2b, 0xbe, - 0x94, 0x2b, 0xeb, 0x3d, 0x57, 0xbd, 0x17, 0x3e, 0x8f, 0x5f, 0x18, 0xbc, - 0x7d, 0x8f, 0x65, 0xbd, 0x99, 0x37, 0xc3, 0x3e, 0x04, 0x8c, 0xa8, 0xbd, - 0x8a, 0x8d, 0xd3, 0x3d, 0xdc, 0x19, 0xa9, 0xbd, 0x92, 0x13, 0x84, 0x3d, - 0x91, 0xb8, 0xa3, 0xbe, 0x7b, 0x31, 0x87, 0xbd, 0x5b, 0xf4, 0x29, 0xbb, - 0x99, 0x9a, 0x44, 0x3e, 0x7a, 0x99, 0x26, 0xbd, 0xe1, 0xd1, 0x03, 0x3e, - 0x37, 0xac, 0xa1, 0x3c, 0x46, 0xe3, 0x0d, 0x3e, 0xcc, 0xad, 0x96, 0x3d, - 0x34, 0xab, 0xf9, 0xbd, 0xcb, 0x7e, 0x36, 0xbe, 0x75, 0xa7, 0x8c, 0x3e, - 0x65, 0x58, 0x41, 0xbe, 0x12, 0x45, 0xa3, 0xba, 0xd5, 0x51, 0xe2, 0xbc, - 0xac, 0x2c, 0xc8, 0xbc, 0x8a, 0x1d, 0x70, 0xbe, 0x58, 0xb0, 0x65, 0x3c, - 0x00, 0x29, 0xdc, 0x3d, 0xf7, 0x94, 0x9d, 0x3e, 0x24, 0xfa, 0x84, 0xbd, - 0xa0, 0x06, 0xfe, 0x3d, 0x84, 0x08, 0x10, 0xbd, 0xf0, 0x0a, 0xc5, 0x3d, - 0xd4, 0xf2, 
0xd3, 0x3c, 0xfd, 0xa3, 0xd5, 0xbd, 0xac, 0x95, 0x4e, 0xbb, - 0x0a, 0x6d, 0x99, 0x3e, 0x5a, 0x84, 0x1d, 0x3c, 0x56, 0x76, 0x8c, 0xbc, - 0xa3, 0xff, 0xa8, 0xbc, 0xb2, 0x9d, 0x4b, 0x3c, 0xe3, 0x87, 0x8b, 0xbe, - 0x30, 0xe9, 0xe6, 0xbd, 0x97, 0xf3, 0xef, 0xbc, 0x67, 0x40, 0x9f, 0x3e, - 0x7e, 0x95, 0x9c, 0xbd, 0xa1, 0xd7, 0xf4, 0x3d, 0x14, 0x05, 0x44, 0xbd, - 0x32, 0x50, 0x40, 0x3e, 0x7f, 0x4f, 0x0e, 0xbe, 0x24, 0xb4, 0x35, 0xbd, - 0xbb, 0x01, 0x13, 0xbe, 0x75, 0x97, 0x72, 0x3e, 0x72, 0xb5, 0xc4, 0xbc, - 0x2d, 0x03, 0xa3, 0xbe, 0x30, 0x9c, 0x85, 0xbd, 0xe9, 0x8a, 0xdd, 0x3d, - 0x66, 0x85, 0xe1, 0xbd, 0x00, 0x78, 0x16, 0xbe, 0xa6, 0xe0, 0x5d, 0xbd, - 0x39, 0xa7, 0x61, 0x3e, 0x40, 0xe9, 0xfa, 0xbd, 0x03, 0x1a, 0x78, 0x3e, - 0xae, 0x8a, 0x10, 0xbe, 0xff, 0x69, 0x73, 0x3d, 0x83, 0xc1, 0xd1, 0xbd, - 0xe9, 0xdc, 0x01, 0xbe, 0xef, 0xa7, 0x5f, 0x3d, 0x1d, 0xe3, 0x3f, 0x3e, - 0xe2, 0x74, 0x36, 0x3d, 0xda, 0xb4, 0x5d, 0xbe, 0xdf, 0x67, 0x56, 0xbd, - 0x3b, 0xe8, 0xca, 0x3d, 0xdb, 0x14, 0x21, 0xbe, 0x26, 0x0e, 0x21, 0xbe, - 0x70, 0xee, 0xce, 0xbd, 0xce, 0xd1, 0x8d, 0x3e, 0xf7, 0x98, 0xdb, 0xbd, - 0x76, 0xd8, 0x78, 0x3d, 0xd9, 0xc5, 0x25, 0xbe, 0x7b, 0x1e, 0x97, 0x3d, - 0x36, 0x31, 0x11, 0xbe, 0x1b, 0x15, 0x09, 0xbe, 0x20, 0xa6, 0x0b, 0xbb, - 0x25, 0xa1, 0xa0, 0x3e, 0x1b, 0xb2, 0xbb, 0xbd, 0x6d, 0x78, 0x9f, 0xbe, - 0xdf, 0xfb, 0x6a, 0xbd, 0xae, 0xdc, 0xc3, 0xbd, 0xb2, 0xe1, 0x8a, 0xbe, - 0x5a, 0x86, 0x90, 0xbd, 0x36, 0x2c, 0x91, 0xbd, 0xa1, 0x4f, 0x8b, 0x3e, - 0xef, 0x10, 0x11, 0xbd, 0x4e, 0xcc, 0xa8, 0x3d, 0x54, 0xf0, 0x7b, 0xbe, - 0x28, 0x40, 0x60, 0x3d, 0xa8, 0x9b, 0x00, 0x3d, 0xcf, 0xce, 0x28, 0xbd, - 0x3c, 0x31, 0x48, 0x3d, 0x41, 0xa1, 0xa4, 0x3e, 0xa7, 0x2a, 0x8b, 0xbe, - 0xdf, 0xd5, 0xf4, 0xbd, 0xac, 0xc1, 0xa2, 0x3b, 0xc4, 0x44, 0xcf, 0xbc, - 0x4f, 0x37, 0x5b, 0xbe, 0x67, 0xc2, 0x4c, 0xbd, 0x8b, 0x10, 0xb2, 0x3c, - 0x50, 0xe3, 0x26, 0x3e, 0xf8, 0x5e, 0xc2, 0x3d, 0x03, 0x12, 0x05, 0x3e, - 0x62, 0xbd, 0x1f, 0xbe, 0x1c, 0xec, 0xfe, 0x3d, 0x6e, 0x47, 0x50, 0x3d, - 0x60, 0x32, 
0x89, 0x3d, 0xac, 0x39, 0x92, 0xbd, 0x23, 0x38, 0x3c, 0x3e, - 0x5f, 0x1e, 0x76, 0x3c, 0x94, 0xe2, 0x19, 0xbd, 0x7a, 0xd7, 0xc8, 0xbc, - 0xd8, 0xe3, 0x91, 0x3b, 0x0d, 0x26, 0x99, 0xbe, 0x2a, 0xad, 0x5d, 0xbe, - 0x94, 0x8e, 0x89, 0x3c, 0x4f, 0x99, 0xe6, 0x3d, 0x38, 0xd7, 0x98, 0x3b, - 0xe2, 0x9d, 0x12, 0x3e, 0xe8, 0xb1, 0x8e, 0xbe, 0x2c, 0x89, 0x28, 0x3e, - 0x8f, 0xd0, 0x3e, 0x3e, 0x22, 0x07, 0x50, 0xbd, 0x01, 0x49, 0xb5, 0xbd, - 0x06, 0x7e, 0x76, 0x3e, 0xaa, 0x8b, 0xa0, 0xbd, 0xa5, 0x43, 0x4a, 0xbc, - 0xe5, 0x64, 0x84, 0xbc, 0x7c, 0xb1, 0x36, 0x3d, 0xae, 0x00, 0xc6, 0xbe, - 0x7f, 0x17, 0x0a, 0xbe, 0xa0, 0x53, 0x3c, 0xbd, 0xb2, 0x43, 0xab, 0x3d, - 0xf2, 0xff, 0xcd, 0x3d, 0x42, 0xde, 0xb6, 0x3e, 0x36, 0x05, 0x07, 0xbf, - 0x2d, 0x47, 0x54, 0xbd, 0x73, 0xa0, 0x21, 0x3e, 0xc1, 0x61, 0x05, 0x3e, - 0x27, 0x6b, 0xcb, 0xbd, 0x9a, 0xe1, 0x5b, 0x3e, 0xfe, 0xdc, 0x33, 0xbd, - 0x12, 0x05, 0x2f, 0x3e, 0x84, 0x79, 0x28, 0xbc, 0x3e, 0x68, 0x66, 0xbe, - 0x7a, 0x0f, 0xb7, 0xbe, 0xcb, 0xff, 0xa6, 0xbe, 0xe1, 0x4c, 0x6c, 0x3d, - 0x10, 0x0f, 0xbe, 0x3e, 0x90, 0xef, 0xd3, 0xbd, 0x20, 0x98, 0x8e, 0x3e, - 0x3f, 0x83, 0xbb, 0xbe, 0x22, 0x0e, 0xc8, 0xbd, 0x5b, 0x94, 0x08, 0x3e, - 0xf7, 0x96, 0x9b, 0x3d, 0x53, 0x44, 0x46, 0xbd, 0x31, 0x47, 0xbc, 0x3d, - 0x4b, 0x9a, 0xee, 0xbd, 0x38, 0xf9, 0x35, 0x3d, 0xce, 0xb3, 0x3e, 0x3d, - 0x9b, 0x95, 0x95, 0x3d, 0xcd, 0x6f, 0xbf, 0xbe, 0x13, 0x43, 0x6a, 0xbe, - 0xa7, 0x4d, 0xe2, 0x3d, 0x76, 0xb4, 0x0f, 0x3f, 0x8e, 0x98, 0xa0, 0xbc, - 0x26, 0x91, 0x33, 0xbe, 0xc6, 0x43, 0x7c, 0xbe, 0xbe, 0x98, 0xd5, 0xbc, - 0x48, 0x72, 0x43, 0x3e, 0xf7, 0x74, 0x88, 0xbc, 0xc2, 0x58, 0xec, 0x3c, - 0xb6, 0x16, 0xa7, 0x3d, 0x17, 0x8c, 0x39, 0x3e, 0x84, 0xf5, 0x00, 0xbe, - 0xc4, 0xa8, 0xe2, 0x3d, 0x56, 0xc9, 0x22, 0x3e, 0xa1, 0x57, 0x96, 0xbe, - 0x06, 0x1c, 0x97, 0xbd, 0xda, 0x24, 0x82, 0x3d, 0xb1, 0xaf, 0x1e, 0xbc, - 0x4c, 0x3d, 0x7e, 0xbe, 0xca, 0xe6, 0xbc, 0x3d, 0xec, 0xd0, 0x93, 0xbd, - 0x13, 0x35, 0xeb, 0xbd, 0xbb, 0x88, 0x0e, 0xbd, 0xeb, 0x2b, 0xf4, 0x3c, - 0x13, 0x20, 
0x46, 0x3e, 0x54, 0x9f, 0x78, 0xbd, 0x3d, 0x11, 0x44, 0xbd, - 0xb2, 0x3d, 0xe3, 0xbd, 0x3d, 0x61, 0xa8, 0x3d, 0xa9, 0xf8, 0x10, 0xbd, - 0xe2, 0xdc, 0x94, 0x3d, 0x14, 0x39, 0xde, 0xbd, 0x65, 0xa4, 0xde, 0xbd, - 0x72, 0xdd, 0x92, 0xbd, 0xb3, 0x05, 0x53, 0xbd, 0xcf, 0x8d, 0x1d, 0x3d, - 0x0a, 0x84, 0xa3, 0xbe, 0x5c, 0x03, 0x86, 0xbe, 0x16, 0xb6, 0xcb, 0xbe, - 0x30, 0x14, 0xfd, 0xbe, 0xe8, 0xfe, 0x3f, 0x3d, 0xec, 0x02, 0xc4, 0xbd, - 0x55, 0x4b, 0x99, 0x3c, 0xcb, 0xa5, 0x1e, 0xbd, 0xb4, 0x06, 0x82, 0x3d, - 0x2a, 0xd8, 0x92, 0xbe, 0x49, 0xea, 0xa1, 0xbd, 0x25, 0x78, 0x7b, 0xbe, - 0x34, 0xd6, 0xe8, 0xbd, 0xf3, 0x6a, 0x95, 0xbe, 0xde, 0x0c, 0xa1, 0xbd, - 0x81, 0xac, 0x78, 0x3b, 0x55, 0xa5, 0xd6, 0xbe, 0xfb, 0xfa, 0x65, 0xbe, - 0xd2, 0xa0, 0xb2, 0xbe, 0x01, 0x33, 0x4f, 0xbe, 0xc6, 0xa4, 0x8f, 0x3d, - 0xb4, 0x35, 0x72, 0x3d, 0xfd, 0xfb, 0xc4, 0xbc, 0x8a, 0xc5, 0x02, 0xbc, - 0x2e, 0x81, 0x1b, 0x3d, 0xf1, 0x88, 0x71, 0xbe, 0x10, 0xe7, 0x23, 0xbe, - 0xe5, 0xd4, 0x9b, 0xbd, 0x7e, 0x92, 0x9c, 0xbd, 0x32, 0x5d, 0xa5, 0xbe, - 0x3f, 0x12, 0x10, 0xbd, 0x06, 0xa8, 0xa4, 0xbc, 0x81, 0xe0, 0xb0, 0xbe, - 0x1a, 0xef, 0x5d, 0xbe, 0x15, 0x9b, 0xca, 0xbe, 0xa3, 0xb4, 0x36, 0xbe, - 0x80, 0x96, 0x54, 0xbd, 0x27, 0x41, 0xd4, 0xbc, 0xe1, 0x96, 0x7a, 0x3d, - 0xb0, 0xf1, 0xa4, 0x3d, 0x2e, 0x9f, 0x37, 0x3d, 0x69, 0x19, 0xa0, 0xbe, - 0xb0, 0xd8, 0xae, 0x3d, 0x42, 0x80, 0x0e, 0xbd, 0xbe, 0xbb, 0x8d, 0x3c, - 0x1f, 0x71, 0x93, 0xbe, 0xe5, 0x13, 0xa0, 0xbc, 0x55, 0xb8, 0xcd, 0xbc, - 0x55, 0xf0, 0xc0, 0x3b, 0xba, 0xe0, 0x2c, 0xbe, 0xa5, 0x38, 0xfc, 0xbe, - 0x03, 0x08, 0xdc, 0xbd, 0x8c, 0x72, 0x14, 0xbd, 0xef, 0xd9, 0x67, 0x3c, - 0xc9, 0x03, 0xe0, 0x3d, 0x69, 0xe2, 0xa0, 0x3d, 0xe1, 0x86, 0x06, 0x3d, - 0xa4, 0x52, 0x90, 0xbe, 0xda, 0x56, 0x29, 0xbc, 0x2b, 0x9c, 0xbd, 0x3d, - 0x12, 0xf7, 0xf3, 0xbc, 0x96, 0xad, 0x41, 0xbd, 0xb6, 0x4a, 0x10, 0xbd, - 0x7a, 0xee, 0xb5, 0xbd, 0x88, 0x83, 0xaa, 0x3d, 0xae, 0x03, 0xbd, 0xbe, - 0x4d, 0xaf, 0xe1, 0xbe, 0x32, 0x22, 0x4a, 0x3c, 0x6b, 0xa2, 0x90, 0xbd, - 0x7e, 0x81, 
0x95, 0xbd, 0xc7, 0xe1, 0xbc, 0x3d, 0x56, 0x42, 0x7e, 0xbd, - 0xb4, 0xdb, 0xcb, 0x3d, 0xfe, 0x8e, 0x0e, 0xbf, 0x68, 0xe9, 0x60, 0x3e, - 0xea, 0x83, 0xce, 0x3c, 0x04, 0x08, 0x6d, 0xbb, 0xff, 0xb2, 0x38, 0x3d, - 0x26, 0xe2, 0x82, 0x3c, 0x71, 0x20, 0x10, 0xbe, 0x82, 0x64, 0x13, 0x3e, - 0xa7, 0x1a, 0xc6, 0xbe, 0x3e, 0xe8, 0xc7, 0xbe, 0x30, 0x1e, 0xd8, 0x3d, - 0x66, 0x87, 0x50, 0xbe, 0x5d, 0xbf, 0x4b, 0xbe, 0xf9, 0x9e, 0xb8, 0xbd, - 0x22, 0x9e, 0x04, 0x3d, 0x89, 0x8f, 0x7a, 0x3d, 0x4a, 0xd9, 0x15, 0xbe, - 0x4f, 0x77, 0x5e, 0x3e, 0xc0, 0x19, 0x08, 0x3d, 0xe0, 0xd6, 0x47, 0xbd, - 0xfb, 0x2b, 0xb6, 0x3d, 0x64, 0xa3, 0xf1, 0x3c, 0x36, 0xee, 0xd1, 0xbd, - 0x3c, 0x60, 0x60, 0x3d, 0x23, 0xae, 0x75, 0xbe, 0xc8, 0x00, 0x89, 0xbe, - 0xc4, 0x9c, 0x22, 0x3e, 0xc9, 0x29, 0x88, 0xbe, 0xd5, 0x6a, 0xc2, 0xbe, - 0x87, 0x71, 0xca, 0xbd, 0x76, 0x80, 0xa3, 0xbc, 0x84, 0xcf, 0xbc, 0xbd, - 0x4c, 0xac, 0x17, 0xbe, 0xaa, 0xd8, 0x91, 0x3e, 0xa9, 0x44, 0x52, 0x3c, - 0xc0, 0xee, 0xfa, 0xbd, 0x2c, 0x3b, 0x24, 0x3d, 0xc8, 0x0a, 0x8c, 0x3d, - 0x37, 0x10, 0x07, 0x3d, 0x98, 0x78, 0xdf, 0x3d, 0x0c, 0xe2, 0xe5, 0xbd, - 0x2c, 0x38, 0x34, 0xbe, 0xe5, 0x49, 0xb7, 0xbd, 0xc7, 0xcf, 0xd8, 0xbe, - 0x54, 0xf3, 0x6e, 0xbe, 0x2d, 0xbc, 0x19, 0xbe, 0xe4, 0x0f, 0x8d, 0x3d, - 0xf1, 0x48, 0xdc, 0xbd, 0xa2, 0x21, 0xdc, 0x3c, 0x86, 0x4c, 0x9d, 0x3e, - 0x93, 0xcd, 0xe7, 0x3d, 0x30, 0x77, 0xbf, 0xbd, 0xe0, 0xd2, 0x9f, 0xbc, - 0x55, 0x3a, 0x8e, 0x3d, 0xf2, 0x3f, 0x6e, 0xbe, 0xfc, 0xb4, 0x96, 0x3d, - 0xf3, 0xfe, 0xa1, 0xbe, 0x80, 0xf7, 0xfc, 0xbd, 0x34, 0xd4, 0x6c, 0x3d, - 0x31, 0x5b, 0x44, 0xbe, 0xcc, 0x50, 0x45, 0xbe, 0xd0, 0x2b, 0xf4, 0xbd, - 0xf7, 0x13, 0x02, 0xbe, 0x60, 0x08, 0xd7, 0xbd, 0xe5, 0x7e, 0x95, 0x3e, - 0xed, 0x1e, 0x7e, 0x3e, 0x30, 0x5f, 0x10, 0x3d, 0xc7, 0xf2, 0x47, 0xbe, - 0x69, 0x3c, 0x8e, 0x3b, 0xf1, 0xee, 0x51, 0xbd, 0x8e, 0x09, 0x41, 0xbe, - 0x6f, 0x3e, 0xbf, 0x3d, 0x30, 0x8d, 0x09, 0xbe, 0xc1, 0xa9, 0x19, 0xbe, - 0xa7, 0xb8, 0x96, 0xbd, 0x6c, 0xd8, 0x82, 0x3c, 0x45, 0x45, 0x3d, 0xbd, - 0x81, 0xb8, 
0x4f, 0xbe, 0xd5, 0xe1, 0x32, 0x3d, 0x26, 0x85, 0x51, 0xbc, - 0x86, 0x09, 0x8b, 0x3e, 0xa3, 0x45, 0x87, 0x3e, 0x50, 0xeb, 0x52, 0x3e, - 0x17, 0xe7, 0x97, 0x3c, 0xc6, 0x63, 0x18, 0xbe, 0x34, 0xff, 0xd4, 0xbc, - 0xf9, 0xdc, 0xe5, 0xbe, 0x4c, 0x05, 0x86, 0x3d, 0xee, 0x91, 0xf2, 0xbc, - 0x9f, 0x83, 0xfa, 0xbc, 0x02, 0x38, 0x9e, 0xbd, 0x7c, 0x8a, 0xfe, 0xbc, - 0x9a, 0x13, 0x02, 0xbe, 0xc3, 0xcf, 0x08, 0xbe, 0x49, 0xfb, 0x0d, 0x3d, - 0x17, 0xf6, 0x29, 0xbd, 0x88, 0x7a, 0xdd, 0x3e, 0x6f, 0x3b, 0x01, 0x3e, - 0x19, 0xdd, 0x5b, 0x3e, 0x47, 0xbc, 0x19, 0xbd, 0x43, 0xc4, 0x9a, 0xbd, - 0xdd, 0x16, 0x82, 0x3d, 0xd4, 0x08, 0x89, 0xbe, 0x1f, 0xb5, 0xd8, 0xbe, - 0x5a, 0x44, 0x69, 0x3d, 0x35, 0x15, 0x60, 0xbe, 0x06, 0x34, 0x9e, 0xbe, - 0xff, 0x5a, 0xa1, 0x3e, 0x61, 0x69, 0x86, 0xbe, 0x90, 0xee, 0x8a, 0xbd, - 0x4a, 0x55, 0x36, 0xbe, 0x43, 0x71, 0x57, 0x3d, 0xaa, 0xcb, 0x0e, 0xbf, - 0xb4, 0x91, 0x35, 0xbf, 0x6f, 0x5b, 0x0f, 0xbf, 0x40, 0x82, 0x03, 0x3e, - 0x07, 0x78, 0x98, 0xbe, 0xc2, 0x15, 0x90, 0xbe, 0xf9, 0x72, 0xaa, 0xbe, - 0xe2, 0x17, 0x9c, 0xbe, 0x08, 0x3e, 0xa9, 0x3d, 0x9f, 0xae, 0x1a, 0xbf, - 0xf0, 0xf7, 0x28, 0xbf, 0xe1, 0x8b, 0xfc, 0x3d, 0x5b, 0xb5, 0xec, 0xbe, - 0x64, 0x14, 0xa9, 0x3c, 0x5f, 0x3a, 0xdb, 0xbc, 0xc2, 0xc8, 0xd3, 0xbd, - 0x1d, 0xa2, 0x52, 0xbf, 0x8d, 0x06, 0x85, 0x3d, 0x59, 0xfc, 0xaf, 0xbe, - 0xab, 0xa1, 0xb0, 0x3c, 0x4d, 0xa0, 0x2f, 0xbe, 0x04, 0x1f, 0xd3, 0xbb, - 0x20, 0x49, 0xe2, 0xbc, 0x12, 0xc7, 0x02, 0x3d, 0x76, 0x89, 0x31, 0x3e, - 0xb6, 0x83, 0x48, 0x3d, 0x40, 0xc8, 0xcf, 0xbe, 0xa3, 0x9e, 0xdb, 0x3d, - 0x2f, 0x47, 0x2a, 0x3e, 0xa8, 0xf7, 0x8d, 0xbf, 0xfc, 0x3a, 0x25, 0x3d, - 0x82, 0x9c, 0xb3, 0xbd, 0xe4, 0x25, 0x05, 0x3e, 0x56, 0x7d, 0xc0, 0xbe, - 0x7c, 0xec, 0x9f, 0x3d, 0x48, 0x59, 0x8e, 0xbd, 0xcd, 0x4b, 0x91, 0x3e, - 0x53, 0x6e, 0x0d, 0x3e, 0x91, 0x99, 0x93, 0x3e, 0x54, 0x9c, 0x16, 0xbe, - 0x68, 0x8f, 0xbf, 0x3d, 0xbd, 0xef, 0xa1, 0x3d, 0xc4, 0x83, 0xcf, 0x3b, - 0xd6, 0xc4, 0xc1, 0xbe, 0x11, 0x47, 0x3e, 0x3e, 0x37, 0x5c, 0xf5, 0xbd, - 0xc2, 0x27, 
0xdb, 0x3d, 0x5d, 0x79, 0x97, 0xbd, 0x98, 0xf2, 0x0c, 0x3d, - 0x6c, 0xa6, 0xaa, 0xbe, 0xd9, 0x30, 0xb5, 0xbd, 0x90, 0x05, 0x01, 0x3a, - 0xf1, 0x8f, 0x35, 0x3e, 0x82, 0x5d, 0xfd, 0xbc, 0xc7, 0xf8, 0x54, 0x3e, - 0x9e, 0xe1, 0xd8, 0xbc, 0xba, 0x9d, 0x23, 0x3e, 0xc5, 0x87, 0x06, 0x3e, - 0x6b, 0xe7, 0xee, 0x3c, 0xae, 0x75, 0x2a, 0xbe, 0x9b, 0xf5, 0x2c, 0x3e, - 0xc5, 0x58, 0x9d, 0xbc, 0x4b, 0x3b, 0xbb, 0x3c, 0x96, 0x33, 0x7a, 0x3c, - 0xd7, 0xd3, 0xab, 0xbe, 0x33, 0x03, 0x5e, 0xbe, 0x24, 0xe4, 0x2c, 0x3d, - 0xc6, 0x66, 0x09, 0xbc, 0x6f, 0x16, 0x8e, 0x3e, 0xd1, 0x41, 0x07, 0x3b, - 0x25, 0xcc, 0x40, 0x3e, 0xe2, 0x18, 0x0f, 0x3d, 0x15, 0xe8, 0x82, 0x3e, - 0x41, 0x69, 0xdb, 0x3d, 0xe1, 0x27, 0x37, 0xbe, 0x00, 0x5b, 0x9d, 0xbe, - 0xcb, 0xdb, 0x40, 0x3e, 0xe1, 0x3c, 0xea, 0xbb, 0x0e, 0x61, 0x30, 0xbc, - 0xba, 0x37, 0x95, 0x3d, 0xfd, 0x17, 0xf3, 0xbe, 0xc2, 0x74, 0x50, 0xbe, - 0x9a, 0x7a, 0x7e, 0xbd, 0x30, 0x34, 0xc3, 0x3d, 0x54, 0xe2, 0xc3, 0x3e, - 0x6d, 0xa6, 0x9a, 0x3d, 0xaf, 0xa2, 0x88, 0x3e, 0xd8, 0x11, 0x71, 0x3d, - 0x4b, 0xa8, 0x26, 0x3e, 0xb8, 0x2f, 0xeb, 0x3b, 0x65, 0x63, 0x2b, 0xbd, - 0x92, 0xd3, 0x37, 0x3c, 0x95, 0xf0, 0xa6, 0x3e, 0x00, 0x6a, 0x15, 0xbe, - 0x36, 0x55, 0x37, 0x3d, 0x1d, 0x59, 0x4d, 0x3d, 0x7c, 0xdf, 0x91, 0xbe, - 0xa2, 0x6b, 0x6f, 0xbe, 0x27, 0x4b, 0xe2, 0x3c, 0x1a, 0x00, 0x50, 0xbc, - 0xe9, 0x40, 0x9e, 0x3e, 0x99, 0xaa, 0x01, 0xbe, 0xc2, 0x58, 0x70, 0xbb, - 0x83, 0x8c, 0xd9, 0x3c, 0x0d, 0x3e, 0xe6, 0x3d, 0xf1, 0x6f, 0x81, 0x3d, - 0xcb, 0x83, 0x7b, 0x3c, 0x78, 0xff, 0x3b, 0xbd, 0xe1, 0xae, 0x79, 0x3e, - 0x27, 0x72, 0x85, 0xbc, 0x72, 0xb1, 0x0a, 0xbe, 0xda, 0xc6, 0x8a, 0x3b, - 0x6b, 0xe5, 0x14, 0xbe, 0xaf, 0x70, 0x9e, 0xbd, 0xa2, 0xba, 0xc3, 0x3d, - 0x3e, 0x64, 0x10, 0xbd, 0x6c, 0xe2, 0x94, 0x3e, 0x65, 0x6a, 0x08, 0xbd, - 0x70, 0x66, 0x9a, 0x3e, 0x9e, 0x96, 0xd4, 0xbd, 0xb3, 0xcd, 0x70, 0x3e, - 0x93, 0x7a, 0x0f, 0x3e, 0x8c, 0x94, 0xfa, 0xbd, 0xa4, 0x90, 0x63, 0xbd, - 0xcb, 0x07, 0x88, 0x3e, 0x06, 0xab, 0x48, 0x3d, 0x44, 0x5b, 0x3e, 0x3e, - 0x7f, 0xd6, 
0x6c, 0xbb, 0x04, 0xed, 0xf0, 0xbc, 0xe4, 0xc3, 0x7c, 0xbe, - 0x26, 0x94, 0x2c, 0xbd, 0x4a, 0x27, 0xcc, 0x3c, 0xfc, 0xc2, 0x92, 0x3e, - 0x82, 0x1b, 0x27, 0xbd, 0x9a, 0xf4, 0x9b, 0x3e, 0x36, 0x53, 0x8b, 0xbe, - 0x0f, 0x9c, 0x8c, 0x3e, 0x2f, 0x68, 0x99, 0x3d, 0x20, 0x62, 0xaa, 0x3c, - 0xbb, 0x1e, 0x6f, 0xbc, 0x9d, 0x1f, 0x4b, 0x3e, 0x54, 0x7f, 0x45, 0xbe, - 0x9b, 0xba, 0xb5, 0x3c, 0x4b, 0x19, 0x2b, 0x3c, 0x99, 0xe6, 0xb7, 0x3c, - 0x0b, 0x45, 0x23, 0xbe, 0x78, 0xd6, 0x2a, 0x3d, 0x02, 0xe2, 0x07, 0x3e, - 0xd6, 0x26, 0x66, 0x3e, 0x3a, 0x28, 0x2b, 0xbe, 0x45, 0x77, 0x36, 0x3e, - 0x2e, 0xf3, 0xf6, 0xbd, 0xb9, 0x28, 0xea, 0x3d, 0x74, 0xdf, 0xdb, 0x3d, - 0x15, 0x69, 0x82, 0x3a, 0xe2, 0x89, 0xa1, 0x3c, 0x04, 0xfe, 0xb6, 0x3e, - 0xc2, 0x5b, 0x1c, 0x3e, 0x73, 0x7a, 0x08, 0xbd, 0x4d, 0x50, 0x0e, 0x3d, - 0xa0, 0x55, 0xb3, 0xbd, 0x18, 0x24, 0x1d, 0xbe, 0xbc, 0x5b, 0xd1, 0xbd, - 0xcc, 0xb0, 0xd9, 0x3d, 0xa1, 0x2c, 0x95, 0x3d, 0x79, 0x70, 0x08, 0xbe, - 0xeb, 0x53, 0x18, 0x3e, 0xc5, 0x8d, 0x43, 0xbd, 0x5d, 0xa3, 0x6e, 0x3d, - 0x66, 0x05, 0x0f, 0x3e, 0xa4, 0x01, 0x8b, 0xbd, 0x57, 0xb8, 0x47, 0xbd, - 0xe8, 0x97, 0x8a, 0x3e, 0x77, 0x52, 0x13, 0xbd, 0x40, 0xa2, 0xae, 0x3d, - 0x8f, 0xbc, 0xd4, 0x3a, 0x8b, 0x08, 0xd4, 0xbd, 0x56, 0x1a, 0x44, 0xbe, - 0x22, 0x87, 0x35, 0xbd, 0xb1, 0x29, 0xa9, 0xbc, 0xfe, 0x06, 0x8c, 0x3e, - 0xca, 0x88, 0x38, 0xbd, 0xba, 0xdd, 0x04, 0x3e, 0x66, 0x0b, 0xa9, 0xbe, - 0x9d, 0xa8, 0x9d, 0xbe, 0x55, 0x60, 0x32, 0x3b, 0x7d, 0x09, 0x09, 0x3e, - 0x4f, 0xb5, 0x3f, 0xbc, 0x71, 0x79, 0xde, 0x3e, 0x2d, 0xe9, 0x38, 0xbe, - 0x9a, 0x4c, 0x76, 0xbd, 0xa2, 0xe8, 0xa0, 0xbc, 0xf4, 0xc7, 0x10, 0xbd, - 0x71, 0x07, 0x20, 0x3c, 0x6d, 0xc8, 0x46, 0x3d, 0x44, 0x3c, 0x16, 0xbd, - 0xfb, 0x5a, 0x5a, 0x3e, 0x43, 0x01, 0xe0, 0xbe, 0x2e, 0xbf, 0x41, 0xbf, - 0x14, 0x7f, 0x1e, 0xbf, 0xcf, 0x88, 0x4d, 0xbf, 0x85, 0xb0, 0xa6, 0xbe, - 0x65, 0xd0, 0x9a, 0x3d, 0xad, 0xbe, 0x1c, 0xbc, 0xfa, 0xc1, 0x85, 0x3d, - 0xfc, 0x72, 0xae, 0xbd, 0x14, 0xf5, 0xc8, 0xbd, 0x0c, 0x20, 0x08, 0x3d, - 0xe8, 0x35, 
0x63, 0x3e, 0xc0, 0xf5, 0x74, 0x3d, 0x8b, 0xd5, 0x0d, 0xbf, - 0x3d, 0x94, 0x4b, 0x3d, 0x69, 0xff, 0x33, 0xbe, 0x6f, 0xb7, 0x81, 0xbf, - 0x2a, 0x05, 0x5e, 0xbf, 0x21, 0x19, 0x94, 0xbe, 0x34, 0x2a, 0xed, 0xbe, - 0x10, 0xf1, 0x03, 0xbf, 0x40, 0xf0, 0x80, 0xbf, 0x08, 0x44, 0x1e, 0xbd, - 0x0d, 0x59, 0x46, 0xbf, 0x31, 0xa6, 0xb3, 0xbe, 0x58, 0x5d, 0xc4, 0xbe, - 0x1a, 0x05, 0x10, 0xbd, 0xa7, 0xe6, 0xe1, 0x3e, 0xf7, 0x06, 0xb9, 0xbe, - 0xce, 0xf0, 0xa0, 0xbf, 0xf4, 0xcf, 0x85, 0xbc, 0x3a, 0xe2, 0x82, 0xbe, - 0xfd, 0x85, 0x73, 0xbf, 0x24, 0x22, 0x55, 0xbf, 0x49, 0xad, 0x20, 0xbf, - 0x90, 0x58, 0x3f, 0xbc, 0x5d, 0xbf, 0xb4, 0xbe, 0x23, 0x4b, 0x61, 0xbf, - 0x4a, 0x9a, 0x4d, 0x3c, 0x7a, 0x20, 0x3d, 0xbf, 0xfd, 0x8c, 0xa1, 0x3b, - 0xc4, 0x15, 0x0e, 0xbe, 0x2a, 0xb8, 0x56, 0xbc, 0xb5, 0x41, 0x0c, 0x3f, - 0xf6, 0xfa, 0xcf, 0xbe, 0x3b, 0x1c, 0xfc, 0xbe, 0xb5, 0x8b, 0x24, 0xbd, - 0xca, 0xe0, 0xfd, 0x3e, 0x2a, 0xf3, 0xc9, 0x3e, 0x58, 0x8b, 0x1a, 0x3b, - 0x06, 0x37, 0x0f, 0x3f, 0xe5, 0x79, 0x5a, 0xbe, 0xcb, 0x6c, 0xa5, 0x3d, - 0x61, 0xa0, 0xc6, 0x3e, 0x86, 0x27, 0xbc, 0x3c, 0xee, 0x51, 0x4b, 0xbe, - 0x88, 0x4a, 0x0d, 0x3f, 0x16, 0x0e, 0x82, 0xbe, 0xca, 0xd9, 0xcd, 0xbd, - 0x53, 0x26, 0xec, 0x3e, 0x67, 0xa6, 0x89, 0x3e, 0x3e, 0x89, 0x21, 0x3d, - 0x89, 0x94, 0x8a, 0xba, 0x4e, 0xbc, 0x99, 0xbe, 0xb9, 0xf0, 0xdc, 0x3d, - 0x4b, 0x61, 0xa7, 0xbe, 0x5a, 0x3c, 0x2e, 0xbe, 0xa8, 0x2e, 0x82, 0x3e, - 0x68, 0x10, 0x85, 0xbd, 0x28, 0x23, 0x7c, 0xbe, 0x82, 0x7b, 0x01, 0x3e, - 0xc1, 0xc3, 0x1b, 0xbe, 0x60, 0x8b, 0xc5, 0x3e, 0xda, 0x55, 0xb1, 0xbd, - 0xd7, 0x46, 0x29, 0x3d, 0x8e, 0xff, 0xcd, 0x3d, 0xf1, 0xac, 0xe3, 0xbd, - 0x72, 0x78, 0x43, 0xbe, 0xd3, 0xed, 0x52, 0x3e, 0x6d, 0x9e, 0xe8, 0xbe, - 0x79, 0x9a, 0x9a, 0x3d, 0x98, 0xf6, 0x63, 0xbe, 0x0c, 0x7c, 0xbf, 0xbe, - 0x83, 0x45, 0x9a, 0x3e, 0x1d, 0x35, 0x0b, 0xbe, 0xc5, 0x00, 0x67, 0x3b, - 0x75, 0x3e, 0xf4, 0x3d, 0x77, 0x2b, 0x1b, 0xbe, 0xa5, 0xff, 0xeb, 0xbc, - 0xb6, 0xb3, 0x74, 0x3e, 0x52, 0xbf, 0x3d, 0xbd, 0x7a, 0xdc, 0x2d, 0xbe, - 0x8a, 0x5b, 
0x3d, 0xbe, 0xfb, 0x30, 0x6d, 0xbe, 0x67, 0xd8, 0x35, 0x3e, - 0x0e, 0x72, 0xb8, 0xbe, 0xd2, 0x0a, 0x80, 0x3a, 0x43, 0x8b, 0x8d, 0x3d, - 0xd9, 0x8f, 0x9a, 0xbe, 0xba, 0x80, 0x8a, 0x3e, 0xc7, 0x1b, 0x02, 0xbd, - 0x2a, 0xae, 0x80, 0xbb, 0x31, 0x31, 0x08, 0x3e, 0x55, 0x75, 0x2f, 0xbe, - 0xad, 0xc6, 0x48, 0xbd, 0x43, 0x03, 0x7e, 0x3e, 0x48, 0xe4, 0xb8, 0xbd, - 0x75, 0xc6, 0xe8, 0xbd, 0x06, 0x3c, 0xa1, 0xbd, 0xd7, 0xa2, 0xc2, 0x3d, - 0xe5, 0x47, 0x07, 0x3e, 0xa6, 0x42, 0xdb, 0xbe, 0x6b, 0xbf, 0x05, 0xbe, - 0x84, 0x62, 0xc7, 0xbd, 0xb7, 0x2e, 0x97, 0xbe, 0xdc, 0x29, 0x9e, 0x3e, - 0x3b, 0xc5, 0x0a, 0x3d, 0x78, 0xd2, 0x28, 0xbd, 0x41, 0x00, 0x0b, 0xbc, - 0x08, 0xcc, 0xc4, 0xbc, 0xa2, 0x88, 0xe0, 0xbd, 0x23, 0x48, 0xb0, 0x3e, - 0x00, 0x0e, 0x2f, 0xbe, 0x69, 0xb0, 0x84, 0x3d, 0x20, 0x9d, 0x83, 0xbd, - 0x53, 0xe3, 0xa6, 0xbc, 0x99, 0x74, 0xd1, 0x3d, 0x36, 0x33, 0x8d, 0xbe, - 0x35, 0x2e, 0xfe, 0xbc, 0xd4, 0xea, 0xf6, 0xbc, 0xc2, 0xf9, 0x90, 0xbe, - 0x72, 0x67, 0xa7, 0x3d, 0x44, 0xae, 0x08, 0x3e, 0xed, 0x6f, 0x49, 0x3d, - 0xed, 0xe3, 0xa4, 0xbd, 0xc8, 0x11, 0x25, 0x3d, 0xc0, 0x7b, 0x30, 0xbd, - 0x80, 0x6e, 0xf7, 0x3d, 0xb9, 0xe9, 0x94, 0xbe, 0x12, 0x80, 0xba, 0x3d, - 0x03, 0xfb, 0xac, 0xbc, 0xbe, 0x53, 0xf4, 0xbc, 0x8c, 0x26, 0x27, 0xbd, - 0x71, 0x13, 0xc7, 0xbe, 0x6e, 0xc5, 0x02, 0xbe, 0x54, 0x9d, 0x23, 0x3d, - 0xa5, 0xba, 0x39, 0xbe, 0xf0, 0x26, 0x52, 0x3d, 0xfe, 0xf6, 0xc9, 0x3d, - 0x94, 0x18, 0xa8, 0x3d, 0xe6, 0xd3, 0x1c, 0xbe, 0x9a, 0xd2, 0xe6, 0xbd, - 0xbc, 0xb0, 0x1d, 0xbd, 0x22, 0x0a, 0xb0, 0x3d, 0x09, 0x0f, 0x74, 0xbe, - 0x22, 0x36, 0xde, 0x3d, 0xab, 0xc6, 0x20, 0x3d, 0x5b, 0xeb, 0xd1, 0xbd, - 0xaf, 0x09, 0x1a, 0xbd, 0xaa, 0x83, 0x31, 0xbe, 0x2d, 0x9d, 0xbd, 0xbc, - 0x8f, 0xca, 0xb6, 0xbd, 0x8c, 0xa0, 0x4a, 0xbe, 0x4b, 0x67, 0xfa, 0x3d, - 0x3f, 0x99, 0x01, 0x3e, 0x0e, 0x64, 0xa5, 0x3d, 0xe3, 0xe9, 0xd3, 0xba, - 0x9d, 0x09, 0x6a, 0xbc, 0x28, 0x4f, 0x82, 0x3e, 0xe3, 0x4c, 0x0a, 0x3e, - 0x44, 0xfd, 0x6e, 0xbd, 0x94, 0xfc, 0xa0, 0x3d, 0x8e, 0x35, 0x83, 0x3d, - 0xef, 0x14, 
0xea, 0xbc, 0x4b, 0xf9, 0x10, 0xbd, 0x42, 0x43, 0x8e, 0xbd, - 0x0b, 0x78, 0x3b, 0xbd, 0x0d, 0xc8, 0x2f, 0xbd, 0x43, 0xc0, 0xb1, 0xbe, - 0xf4, 0x92, 0x85, 0x3e, 0xfd, 0x10, 0xea, 0x3d, 0x3f, 0xa1, 0x89, 0x3d, - 0xb9, 0xd2, 0x2a, 0xbd, 0x6c, 0xfd, 0x99, 0xbe, 0x68, 0xf0, 0x22, 0x3e, - 0x4e, 0x3b, 0xf8, 0xbd, 0x6a, 0xa0, 0x18, 0xbe, 0x91, 0xee, 0x1d, 0xbd, - 0x48, 0x14, 0x0e, 0x3d, 0x64, 0x2b, 0xb2, 0xbd, 0x47, 0x34, 0xad, 0x3c, - 0xb1, 0x9c, 0xe7, 0xbd, 0xd0, 0x9a, 0x8b, 0x3d, 0xc6, 0x5b, 0xe5, 0x3d, - 0xa5, 0x1f, 0x21, 0xbe, 0x0a, 0x26, 0x0b, 0x3e, 0xa4, 0x1a, 0x5a, 0xbd, - 0x3e, 0x53, 0xe8, 0xbd, 0xda, 0xb5, 0xc9, 0x3b, 0xc0, 0x68, 0x53, 0xbe, - 0x85, 0x31, 0x9a, 0x3e, 0xf9, 0x64, 0x1a, 0x3e, 0xe0, 0x4a, 0x35, 0xbd, - 0xe8, 0xaa, 0xa0, 0xbd, 0xac, 0xf7, 0xbd, 0x3d, 0xbf, 0xbd, 0x1a, 0xbe, - 0x71, 0x3e, 0x41, 0x3d, 0x5c, 0x55, 0xfb, 0xbd, 0x4b, 0x79, 0xe0, 0x3d, - 0xc1, 0x0e, 0x87, 0x3d, 0xac, 0xfa, 0x83, 0xbd, 0x1c, 0x1b, 0x1e, 0x3e, - 0xcd, 0xe6, 0xa7, 0x3d, 0xdd, 0x2a, 0xa3, 0xbd, 0xd2, 0x48, 0xc0, 0x3c, - 0x4e, 0x4a, 0xd5, 0xbe, 0x61, 0xa2, 0x16, 0x3e, 0x97, 0xe6, 0x82, 0xbc, - 0x79, 0x7e, 0xaf, 0xbd, 0x3b, 0x1a, 0xfb, 0x3d, 0xc9, 0x30, 0x3d, 0x3e, - 0x3d, 0x27, 0x86, 0xbe, 0xd1, 0x31, 0x5f, 0x3e, 0x42, 0xfc, 0x99, 0xbe, - 0xe0, 0x82, 0xc3, 0x3d, 0x91, 0x85, 0xd1, 0x3d, 0x95, 0xbf, 0xea, 0x3c, - 0x39, 0x19, 0xe4, 0x3d, 0x4f, 0x40, 0x17, 0xbe, 0x5b, 0x8f, 0xd8, 0xbd, - 0x16, 0x8b, 0x63, 0xbd, 0xd5, 0x12, 0xa1, 0x3b, 0xc0, 0xd5, 0x68, 0x3c, - 0x55, 0x96, 0xb8, 0xbd, 0xbc, 0xf3, 0x9d, 0xbe, 0xf8, 0x79, 0xd5, 0x3d, - 0xac, 0x1a, 0xd5, 0x3d, 0xd0, 0x00, 0x14, 0xbd, 0xca, 0x1d, 0xad, 0xba, - 0x79, 0xed, 0x2c, 0xbe, 0xc4, 0x05, 0xc6, 0xbc, 0x0d, 0x1c, 0xf7, 0x3d, - 0xd8, 0xd0, 0x8f, 0xbd, 0x79, 0x0e, 0xba, 0x3d, 0x3c, 0x33, 0x30, 0x3d, - 0x2c, 0x08, 0xbf, 0xbd, 0x4a, 0x7a, 0x71, 0xbd, 0xc2, 0x3b, 0x45, 0xbc, - 0x7a, 0xac, 0x8e, 0xbe, 0x13, 0x06, 0xa9, 0xbd, 0x0e, 0xd4, 0x3b, 0xbe, - 0xc5, 0x4b, 0x47, 0x3d, 0x90, 0xb5, 0x85, 0xbd, 0xe5, 0xce, 0x0f, 0xbe, - 0x02, 0xae, 
0xc7, 0x3c, 0xc1, 0x9c, 0x4d, 0xbe, 0x60, 0x07, 0x8f, 0x3d, - 0xa0, 0x57, 0x78, 0xbd, 0x48, 0x93, 0x86, 0x3e, 0xe4, 0xed, 0xfb, 0xbd, - 0x6e, 0x57, 0x74, 0x3d, 0x85, 0x53, 0x01, 0xbd, 0x56, 0xaf, 0xff, 0xbd, - 0xfe, 0xe2, 0x07, 0x3e, 0x13, 0xb4, 0xdb, 0xbd, 0xfb, 0x89, 0x9f, 0xbe, - 0x74, 0xb9, 0xe2, 0xbd, 0x4b, 0xac, 0x8c, 0x3e, 0xf8, 0x2a, 0x8d, 0xbd, - 0x76, 0x7d, 0x08, 0xbe, 0x39, 0xc8, 0x84, 0x3d, 0x62, 0x02, 0xe9, 0xbd, - 0xb0, 0x1c, 0xfc, 0x3a, 0x52, 0x7e, 0x06, 0xbd, 0xec, 0xf1, 0xce, 0xbc, - 0x00, 0x3b, 0xc3, 0x3c, 0x08, 0x4e, 0x3e, 0x3d, 0x5c, 0x19, 0x45, 0xbe, - 0xbb, 0x06, 0xbd, 0xbd, 0x9d, 0xbb, 0xf7, 0xba, 0x9c, 0xa5, 0x87, 0x3b, - 0x1a, 0x83, 0x85, 0xbd, 0xa3, 0x38, 0xeb, 0xbc, 0x83, 0x65, 0xdc, 0x3d, - 0x5c, 0x8d, 0x0e, 0x3e, 0xcf, 0x7f, 0xe6, 0x3b, 0x7d, 0x2c, 0xaf, 0xbd, - 0x9c, 0xc5, 0xaf, 0x3c, 0xd7, 0x6a, 0xc7, 0xbc, 0x7a, 0xc2, 0x48, 0xbe, - 0xfc, 0x1a, 0x0e, 0x3d, 0x9e, 0x97, 0x3e, 0x3d, 0xf1, 0x34, 0xd0, 0xba, - 0xbb, 0x60, 0xee, 0xbd, 0x85, 0x2f, 0xec, 0xbd, 0x8e, 0x6f, 0x7b, 0x3c, - 0xb9, 0x28, 0xfc, 0xbd, 0x4d, 0x84, 0x6b, 0xbe, 0x26, 0x35, 0x4c, 0xbd, - 0x8a, 0xe9, 0x2f, 0xbd, 0x2a, 0x11, 0x1c, 0xbe, 0xbb, 0x7e, 0x08, 0xbe, - 0xf6, 0x73, 0x81, 0xbe, 0x36, 0xfd, 0x11, 0xbd, 0x6a, 0x85, 0xcf, 0xbd, - 0x64, 0x9f, 0x93, 0xbd, 0xb8, 0xfe, 0xff, 0xbd, 0x4f, 0xa5, 0x19, 0xbe, - 0xd9, 0x9b, 0x34, 0xbe, 0x17, 0xac, 0xdb, 0x3a, 0x2d, 0x1c, 0xa1, 0xbd, - 0x22, 0x94, 0x90, 0x3c, 0xcb, 0xa9, 0x6d, 0xbb, 0x84, 0xad, 0x9e, 0x3d, - 0xfd, 0x9a, 0x11, 0xbe, 0x93, 0xaa, 0x13, 0x3d, 0x42, 0x61, 0x8b, 0xbd, - 0x5d, 0x21, 0x28, 0xbe, 0x15, 0x7b, 0xd2, 0xbd, 0xf5, 0x7e, 0x54, 0x3d, - 0xc1, 0x76, 0x81, 0x3d, 0x45, 0x13, 0xe7, 0xbd, 0xac, 0x4f, 0x12, 0x3d, - 0x8f, 0x1c, 0x0e, 0x3c, 0xb3, 0x6d, 0x70, 0x3c, 0x6b, 0xfa, 0xdc, 0xbb, - 0xd6, 0xab, 0xf9, 0xbc, 0x49, 0x49, 0xf7, 0xbc, 0x4b, 0x50, 0x59, 0xbd, - 0x10, 0x1e, 0xb0, 0xbd, 0x43, 0x71, 0x39, 0xbd, 0xd5, 0xe0, 0xc1, 0xbd, - 0x33, 0xc2, 0x5f, 0x3d, 0x48, 0xa7, 0x4a, 0xbe, 0xef, 0x8d, 0x35, 0xbe, - 0xa1, 0x9e, 
0x3c, 0xbd, 0xb0, 0x3a, 0xa5, 0xbd, 0xa8, 0x9d, 0xa1, 0x3d, - 0x61, 0xb0, 0x73, 0x3d, 0xdb, 0x36, 0xbd, 0x3d, 0xa1, 0xc1, 0x03, 0xbd, - 0xde, 0x98, 0xac, 0xbd, 0xe1, 0x5a, 0xd4, 0x3d, 0x87, 0x04, 0xa1, 0xbd, - 0x22, 0xcf, 0x2f, 0x3d, 0xae, 0xf3, 0xc2, 0xbd, 0x7d, 0xe0, 0x30, 0xbe, - 0x1d, 0x36, 0x03, 0xbe, 0xc3, 0x86, 0xb1, 0x3d, 0x00, 0xf1, 0xbb, 0xbd, - 0x5f, 0x98, 0x56, 0xbd, 0xdd, 0xcd, 0x06, 0xbe, 0xcf, 0x6e, 0xa5, 0x3b, - 0xaa, 0xa0, 0x88, 0xbd, 0x99, 0x87, 0x56, 0xbe, 0x59, 0xce, 0x37, 0x3d, - 0xc6, 0x1e, 0x9c, 0xbb, 0xf9, 0x24, 0x1f, 0xbe, 0x6e, 0xee, 0x89, 0x3d, - 0xa5, 0xd5, 0xe8, 0xbb, 0xde, 0x47, 0x7d, 0xbd, 0xe3, 0x46, 0x26, 0xbe, - 0x8b, 0x42, 0xc7, 0x3d, 0xb5, 0xc8, 0xc6, 0xbd, 0x58, 0xc1, 0xc3, 0xbd, - 0x4d, 0x67, 0x4f, 0x3d, 0x9f, 0xdd, 0x2b, 0xbe, 0x13, 0x24, 0x02, 0xbd, - 0x8c, 0x30, 0x2a, 0xbd, 0x99, 0x30, 0x54, 0x3c, 0x86, 0xce, 0xa4, 0x3c, - 0x3c, 0xea, 0x1b, 0x3c, 0x18, 0x20, 0x00, 0x3d, 0xf5, 0x1d, 0x0a, 0xbc, - 0x3f, 0xc0, 0x74, 0x3d, 0x5a, 0x4f, 0xf4, 0xbb, 0x1e, 0x77, 0xc5, 0xbd, - 0xee, 0x16, 0xe4, 0xbd, 0xb5, 0xb4, 0x60, 0x3d, 0x25, 0xa2, 0xed, 0xbd, - 0x97, 0x7d, 0x9e, 0xbd, 0xc9, 0xd4, 0x36, 0xbd, 0xbf, 0x1b, 0xa6, 0x3c, - 0x6e, 0xb9, 0x27, 0x3d, 0xe8, 0x11, 0x2e, 0x3d, 0xa5, 0xae, 0xbd, 0xbd, - 0xdd, 0xfa, 0x04, 0xbd, 0xe6, 0x01, 0xe5, 0xbd, 0x03, 0x43, 0x65, 0x3d, - 0x36, 0xf4, 0x34, 0xbd, 0xc8, 0x33, 0xd1, 0xbd, 0x06, 0x06, 0x1c, 0xbe, - 0x17, 0x94, 0xb7, 0x3b, 0x64, 0x11, 0x01, 0xbe, 0xdf, 0xb1, 0xe6, 0xbd, - 0xa5, 0x78, 0x3b, 0xbe, 0x0d, 0x16, 0x58, 0xbd, 0x4b, 0xbc, 0xd4, 0x3c, - 0x92, 0x23, 0x52, 0xbe, 0x0d, 0x3f, 0x15, 0xbe, 0x8c, 0xd3, 0x52, 0xbe, - 0x00, 0xa4, 0x35, 0x3b, 0x37, 0x84, 0xa5, 0x3d, 0xdf, 0xe3, 0x02, 0xbe, - 0xbd, 0x99, 0x0f, 0xbe, 0x9d, 0x37, 0x33, 0xbe, 0x87, 0xd1, 0xfd, 0x3d, - 0xcb, 0x26, 0x39, 0xbe, 0x5d, 0x3f, 0x0e, 0xbe, 0xe6, 0xb1, 0x2d, 0xbe, - 0x4c, 0x00, 0x7d, 0xbd, 0xf7, 0x32, 0xd8, 0x3b, 0xc5, 0xcf, 0x86, 0x3d, - 0x60, 0x62, 0xa8, 0xbd, 0x44, 0x0d, 0x2f, 0xbe, 0xed, 0x1e, 0xf9, 0x3d, - 0x4d, 0x79, 
0x51, 0xbe, 0x22, 0xdd, 0x50, 0xbe, 0x74, 0x3b, 0x1e, 0xbe, - 0xe1, 0x4f, 0x77, 0xbd, 0x00, 0x02, 0x1d, 0xbe, 0x07, 0x5f, 0x44, 0xbd, - 0x44, 0x3f, 0xb8, 0x3c, 0x9d, 0x95, 0xa9, 0xbb, 0x4a, 0xe8, 0x55, 0x3d, - 0xfd, 0x6e, 0x5d, 0x3d, 0xcd, 0x7c, 0x20, 0xbe, 0xb5, 0x23, 0xa2, 0x3d, - 0x92, 0xc8, 0x84, 0x3d, 0xa4, 0x4a, 0x3f, 0xbe, 0xae, 0xd7, 0x75, 0xbd, - 0x46, 0x5e, 0xc4, 0xbd, 0xb9, 0xe0, 0x24, 0xbd, 0x2a, 0xba, 0x83, 0xbd, - 0xdd, 0xfe, 0xd9, 0x3d, 0x98, 0x72, 0x94, 0x3d, 0x6b, 0x91, 0x09, 0x3d, - 0x51, 0xae, 0x88, 0x3b, 0x65, 0x6a, 0xd8, 0xbb, 0x44, 0x15, 0x17, 0xbe, - 0xcf, 0xbb, 0xb3, 0x3d, 0xfe, 0xa1, 0x3f, 0xbe, 0x57, 0xbf, 0xd8, 0xbd, - 0xcf, 0xc5, 0x9d, 0x3d, 0xce, 0x80, 0x59, 0xbc, 0x14, 0x38, 0xdd, 0xbd, - 0x49, 0xb5, 0x56, 0xbe, 0xc5, 0xb0, 0xa5, 0x3d, 0xa9, 0x99, 0x10, 0xbd, - 0x39, 0xea, 0x95, 0xbc, 0x73, 0x55, 0x3f, 0xbe, 0xfd, 0x57, 0x85, 0xbd, - 0x1d, 0xf1, 0x65, 0x3d, 0xab, 0xe9, 0x06, 0xbd, 0x77, 0x53, 0xf8, 0xbc, - 0xfa, 0xbc, 0x75, 0xbd, 0x32, 0xc8, 0x6c, 0x3d, 0xec, 0xfb, 0x42, 0xbe, - 0x4f, 0xe3, 0x8a, 0xbd, 0xc9, 0xd4, 0x32, 0xbc, 0xe0, 0x14, 0xbe, 0x3d, - 0xdd, 0xd2, 0x2a, 0xbe, 0x2d, 0xf3, 0x42, 0xbe, 0x8a, 0x61, 0x6e, 0x3d, - 0x34, 0xed, 0x85, 0xbb, 0xcc, 0x83, 0x3c, 0xbd, 0xf4, 0xe4, 0x8f, 0xbc, - 0x6a, 0x4e, 0x2a, 0xbd, 0x32, 0xb8, 0x1b, 0xbe, 0x45, 0xd2, 0x73, 0x3d, - 0xa9, 0x63, 0x64, 0xbd, 0x34, 0x23, 0xb5, 0xbd, 0xe3, 0x16, 0x00, 0x3e, - 0xa5, 0xac, 0xf0, 0xbd, 0x1c, 0xef, 0x3d, 0x3d, 0xfc, 0xba, 0xff, 0x3c, - 0xbc, 0xfa, 0x0b, 0x3d, 0xfc, 0xd7, 0x60, 0xbd, 0x91, 0x10, 0xdd, 0xbb, - 0xe4, 0x68, 0xbe, 0x3d, 0xa5, 0x92, 0x50, 0xbe, 0x98, 0x7b, 0x27, 0xbe, - 0x78, 0xe3, 0xa2, 0xbe, 0xa6, 0x63, 0x21, 0xbd, 0x6d, 0xe4, 0xc0, 0x3d, - 0x68, 0x9c, 0xd2, 0xbd, 0x5b, 0xb4, 0x11, 0x3e, 0xa7, 0xef, 0xa9, 0xbd, - 0xf9, 0xfb, 0x83, 0x3d, 0xd8, 0x6d, 0xe5, 0x3c, 0xc2, 0x59, 0x3d, 0xbd, - 0x6f, 0x02, 0xe3, 0xbc, 0xdd, 0x52, 0xcf, 0xbb, 0x58, 0x34, 0xbc, 0xbd, - 0xa9, 0x43, 0x45, 0xbd, 0x1e, 0xf0, 0x9a, 0xbd, 0x80, 0x43, 0x61, 0x3d, - 0x77, 0xa4, 
0x72, 0xbe, 0xa1, 0x17, 0xb0, 0x3e, 0x36, 0x5f, 0xd1, 0x3e, - 0x12, 0x20, 0xfe, 0xbd, 0x5d, 0x4d, 0x7c, 0xbc, 0xfe, 0x0a, 0x3d, 0x3e, - 0xd5, 0xcf, 0x22, 0x3b, 0xdc, 0x2e, 0x0f, 0x3c, 0x79, 0x3c, 0x90, 0x3d, - 0xac, 0x83, 0x2e, 0x3d, 0x75, 0xce, 0x22, 0xbf, 0x23, 0x4e, 0x35, 0xbe, - 0x92, 0x0b, 0x61, 0x3c, 0xcf, 0xdc, 0x70, 0x3e, 0x3f, 0xe4, 0xa2, 0xbe, - 0x63, 0x01, 0x9e, 0x3d, 0x61, 0x05, 0xa8, 0xbe, 0x1b, 0x7d, 0xe0, 0x3d, - 0xb5, 0x3b, 0xcc, 0x3e, 0xdc, 0x97, 0x9d, 0xbe, 0x13, 0xf1, 0x60, 0x3d, - 0xfc, 0x5b, 0x95, 0xbd, 0xd8, 0x27, 0x06, 0x3e, 0x27, 0x3e, 0x65, 0xbd, - 0x3a, 0xc3, 0x7b, 0xbd, 0x77, 0xa5, 0x37, 0x3d, 0x30, 0x67, 0x14, 0xbe, - 0xf9, 0x10, 0xbe, 0xbe, 0x4f, 0xec, 0x53, 0x3e, 0xf5, 0x26, 0xd9, 0x3d, - 0x48, 0x3b, 0x2d, 0xbf, 0xd1, 0x9b, 0x35, 0xbc, 0x7e, 0x60, 0xae, 0x3d, - 0xdf, 0x08, 0x5b, 0x3d, 0x58, 0xe9, 0x9b, 0xbe, 0xb6, 0xc6, 0xf6, 0xbe, - 0x57, 0xa3, 0x45, 0x3e, 0xa1, 0x11, 0x19, 0xbd, 0xaf, 0xb8, 0x2d, 0x3e, - 0xb8, 0x9b, 0x06, 0x3e, 0xbc, 0x03, 0x31, 0xbe, 0x49, 0xe0, 0xa4, 0x3c, - 0x67, 0x3d, 0x16, 0xbd, 0x5d, 0xa2, 0x54, 0xbe, 0xc7, 0x85, 0x1b, 0x3d, - 0xfb, 0x76, 0x05, 0xbb, 0x20, 0x1f, 0xd5, 0xbb, 0xc3, 0x12, 0x65, 0xbe, - 0x1e, 0x60, 0x9d, 0x3e, 0x74, 0x8d, 0x03, 0xbd, 0xe6, 0x87, 0x01, 0xbf, - 0x4e, 0x58, 0xc3, 0xbe, 0x0b, 0x3e, 0x09, 0x3e, 0xae, 0x0f, 0x17, 0xbd, - 0xab, 0xeb, 0x5b, 0x3d, 0x33, 0x41, 0x89, 0x3e, 0xfe, 0x66, 0x14, 0xbe, - 0x82, 0xfd, 0x63, 0xbc, 0x07, 0x1d, 0xbb, 0xbe, 0x80, 0x13, 0x7e, 0x3d, - 0x51, 0x53, 0xdf, 0xbe, 0xb3, 0x78, 0x91, 0x3d, 0x7e, 0x79, 0xac, 0xbd, - 0x2e, 0xe2, 0x3f, 0xbe, 0x46, 0x85, 0x01, 0x3d, 0xec, 0xf8, 0x64, 0x3e, - 0x06, 0x8e, 0xed, 0xbe, 0xf7, 0x7b, 0x1c, 0xbf, 0x09, 0x5b, 0x42, 0x3d, - 0x5b, 0xfd, 0x82, 0x3b, 0xde, 0xa5, 0xe4, 0xbd, 0xcb, 0x7c, 0x82, 0x3e, - 0xd6, 0xe7, 0x77, 0xbe, 0x28, 0x8e, 0xe3, 0xba, 0xf5, 0x19, 0x16, 0xbf, - 0x47, 0xf9, 0x0c, 0x3e, 0x93, 0xa4, 0xa7, 0xbe, 0xa1, 0xfd, 0x0b, 0xbd, - 0x34, 0x25, 0x2a, 0xbe, 0x79, 0xa6, 0x3e, 0xbe, 0x72, 0x46, 0x7a, 0x3e, - 0xdd, 0xb1, 
0xd4, 0x3e, 0xa6, 0xae, 0xd7, 0x3d, 0xcd, 0xa3, 0xb8, 0xbe, - 0x83, 0xf2, 0xea, 0xbd, 0xda, 0x19, 0xfc, 0xbc, 0x8d, 0x76, 0xa2, 0xbc, - 0x17, 0x56, 0xa2, 0x3c, 0xf4, 0xa9, 0x00, 0xbe, 0x5b, 0xde, 0x36, 0x3d, - 0x0f, 0x78, 0x2d, 0xbf, 0x7f, 0xdc, 0x7e, 0xbd, 0x8b, 0x23, 0x93, 0xbd, - 0x0b, 0x44, 0x0e, 0x3d, 0xd1, 0xda, 0x73, 0xbe, 0xcf, 0x9f, 0x5b, 0xbe, - 0x50, 0xe9, 0x25, 0x3e, 0x83, 0x6c, 0x95, 0x3e, 0x72, 0x08, 0x76, 0x3d, - 0xae, 0xe5, 0x51, 0xbe, 0xd4, 0xf0, 0xa4, 0xbd, 0xf9, 0x06, 0xce, 0xbd, - 0x90, 0x98, 0x14, 0x3e, 0x24, 0x29, 0x20, 0x3c, 0x11, 0x75, 0x1c, 0xbe, - 0x99, 0x76, 0x2b, 0x3c, 0x9b, 0x64, 0xc1, 0xbe, 0x58, 0x5d, 0x08, 0xbe, - 0xcc, 0x09, 0x0f, 0x3e, 0x6e, 0x15, 0x1e, 0xbd, 0x17, 0x45, 0xed, 0xbd, - 0x15, 0xb4, 0x00, 0xbe, 0x71, 0x06, 0x83, 0x3d, 0xe5, 0x7e, 0xb2, 0x3e, - 0x72, 0x95, 0x57, 0x3e, 0x10, 0x2b, 0x78, 0xbe, 0x8f, 0x8b, 0x17, 0xbe, - 0x7a, 0xcb, 0x69, 0xbd, 0x93, 0xe9, 0x35, 0xbd, 0x70, 0x4a, 0x13, 0x3e, - 0x29, 0x2f, 0xde, 0xbd, 0x49, 0x2c, 0x0f, 0x3d, 0xb4, 0xd1, 0x7b, 0x3e, - 0x15, 0xff, 0x1a, 0xbd, 0x09, 0xeb, 0x16, 0x3d, 0x16, 0x20, 0x94, 0x3d, - 0x62, 0x81, 0x37, 0xbe, 0xf4, 0xb4, 0xa9, 0xbc, 0xa1, 0xe9, 0xfb, 0xbc, - 0x6a, 0xe0, 0xfc, 0x3d, 0x34, 0x3e, 0x4f, 0x3e, 0xc3, 0x45, 0x37, 0xbe, - 0xbd, 0x7c, 0x13, 0x3d, 0x80, 0xc5, 0xa1, 0x3c, 0x95, 0x2d, 0x94, 0xbd, - 0x37, 0xf2, 0xc3, 0x3d, 0xea, 0x4c, 0xb5, 0xbc, 0x3c, 0x6c, 0x0e, 0x3d, - 0x95, 0x03, 0xea, 0x3d, 0x00, 0xcb, 0xa5, 0xbd, 0xc6, 0xdb, 0x94, 0x3d, - 0xe0, 0x75, 0x9f, 0x3d, 0x35, 0x2b, 0xcd, 0xbc, 0x4b, 0xd6, 0x9d, 0xbc, - 0x19, 0xe6, 0x34, 0x3e, 0x95, 0xc7, 0xa8, 0x3d, 0x39, 0xa8, 0x4d, 0x3e, - 0x1a, 0xfe, 0x2e, 0xbe, 0x63, 0x46, 0x8c, 0x3d, 0xb2, 0x63, 0x0c, 0xbe, - 0xcb, 0x53, 0x82, 0xbc, 0x0d, 0xcc, 0x23, 0x3d, 0x93, 0xee, 0xff, 0x3c, - 0xa1, 0xba, 0x10, 0xbd, 0xf4, 0x9f, 0xfd, 0xbb, 0xe4, 0x63, 0x52, 0xbe, - 0xa0, 0x3f, 0xdb, 0x3d, 0xe3, 0x8c, 0xa1, 0xbd, 0x75, 0x4f, 0x75, 0xbd, - 0x64, 0xc3, 0xe6, 0x3c, 0x91, 0x6a, 0x96, 0x3e, 0x5b, 0x8f, 0x41, 0x3e, - 0x0b, 0xed, 
0x8d, 0x3e, 0xe1, 0xfa, 0x47, 0xbd, 0xe8, 0x79, 0xa6, 0x3d, - 0x7f, 0x4e, 0x77, 0x3c, 0x3d, 0xbf, 0xf1, 0xba, 0x83, 0x2e, 0x92, 0x3d, - 0x14, 0xc8, 0xc3, 0xbd, 0x42, 0x69, 0x00, 0x3d, 0x86, 0x93, 0x8b, 0x3d, - 0xc6, 0xb3, 0x37, 0xbe, 0x5d, 0x8c, 0x27, 0xbc, 0xee, 0x70, 0xc9, 0xbc, - 0xf7, 0xf5, 0x96, 0xbe, 0x3f, 0x20, 0xe1, 0x3d, 0x07, 0x7a, 0x8b, 0x3e, - 0xc1, 0x46, 0x17, 0x3d, 0x39, 0xcc, 0x3c, 0x3e, 0xd4, 0xa0, 0x11, 0xbe, - 0x24, 0x8b, 0xe3, 0x3d, 0x9b, 0x32, 0x49, 0x3d, 0xc1, 0x6f, 0x08, 0xbe, - 0x5f, 0x21, 0x0e, 0x3d, 0xc0, 0xc1, 0x1a, 0x3d, 0x4d, 0x30, 0x8e, 0x3d, - 0x0d, 0xdf, 0x2e, 0x3e, 0x58, 0x84, 0x4b, 0xbe, 0x96, 0x08, 0x85, 0x3d, - 0x9a, 0x2e, 0x4f, 0x3c, 0x26, 0x4d, 0xc2, 0xbe, 0x78, 0x76, 0xea, 0x3d, - 0xd6, 0x01, 0x0a, 0x3d, 0xf5, 0xef, 0x78, 0xbe, 0x66, 0x9b, 0x82, 0x3e, - 0x90, 0x6b, 0x5f, 0xbe, 0x8c, 0xdf, 0xf9, 0x3d, 0x84, 0x98, 0xe9, 0xbe, - 0x38, 0x4e, 0x73, 0x3c, 0xce, 0xac, 0x2a, 0x3e, 0x1d, 0x29, 0x80, 0x3d, - 0x04, 0xd4, 0x94, 0xbe, 0x5d, 0x8b, 0x2f, 0xbe, 0x02, 0xde, 0xc5, 0xbe, - 0xbc, 0x6b, 0x0e, 0xbd, 0x0e, 0x34, 0xb2, 0xbd, 0xfc, 0xc1, 0x71, 0xbe, - 0xb3, 0x9a, 0xa4, 0xbc, 0xd2, 0x28, 0x55, 0x3e, 0x99, 0x4b, 0x15, 0x3e, - 0x6b, 0x9e, 0x8b, 0xbe, 0x0f, 0xa7, 0xa6, 0xbe, 0x9f, 0x1c, 0x7f, 0x3d, - 0x03, 0x97, 0xd6, 0xbe, 0xec, 0x2d, 0x7e, 0xbd, 0x5a, 0xa2, 0x2a, 0x3e, - 0x96, 0x01, 0xf9, 0x3c, 0x02, 0xaa, 0xd3, 0xbe, 0xb1, 0x3a, 0x1e, 0xbf, - 0x4b, 0xee, 0x50, 0x3e, 0x33, 0x84, 0x4a, 0xbe, 0xae, 0x5e, 0xca, 0xbe, - 0x6d, 0xad, 0x3d, 0xbe, 0x35, 0xdc, 0x8b, 0xbd, 0x7f, 0x3b, 0x19, 0xbe, - 0xf7, 0x3f, 0xe9, 0xbd, 0x39, 0x0d, 0xa9, 0xbe, 0x62, 0xaf, 0x2f, 0xbe, - 0x50, 0x42, 0xf2, 0xbd, 0x0b, 0xa2, 0xec, 0x3d, 0x87, 0x39, 0x6e, 0xbe, - 0xa0, 0xe2, 0x18, 0xbd, 0xd0, 0xf9, 0x9d, 0xbd, 0x44, 0xca, 0x1c, 0x3c, - 0x22, 0x53, 0xc3, 0x3e, 0x8b, 0x89, 0x93, 0xbe, 0x5b, 0xc0, 0x2a, 0xbe, - 0xc5, 0x08, 0xe9, 0xbc, 0x55, 0x7a, 0xbb, 0xbd, 0xa6, 0x59, 0x44, 0xbe, - 0x92, 0x5a, 0x9e, 0xbe, 0x44, 0x21, 0x92, 0xbe, 0x97, 0x4f, 0x57, 0xbe, - 0xe6, 0x26, 
0x19, 0xbe, 0x7c, 0x1a, 0xf9, 0xbe, 0xa7, 0x2d, 0x9a, 0x3c, - 0xa1, 0x6c, 0xb9, 0xbe, 0x17, 0x1f, 0xf0, 0xbe, 0x36, 0x41, 0x63, 0xbe, - 0xe1, 0xfb, 0x72, 0xbd, 0x77, 0x52, 0xe0, 0x3e, 0xa8, 0x23, 0x01, 0xbf, - 0x34, 0x23, 0x34, 0xbe, 0x86, 0x7c, 0xbe, 0xbd, 0x95, 0x31, 0xde, 0xbe, - 0xea, 0x79, 0x04, 0xbe, 0x23, 0x82, 0xcc, 0xbe, 0xf6, 0xc9, 0x1c, 0xbf, - 0x1a, 0x7d, 0x41, 0x3c, 0xcb, 0xc1, 0x6b, 0xbe, 0xf7, 0x47, 0x2d, 0xbf, - 0xfe, 0x87, 0x32, 0xbd, 0xad, 0x73, 0x82, 0xbe, 0xa2, 0xd1, 0x3e, 0xbe, - 0x4c, 0xfa, 0x08, 0xbd, 0xef, 0xe1, 0x3d, 0xbd, 0xaf, 0x4c, 0x72, 0x3e, - 0x79, 0x46, 0xa1, 0xbe, 0x5a, 0x73, 0xe1, 0xbe, 0x0d, 0x0b, 0xc8, 0xbd, - 0x2d, 0x22, 0x91, 0x3e, 0x3f, 0x09, 0x7d, 0xbe, 0x18, 0xe7, 0x25, 0xbe, - 0x20, 0x50, 0x49, 0xbd, 0x08, 0x29, 0x4b, 0xbc, 0xce, 0xd6, 0x82, 0xbe, - 0x98, 0xe0, 0xec, 0xbe, 0xcd, 0x3d, 0x2d, 0x3d, 0xf4, 0xf8, 0xb0, 0xbd, - 0x7a, 0xee, 0xf3, 0xbd, 0x21, 0xd4, 0xd8, 0xb8, 0xf1, 0xf3, 0x81, 0xbd, - 0x2f, 0x08, 0x0c, 0x3d, 0x80, 0x46, 0x19, 0x3e, 0x80, 0x3c, 0x9c, 0xbe, - 0xc1, 0x46, 0xf2, 0xbd, 0x1d, 0x89, 0x3a, 0x3e, 0xa9, 0x64, 0x0a, 0xbe, - 0xb5, 0xe3, 0x2f, 0xbd, 0x99, 0x8d, 0x55, 0xbe, 0x19, 0x11, 0xcf, 0xbd, - 0x00, 0x5d, 0x3a, 0xbd, 0xef, 0x70, 0xb8, 0xbe, 0x2f, 0xe7, 0x4e, 0xbc, - 0x57, 0xbc, 0xa1, 0x3e, 0xbc, 0xcd, 0xd9, 0xbc, 0x7d, 0x6a, 0xbe, 0xbd, - 0x23, 0x6e, 0x3a, 0xbd, 0xc8, 0x65, 0x77, 0xbd, 0xb2, 0xe9, 0x1a, 0xbe, - 0x4f, 0xb3, 0x7a, 0xbe, 0x92, 0x29, 0x3d, 0xbd, 0xc2, 0xf1, 0xac, 0x3e, - 0x60, 0x35, 0x39, 0xbd, 0x49, 0xa6, 0xe8, 0x3c, 0x35, 0xf4, 0x04, 0x3e, - 0x41, 0x26, 0x09, 0xbc, 0x51, 0x4e, 0xa3, 0x3e, 0x56, 0x74, 0x9a, 0xbd, - 0xae, 0x10, 0x94, 0x3d, 0x01, 0xe9, 0x51, 0x3d, 0xc0, 0x1d, 0x45, 0x3d, - 0x80, 0xb1, 0xb4, 0xbc, 0x9b, 0x51, 0xcb, 0xbd, 0xa8, 0xca, 0x2d, 0xbd, - 0xa3, 0x12, 0x52, 0xbc, 0x22, 0xee, 0x05, 0xbe, 0xf0, 0x4c, 0x66, 0xbd, - 0x12, 0x0a, 0xa3, 0x3e, 0xae, 0xe7, 0x8d, 0xbc, 0xa2, 0x32, 0x48, 0x3e, - 0xd9, 0x25, 0xc6, 0xbd, 0x9f, 0xe7, 0x0e, 0x3e, 0x83, 0xd4, 0x20, 0x3e, - 0xf5, 0x11, 
0xf0, 0xbc, 0xd8, 0xb1, 0x8c, 0x3d, 0xad, 0x03, 0xd5, 0x3b, - 0x87, 0x4f, 0xc0, 0x3d, 0xb5, 0x17, 0x36, 0x3d, 0x13, 0xdc, 0x08, 0xbd, - 0x6d, 0x28, 0x23, 0x3d, 0x24, 0x48, 0x94, 0xbe, 0xd9, 0xbe, 0x4f, 0xbd, - 0x0b, 0x18, 0x90, 0xbd, 0x04, 0xee, 0x91, 0x3e, 0x60, 0x12, 0x20, 0x3e, - 0xa4, 0x50, 0x31, 0x3e, 0x79, 0xb4, 0x91, 0xbd, 0xb6, 0x3b, 0xfd, 0x3d, - 0x87, 0x3a, 0xce, 0x3d, 0xab, 0x25, 0x3c, 0x3d, 0x2e, 0xba, 0xf6, 0x3b, - 0x22, 0xbe, 0x88, 0x3e, 0x41, 0x1d, 0x91, 0x3d, 0x40, 0x2e, 0x92, 0x3d, - 0x02, 0xd8, 0x90, 0x3d, 0x68, 0x3f, 0x58, 0xbe, 0x9b, 0xac, 0xd7, 0xbd, - 0x18, 0x6d, 0x8a, 0xbb, 0x1f, 0xf4, 0x7d, 0xbc, 0xe9, 0x45, 0x09, 0x3e, - 0x9f, 0xfe, 0x95, 0x3d, 0x9d, 0xf8, 0x2a, 0x3e, 0x55, 0x59, 0x18, 0xbe, - 0xb1, 0xfd, 0x36, 0x3e, 0x2a, 0xaa, 0x3b, 0x3e, 0xa6, 0xf6, 0x39, 0xba, - 0xa1, 0x4d, 0x74, 0xbd, 0xd9, 0xe6, 0x8d, 0x3e, 0x80, 0xa5, 0x8c, 0xbc, - 0x61, 0x74, 0x25, 0x3d, 0x34, 0xfe, 0x71, 0x3d, 0xbc, 0xb5, 0x8f, 0xbe, - 0x46, 0x41, 0xf4, 0xbe, 0x17, 0xf2, 0x9e, 0x3a, 0x80, 0x45, 0xce, 0x3c, - 0xa4, 0xd1, 0x8f, 0x3e, 0xe3, 0x2e, 0x43, 0xbc, 0x3d, 0x24, 0xb4, 0x3e, - 0xb1, 0x1e, 0x30, 0xbd, 0x98, 0x36, 0xcd, 0x3d, 0x0c, 0x48, 0x55, 0x3d, - 0x39, 0x6f, 0x16, 0xbe, 0x1d, 0x82, 0x34, 0xbe, 0xc0, 0x7d, 0x81, 0x3e, - 0x73, 0xe1, 0x0a, 0xbe, 0x1d, 0x95, 0x13, 0x3d, 0x52, 0x6b, 0xf5, 0xbb, - 0x42, 0x7e, 0xbc, 0xbe, 0x0c, 0x5a, 0xcc, 0xbe, 0x6b, 0xc1, 0xc3, 0xbc, - 0x85, 0xa2, 0xf7, 0x3c, 0x11, 0xef, 0x4b, 0x3e, 0xa4, 0xb9, 0x85, 0x3d, - 0x1b, 0x76, 0xbe, 0x3e, 0x9f, 0x20, 0xd4, 0xbd, 0x65, 0x15, 0x2a, 0x3e, - 0x6e, 0xdd, 0xfe, 0x3d, 0x72, 0x35, 0x21, 0xbd, 0x54, 0x0f, 0x48, 0xbe, - 0xfe, 0xe7, 0x46, 0x3e, 0x6f, 0x80, 0x80, 0xbd, 0xf4, 0x2c, 0xc8, 0x3d, - 0x45, 0x0b, 0x8e, 0xbd, 0xb3, 0xd3, 0x99, 0xbe, 0x12, 0x64, 0x19, 0xbe, - 0x8e, 0x98, 0x34, 0x3c, 0x3f, 0x82, 0x5c, 0x3d, 0x05, 0x5c, 0x8d, 0x3e, - 0x66, 0x6d, 0x1d, 0x3d, 0xe8, 0xe4, 0x79, 0x3e, 0x09, 0x06, 0x0c, 0xbd, - 0xfc, 0x25, 0x90, 0x3d, 0xd8, 0xda, 0xf1, 0x3d, 0xbb, 0x7e, 0x18, 0x3e, - 0xa7, 0x79, 
0xe1, 0x3c, 0x68, 0x65, 0x31, 0x3d, 0x80, 0x16, 0x0f, 0xbe, - 0xa6, 0x2a, 0x79, 0x3c, 0x4f, 0x6a, 0xf5, 0x3d, 0x2d, 0x8e, 0x83, 0xbe, - 0xef, 0x1a, 0x86, 0xbd, 0xd7, 0x98, 0xd3, 0xbc, 0xdc, 0x99, 0xfb, 0x3d, - 0xe8, 0x2d, 0xd0, 0x3e, 0x93, 0x99, 0x23, 0xbe, 0x36, 0xdd, 0x84, 0x3e, - 0x41, 0x0f, 0x0a, 0x3e, 0x90, 0x4b, 0xd5, 0xbc, 0xd3, 0x40, 0xac, 0x3d, - 0x8e, 0x08, 0x95, 0x3c, 0xcc, 0x5b, 0x6f, 0xbe, 0x02, 0x7b, 0x89, 0x3e, - 0x6f, 0x2a, 0x55, 0xbe, 0x64, 0x57, 0xcd, 0xbd, 0x8d, 0xa2, 0xa8, 0x3d, - 0xaa, 0x1f, 0x42, 0xbd, 0x12, 0x22, 0x5c, 0xbe, 0x9a, 0xfa, 0x43, 0xbd, - 0xe0, 0x93, 0x81, 0xbd, 0xa6, 0x78, 0x74, 0xbd, 0x78, 0x8d, 0xd4, 0xbe, - 0x7e, 0xc7, 0x32, 0xbf, 0x01, 0xc0, 0x06, 0xbf, 0xd8, 0xad, 0x7f, 0xbd, - 0xd2, 0xc7, 0xa7, 0xbe, 0xbe, 0xc5, 0xff, 0xbe, 0x1e, 0xf5, 0x03, 0x3c, - 0x96, 0xae, 0x07, 0xbf, 0x43, 0xf6, 0x60, 0xbe, 0x1c, 0x7e, 0x8c, 0xbe, - 0xd4, 0x05, 0x51, 0xbd, 0x21, 0x90, 0x8c, 0x3e, 0x13, 0xcb, 0x85, 0xbe, - 0xdf, 0xb4, 0x05, 0xbf, 0xc3, 0x79, 0xa7, 0x3b, 0x60, 0x54, 0x39, 0x3c, - 0x12, 0x9a, 0x91, 0x3d, 0xe2, 0x53, 0x8d, 0x3d, 0xe8, 0x49, 0x5b, 0xbb, - 0x28, 0xcc, 0x95, 0xbe, 0x00, 0x79, 0xd7, 0xbc, 0x2a, 0x7e, 0x80, 0x3c, - 0xc9, 0x5f, 0x15, 0xbe, 0x4f, 0x7e, 0x4c, 0x3d, 0x4a, 0x6d, 0x58, 0xbc, - 0x6d, 0xa2, 0x17, 0xbe, 0x1c, 0x64, 0x11, 0xbe, 0x05, 0xf2, 0x7d, 0x3e, - 0x0f, 0x16, 0xb4, 0x3b, 0x98, 0x36, 0x88, 0x3d, 0x87, 0x91, 0xc0, 0xbe, - 0x3a, 0xbb, 0xcd, 0x3c, 0x89, 0x5d, 0xa8, 0xbe, 0x67, 0xdb, 0xe2, 0x3d, - 0xd0, 0xe3, 0x5f, 0xbd, 0x2b, 0xad, 0x0e, 0xbe, 0x4c, 0x1e, 0x24, 0xbe, - 0xe7, 0x6f, 0x8f, 0xbe, 0x0a, 0xa3, 0xb5, 0x3c, 0xfe, 0xf3, 0x8b, 0xbd, - 0x42, 0xfb, 0xc2, 0x3d, 0x7b, 0x30, 0x30, 0xbd, 0xbd, 0xbf, 0x49, 0xbe, - 0xb6, 0x92, 0x14, 0x3e, 0x97, 0x15, 0x3d, 0xbd, 0xfa, 0x58, 0x8b, 0xbd, - 0x88, 0x5d, 0x89, 0xbe, 0x8d, 0x80, 0x64, 0xbe, 0x3b, 0xb3, 0x6c, 0xbe, - 0x87, 0xd4, 0xda, 0xbb, 0x92, 0x31, 0x64, 0xbe, 0x8a, 0x85, 0xf4, 0xbd, - 0x9e, 0x89, 0xa4, 0xbd, 0x5e, 0xd7, 0x5d, 0xbe, 0xbf, 0x73, 0x45, 0xbe, - 0x77, 0xa5, 
0x52, 0x3c, 0x3a, 0x4a, 0xa7, 0xbd, 0x37, 0xfb, 0x3e, 0xbc, - 0x24, 0x9c, 0x8e, 0xbd, 0x1e, 0x54, 0x91, 0xbc, 0x48, 0xaf, 0x45, 0xbd, - 0x25, 0x8c, 0x1e, 0x3d, 0xf2, 0x1a, 0x92, 0xbd, 0x7b, 0xb7, 0x3a, 0x3e, - 0x0c, 0xe9, 0x98, 0xbe, 0x70, 0x74, 0xb1, 0x3d, 0x43, 0xa9, 0x59, 0xbe, - 0xe1, 0xe3, 0x18, 0x3d, 0xc3, 0x81, 0x90, 0xbe, 0x53, 0x0c, 0x08, 0xbe, - 0x06, 0x59, 0xa5, 0xbd, 0x09, 0x71, 0xa2, 0x3c, 0x7c, 0x30, 0xa6, 0x3d, - 0xd7, 0x3a, 0xab, 0x3b, 0x00, 0xf9, 0x68, 0xbd, 0x79, 0x2a, 0x19, 0xbe, - 0x6d, 0x83, 0xb1, 0xbd, 0x20, 0xc3, 0x15, 0xbd, 0x70, 0x6e, 0x95, 0xbd, - 0xda, 0x24, 0x53, 0xbd, 0x6c, 0x88, 0xb6, 0xbe, 0x14, 0x65, 0x5f, 0x3d, - 0x80, 0xd5, 0x41, 0xbe, 0xaf, 0x31, 0x37, 0x3e, 0xa3, 0xad, 0x1c, 0xbd, - 0x2f, 0xfa, 0x0b, 0xbe, 0xed, 0x59, 0x2e, 0xbe, 0x6a, 0x9d, 0x5b, 0x3e, - 0x37, 0xde, 0x8f, 0xbd, 0x39, 0x9b, 0xb0, 0x3d, 0xaf, 0xc0, 0x03, 0xbd, - 0x85, 0xc3, 0x04, 0x3e, 0x0f, 0x4f, 0x38, 0xbc, 0x0f, 0xb3, 0x06, 0xbe, - 0x64, 0x20, 0x2b, 0xbe, 0x81, 0xb0, 0x1d, 0x3e, 0x57, 0x08, 0x8c, 0xbe, - 0x62, 0xbc, 0xe7, 0x3d, 0x45, 0xdf, 0x4e, 0x3d, 0xa0, 0x7d, 0x29, 0x3d, - 0x87, 0xd4, 0xc4, 0xbd, 0xb2, 0xa7, 0x54, 0xbd, 0x87, 0xfa, 0x8f, 0x3d, - 0x4b, 0x05, 0x34, 0x3e, 0xef, 0x5a, 0xa9, 0x3d, 0xd5, 0xd2, 0xc9, 0xbd, - 0x66, 0xbd, 0x16, 0xbd, 0x54, 0xcf, 0x08, 0x3e, 0x54, 0x8a, 0xf4, 0x3c, - 0xc6, 0xcd, 0x31, 0x3e, 0xf6, 0x6d, 0x7c, 0xbc, 0x7e, 0x1e, 0x10, 0xbd, - 0x30, 0x46, 0x1d, 0xbe, 0x5d, 0x65, 0x9c, 0xbc, 0xea, 0xbe, 0x4d, 0xbe, - 0x76, 0xc0, 0x91, 0xbc, 0x6d, 0x84, 0x33, 0x3d, 0x63, 0xc5, 0x1a, 0xbe, - 0x48, 0xa0, 0xaa, 0x3d, 0x2c, 0x55, 0x27, 0x3e, 0xee, 0x41, 0x94, 0x3d, - 0x3a, 0x2b, 0xa2, 0x3c, 0x14, 0xa0, 0x15, 0xbe, 0x33, 0x75, 0xdb, 0x3b, - 0xe2, 0x0e, 0x5d, 0xbd, 0x8d, 0x94, 0xc3, 0x3c, 0x76, 0x8e, 0xc3, 0xbe, - 0x26, 0xd0, 0x05, 0xbe, 0xb3, 0x7c, 0xc3, 0xbd, 0x95, 0xbd, 0xa3, 0x3d, - 0x98, 0x07, 0xc7, 0xbc, 0xbb, 0xc0, 0x77, 0xbd, 0x0b, 0x87, 0x80, 0x3c, - 0x3b, 0xda, 0x88, 0xbd, 0xc3, 0x53, 0x03, 0x3e, 0xe3, 0x7d, 0x00, 0xbe, - 0x57, 0xf1, 
0x40, 0x3c, 0xd3, 0xf2, 0x23, 0xbd, 0xcf, 0x47, 0xcf, 0xbd, - 0x55, 0x35, 0x0c, 0xbd, 0x3a, 0x41, 0x60, 0xbd, 0xb3, 0xc1, 0x21, 0x3e, - 0x17, 0x79, 0x79, 0xbe, 0x4f, 0x9e, 0x0e, 0xbe, 0x81, 0x91, 0x00, 0xbe, - 0xaf, 0x5b, 0xbc, 0xbc, 0xe2, 0xbc, 0xd0, 0xbc, 0xa5, 0xfe, 0x9d, 0x3d, - 0x03, 0xbd, 0x93, 0xbe, 0x1e, 0x59, 0xa9, 0xbd, 0x1f, 0xea, 0xd0, 0xbd, - 0xc9, 0x61, 0x03, 0x3e, 0xe8, 0x4c, 0x16, 0x3e, 0xe5, 0x83, 0x41, 0xbb, - 0xd3, 0x77, 0xd2, 0xbd, 0x9e, 0x9e, 0x8d, 0xbc, 0x75, 0x41, 0x37, 0xbc, - 0x61, 0x40, 0x70, 0xbd, 0xbf, 0xec, 0xde, 0xbd, 0x8c, 0x63, 0xee, 0xbc, - 0xc1, 0x06, 0xe8, 0xbd, 0x15, 0x17, 0x4e, 0x3c, 0xee, 0xaa, 0xf0, 0xbd, - 0x17, 0x12, 0x02, 0xbd, 0xbb, 0xbf, 0x67, 0xbe, 0x35, 0xcb, 0x44, 0xbe, - 0x40, 0xaf, 0xa2, 0x3d, 0xc1, 0xe9, 0x9d, 0xbc, 0x84, 0x51, 0x61, 0x3e, - 0xea, 0xad, 0x7f, 0x3d, 0x5f, 0x13, 0x82, 0x3c, 0x87, 0x1c, 0xf5, 0xbc, - 0x5c, 0xc4, 0xe8, 0x3c, 0xc1, 0xa3, 0x68, 0xbd, 0x2c, 0xbf, 0x98, 0xbe, - 0xf7, 0xa1, 0xd2, 0x3b, 0x70, 0x4c, 0x24, 0x3c, 0xe2, 0x19, 0x8b, 0xbd, - 0xd0, 0x95, 0x17, 0xbe, 0xa4, 0x5e, 0x2b, 0xbe, 0x55, 0x11, 0x53, 0xbe, - 0x33, 0xdc, 0x7c, 0xbe, 0xa3, 0x5f, 0x00, 0x3e, 0x41, 0x5c, 0xf2, 0x3d, - 0x0d, 0xab, 0xe8, 0xbd, 0xdd, 0xf9, 0x24, 0x3d, 0x7f, 0x07, 0x06, 0x3e, - 0x62, 0xd0, 0x26, 0x3d, 0x72, 0x32, 0xf9, 0xbd, 0x80, 0x7a, 0xce, 0xbd, - 0xa8, 0x00, 0x1c, 0xbe, 0x28, 0x3a, 0x33, 0xbe, 0xef, 0xfc, 0xe6, 0xbc, - 0x69, 0xd4, 0xe4, 0x3d, 0x9a, 0x5d, 0x33, 0xbe, 0xb1, 0x1f, 0xd9, 0x3c, - 0xa8, 0xe8, 0x5c, 0xbd, 0xdb, 0x5e, 0x9d, 0xbe, 0x17, 0xac, 0xb8, 0xbc, - 0x5a, 0x52, 0x4d, 0x3d, 0x3d, 0x00, 0x97, 0xbc, 0x9a, 0xaa, 0x53, 0xbe, - 0xc0, 0x8c, 0x18, 0xbe, 0xdd, 0x93, 0x28, 0xbd, 0xa5, 0x6a, 0x97, 0x3d, - 0xe1, 0x09, 0x55, 0xbd, 0xea, 0xdb, 0xaa, 0xbe, 0xb1, 0x0d, 0xa0, 0xbd, - 0x33, 0xb7, 0x0c, 0xbe, 0xf3, 0x7c, 0xe5, 0x3d, 0x9e, 0x05, 0x9c, 0xbe, - 0x52, 0x7b, 0x0e, 0xbe, 0x7d, 0x50, 0x0b, 0xbe, 0x8a, 0x99, 0x1a, 0xbe, - 0x70, 0x90, 0xde, 0x3c, 0x73, 0x98, 0x7f, 0xbc, 0xf2, 0x72, 0x1b, 0x3e, - 0x64, 0x71, 
0x72, 0xbd, 0xd5, 0xdb, 0x7c, 0xbe, 0xb0, 0xb4, 0xc6, 0xbc, - 0x80, 0x0a, 0x43, 0xbc, 0x7c, 0x3e, 0xa8, 0x3d, 0x9a, 0xd9, 0xda, 0xbe, - 0xe6, 0x9d, 0x21, 0xbe, 0x5e, 0x36, 0x30, 0xbe, 0xc5, 0x14, 0x38, 0x3e, - 0xd1, 0xa1, 0xa3, 0x3d, 0x22, 0xdd, 0x3d, 0xbe, 0xd6, 0x71, 0x99, 0xbd, - 0xe8, 0xab, 0x8d, 0x3e, 0x02, 0x68, 0x38, 0x3a, 0x4c, 0x4f, 0xc6, 0xbd, - 0x32, 0xb3, 0x38, 0x3e, 0x29, 0x0d, 0x23, 0xbe, 0x84, 0x39, 0xd8, 0xbd, - 0x82, 0x93, 0xaf, 0x3d, 0x5b, 0xbb, 0x98, 0x3c, 0x33, 0x97, 0xa7, 0x3d, - 0x25, 0x92, 0x96, 0xbe, 0x51, 0x1f, 0x79, 0xbe, 0x24, 0x78, 0x0b, 0xbe, - 0x2b, 0x85, 0x78, 0xbe, 0xf7, 0xd1, 0x40, 0x3f, 0xa2, 0xd5, 0x9a, 0x3e, - 0x9b, 0xff, 0x63, 0xbe, 0x0c, 0xcb, 0x08, 0xbe, 0xb8, 0x8a, 0x99, 0x3d, - 0xd1, 0xb9, 0x25, 0x3d, 0xd0, 0x91, 0x31, 0xbd, 0x3d, 0x06, 0x88, 0x3d, - 0x47, 0x65, 0xed, 0x3d, 0xaa, 0xc4, 0xd2, 0xbe, 0xd9, 0x6c, 0xc4, 0xbe, - 0xa1, 0xee, 0xc2, 0x3d, 0x56, 0xbc, 0x62, 0x3e, 0x76, 0x7d, 0x41, 0xbe, - 0x35, 0x60, 0x56, 0x3d, 0x6c, 0x50, 0x1d, 0xbe, 0xcf, 0xf4, 0x84, 0x3d, - 0x61, 0x1d, 0x4e, 0x3e, 0xea, 0xcd, 0x36, 0xbe, 0xc4, 0xc2, 0x77, 0x3c, - 0xa1, 0x6a, 0x14, 0xbd, 0xf2, 0x01, 0xcb, 0x3d, 0x73, 0xe6, 0x0d, 0x3e, - 0xf7, 0x54, 0x4d, 0xbd, 0x1b, 0x0b, 0xf0, 0x3d, 0xd8, 0x6b, 0x4c, 0x3e, - 0x05, 0xd9, 0xbf, 0xbe, 0x29, 0x5a, 0x0a, 0x3e, 0x54, 0x15, 0xaa, 0x3d, - 0x6a, 0xd0, 0xad, 0xbe, 0xb5, 0xa9, 0x7e, 0x3d, 0x5b, 0x9b, 0xa2, 0xbd, - 0x66, 0xeb, 0x1e, 0x3e, 0xdf, 0xfa, 0xb3, 0x3d, 0x01, 0xb9, 0xeb, 0xbd, - 0xed, 0x60, 0x7b, 0x3d, 0xdd, 0xc3, 0xab, 0xbc, 0x9c, 0xf9, 0xa7, 0x3d, - 0x7c, 0x05, 0xd7, 0x3d, 0xf3, 0x62, 0xb7, 0xbe, 0x9d, 0x89, 0x7b, 0x3d, - 0x2e, 0x25, 0x20, 0x3c, 0xdb, 0xe4, 0x3d, 0xbe, 0x0e, 0xee, 0x3d, 0x3d, - 0x9c, 0xd3, 0xdc, 0x3c, 0x00, 0x4d, 0x38, 0xbe, 0xd6, 0x3c, 0x1e, 0xbd, - 0x8b, 0x41, 0x14, 0x3e, 0x56, 0x6b, 0x21, 0x3e, 0xfc, 0x0b, 0xb4, 0xbe, - 0x8c, 0xee, 0x24, 0xbe, 0x92, 0xf6, 0x48, 0x3e, 0x01, 0xa4, 0x4e, 0x3c, - 0x86, 0xf3, 0x0b, 0x3d, 0xde, 0x84, 0x1d, 0x3e, 0xe1, 0xa9, 0x00, 0xbf, - 0xa5, 0x03, 
0xa7, 0x3d, 0x2b, 0xf4, 0x32, 0xbd, 0x7b, 0xed, 0x30, 0xbd, - 0xd7, 0xb6, 0x2a, 0xbe, 0xa4, 0xa7, 0xaa, 0xbd, 0x76, 0xa8, 0x59, 0x3e, - 0x63, 0x2b, 0x3f, 0xbd, 0x6a, 0xb1, 0x8d, 0x3c, 0x1c, 0xf7, 0x90, 0x3e, - 0xad, 0xa8, 0xb2, 0xbd, 0x5e, 0xe2, 0x80, 0xbe, 0xb1, 0x3b, 0x67, 0xbd, - 0xaa, 0x58, 0x88, 0xbc, 0xce, 0x93, 0x66, 0xbe, 0x3d, 0xe4, 0x85, 0x3d, - 0xfc, 0x7d, 0xd8, 0xbe, 0x73, 0x8d, 0xeb, 0xbc, 0xf7, 0x65, 0x6c, 0xbe, - 0xc5, 0xd4, 0x51, 0x3d, 0x62, 0x0d, 0x4b, 0xbe, 0xb0, 0xc3, 0x1f, 0xbd, - 0x87, 0x98, 0x69, 0x3d, 0x4d, 0x80, 0xd2, 0xbc, 0x9c, 0x0c, 0x38, 0x3e, - 0x89, 0x8b, 0x81, 0x3e, 0x55, 0x79, 0xea, 0x3d, 0xd0, 0xf2, 0xc8, 0xbd, - 0xb4, 0x9c, 0xa9, 0x3d, 0x7f, 0x05, 0x83, 0xbd, 0x05, 0x77, 0xb0, 0x3b, - 0x62, 0xbd, 0x4c, 0x3d, 0x49, 0x44, 0xc9, 0xbd, 0xdc, 0x3f, 0xb9, 0x3d, - 0x56, 0xc5, 0xe3, 0xbe, 0x5e, 0x61, 0x26, 0x3d, 0x31, 0x0e, 0x12, 0xbc, - 0x2b, 0xe8, 0x93, 0x3b, 0xc0, 0x15, 0x10, 0xbe, 0xdb, 0x6b, 0xd4, 0xbd, - 0xa7, 0xf7, 0x97, 0x3d, 0x61, 0xa0, 0x08, 0x3e, 0x54, 0x8a, 0x65, 0x3e, - 0x58, 0xcc, 0x1d, 0xbe, 0x33, 0x44, 0x63, 0x3c, 0x69, 0x14, 0xf6, 0xbd, - 0xff, 0x62, 0xd4, 0x3d, 0x05, 0x2b, 0x8b, 0x3d, 0x22, 0x2c, 0x13, 0xbe, - 0x52, 0x0c, 0x0f, 0x3d, 0x48, 0x96, 0xc7, 0xbd, 0x4d, 0x35, 0xab, 0x3d, - 0x20, 0xbe, 0xeb, 0x3d, 0x64, 0xf0, 0x86, 0xbd, 0xcb, 0x71, 0xa5, 0xbd, - 0xd5, 0x2a, 0x95, 0x3c, 0x57, 0xaa, 0x06, 0x3e, 0x5e, 0x14, 0x3c, 0x3e, - 0xcb, 0xf8, 0x11, 0x3e, 0x34, 0x34, 0x6d, 0x3d, 0xa4, 0x66, 0x0b, 0x3c, - 0x52, 0x5b, 0x9d, 0xbb, 0x7a, 0x84, 0x90, 0xbd, 0x11, 0xc8, 0x4a, 0x3e, - 0x75, 0xf9, 0x8a, 0xbd, 0x83, 0x65, 0x22, 0xbd, 0x64, 0x5d, 0x15, 0xbd, - 0x91, 0x22, 0x5e, 0xbc, 0xbd, 0x09, 0x12, 0x3d, 0xfe, 0x19, 0xeb, 0x3c, - 0xa3, 0x78, 0x2e, 0xbd, 0x62, 0xf3, 0x7f, 0x3c, 0x05, 0x5e, 0x03, 0x3e, - 0x6c, 0x9e, 0xb7, 0x3d, 0xb1, 0x03, 0x4d, 0x3e, 0x66, 0x72, 0xfd, 0xbd, - 0xc5, 0x12, 0x11, 0x3d, 0x99, 0x39, 0xab, 0x3c, 0xc8, 0xf5, 0x5a, 0xbd, - 0x33, 0x84, 0x98, 0x3c, 0x0a, 0xd6, 0x99, 0x3d, 0x4f, 0xcb, 0xb5, 0xbb, - 0x00, 0x39, 
0x41, 0x3e, 0xdc, 0x20, 0x27, 0xbc, 0x34, 0xdb, 0xbb, 0x3d, - 0x33, 0x3c, 0x26, 0x3d, 0x3a, 0x99, 0x2b, 0x3d, 0xa4, 0x31, 0x76, 0x3d, - 0x50, 0x79, 0x91, 0x3d, 0x80, 0x50, 0xc3, 0x3d, 0x5c, 0x3d, 0x39, 0x3e, - 0xc9, 0x7b, 0xed, 0xbc, 0xf2, 0xbb, 0x88, 0x3d, 0x76, 0xb2, 0x5e, 0xbe, - 0x0c, 0x80, 0xb9, 0xbd, 0x10, 0x91, 0x12, 0x3d, 0xca, 0xfb, 0x47, 0x3d, - 0xc4, 0x43, 0xc4, 0xbd, 0xd3, 0x36, 0x82, 0xbd, 0x03, 0x96, 0xda, 0xbd, - 0xac, 0x46, 0xbf, 0x3d, 0x26, 0x6b, 0x26, 0x3d, 0x02, 0xa1, 0x0f, 0xbe, - 0x68, 0x09, 0x42, 0x3e, 0x32, 0xf7, 0x5d, 0x3e, 0xad, 0xa5, 0xec, 0x3d, - 0x97, 0x8a, 0xb1, 0x3e, 0xc7, 0x0e, 0xee, 0xbd, 0x12, 0x2d, 0xf0, 0x3d, - 0x05, 0x02, 0xae, 0xbe, 0x63, 0x2c, 0xe3, 0x3c, 0x1c, 0x86, 0x47, 0x3e, - 0xe5, 0xa6, 0x46, 0x3e, 0x6b, 0x88, 0x8c, 0xbd, 0x6b, 0x84, 0x28, 0x3e, - 0xad, 0xc4, 0x79, 0xbe, 0x77, 0x46, 0xfc, 0xbc, 0x40, 0x27, 0x28, 0x3e, - 0x2c, 0x30, 0xe0, 0xbd, 0xe5, 0xb6, 0xd8, 0x3c, 0x68, 0xac, 0x90, 0x3e, - 0x14, 0x75, 0x1c, 0xbe, 0x0d, 0xc9, 0x9e, 0xbe, 0x4b, 0xec, 0x4f, 0xbe, - 0x1b, 0x78, 0xcb, 0x3d, 0xf6, 0xeb, 0xd3, 0xbe, 0x38, 0xab, 0xed, 0xbd, - 0x86, 0x1a, 0xf4, 0x3d, 0x43, 0x3f, 0xd2, 0x3c, 0xbf, 0x91, 0xc3, 0xbe, - 0x76, 0x19, 0x6a, 0x3d, 0xd9, 0x29, 0x35, 0xbe, 0x3b, 0x83, 0xe7, 0xba, - 0x6e, 0xcd, 0x2e, 0xbf, 0xec, 0x5f, 0x25, 0x3e, 0xb6, 0x01, 0x9f, 0x3d, - 0xd6, 0x6e, 0x37, 0x3e, 0x09, 0x82, 0x49, 0xbe, 0x31, 0x65, 0x67, 0xbf, - 0x1a, 0xf7, 0xfc, 0xbe, 0xa9, 0x80, 0x7a, 0xbc, 0x09, 0x7e, 0xa0, 0xbe, - 0x62, 0x1c, 0x38, 0xbd, 0xce, 0x50, 0x9e, 0x3d, 0x69, 0x4b, 0x47, 0xbe, - 0x53, 0x6d, 0x8f, 0xbe, 0xee, 0x17, 0xa4, 0xbe, 0x42, 0x75, 0x52, 0x3c, - 0xe5, 0x67, 0x1a, 0xbe, 0x34, 0xb7, 0x53, 0xbf, 0xc7, 0xdf, 0xb3, 0xbd, - 0x6a, 0x60, 0x00, 0xbe, 0x32, 0xd9, 0xc0, 0x3d, 0x15, 0x83, 0x9c, 0x3b, - 0x79, 0x94, 0x9a, 0xbe, 0xac, 0x0b, 0xf1, 0xbe, 0x13, 0xd8, 0x11, 0xbe, - 0x25, 0x26, 0xb3, 0xbe, 0x13, 0xce, 0xc7, 0x3d, 0x3a, 0xb6, 0x17, 0x3e, - 0xb7, 0xd8, 0x6f, 0x3d, 0x1f, 0xc1, 0xe2, 0xbe, 0x5a, 0x21, 0x26, 0xbe, - 0xbd, 0x0b, 
0x39, 0x3e, 0xf3, 0xb2, 0x87, 0x3d, 0x47, 0x17, 0xf0, 0xbe, - 0x09, 0xb9, 0xd8, 0x3d, 0x3c, 0x61, 0x81, 0x3e, 0x21, 0xe5, 0xba, 0xbd, - 0x26, 0x80, 0x0c, 0x3e, 0x95, 0x3e, 0x9a, 0xbd, 0x45, 0xc1, 0x31, 0xbe, - 0x3d, 0xa0, 0x14, 0xbd, 0x02, 0x58, 0x32, 0xbd, 0x59, 0xc7, 0xff, 0x3c, - 0x15, 0x05, 0xe2, 0x3e, 0x86, 0x1d, 0x0f, 0xbe, 0xa0, 0x40, 0xbc, 0xbe, - 0x68, 0x5e, 0x44, 0x3e, 0x99, 0x1f, 0xfb, 0x3d, 0xfe, 0x0e, 0x21, 0xbe, - 0x13, 0xf4, 0x67, 0xbe, 0x39, 0x82, 0xe1, 0xbc, 0xf4, 0x0d, 0xdb, 0x3d, - 0xd6, 0x05, 0x98, 0xbe, 0xd5, 0x96, 0x43, 0xbe, 0x0d, 0x8b, 0x04, 0xbe, - 0xdb, 0x1b, 0x9b, 0xbd, 0xa7, 0xb8, 0x86, 0xbe, 0x93, 0xa7, 0x48, 0x3e, - 0xff, 0x83, 0x70, 0xbe, 0x81, 0x92, 0xb6, 0x3e, 0x00, 0xde, 0x2d, 0xbe, - 0xcb, 0xea, 0xd1, 0xbe, 0x26, 0x5f, 0x89, 0x3d, 0xa9, 0x78, 0x94, 0xbc, - 0x46, 0xe4, 0xc0, 0xbc, 0xf8, 0x7a, 0x7f, 0xbe, 0x7f, 0x6c, 0xda, 0xbd, - 0xf0, 0x26, 0x7a, 0x3d, 0xc8, 0xe1, 0x68, 0xbe, 0xf9, 0x78, 0x22, 0xbd, - 0xc6, 0xe1, 0x7e, 0xbe, 0x48, 0xa7, 0xa1, 0xbd, 0x79, 0xdc, 0x63, 0xbc, - 0x3b, 0x33, 0x25, 0x3e, 0xe9, 0x5d, 0x8a, 0xbe, 0x68, 0x6a, 0x42, 0xbe, - 0xb6, 0x15, 0x24, 0xbd, 0x8a, 0xab, 0xec, 0xbe, 0x57, 0x0d, 0xf0, 0x3d, - 0xea, 0x0e, 0xd0, 0xbd, 0x6e, 0xe6, 0xd4, 0xbd, 0xc1, 0x3c, 0x84, 0xbe, - 0xfe, 0xc7, 0x0f, 0xbe, 0xe0, 0x6e, 0x3b, 0xbd, 0xb5, 0x5b, 0x75, 0xbe, - 0x57, 0xb2, 0x70, 0xbe, 0x9a, 0xb9, 0x45, 0x3c, 0xc0, 0xe1, 0xd2, 0xbc, - 0x71, 0xc3, 0xbc, 0xbd, 0xc1, 0x3a, 0x0b, 0x3d, 0x0d, 0x92, 0xd4, 0xbc, - 0x90, 0x72, 0x14, 0xbd, 0x19, 0x0f, 0x79, 0x3c, 0x8d, 0xb0, 0xc9, 0xbe, - 0xec, 0x8b, 0xe0, 0x3d, 0x61, 0xeb, 0x92, 0xbd, 0xcc, 0x90, 0xf1, 0xbd, - 0xc4, 0x38, 0x92, 0xbd, 0x89, 0xa0, 0x33, 0xbc, 0x4e, 0xe5, 0x4d, 0xbd, - 0x7a, 0xd2, 0x83, 0xbd, 0x61, 0x9a, 0xd0, 0x3d, 0x33, 0xca, 0x94, 0xbd, - 0x8c, 0x1a, 0xb3, 0x3d, 0x4e, 0x06, 0x51, 0x3e, 0xd5, 0x7a, 0xb3, 0x3d, - 0x6a, 0x1d, 0x01, 0xbe, 0x3a, 0xc0, 0xc4, 0x3e, 0xe6, 0xf7, 0xee, 0xbd, - 0xe2, 0x8e, 0x1f, 0xbe, 0xc2, 0xfd, 0x96, 0x3e, 0x6c, 0xe1, 0x62, 0xbe, - 0xff, 0xd8, 
0x13, 0xbe, 0xbd, 0xb6, 0x81, 0xbd, 0xd8, 0xdc, 0x4a, 0x3b, - 0xe5, 0xe2, 0xcb, 0x3a, 0x49, 0x4c, 0xc3, 0xbd, 0xfb, 0x38, 0xa1, 0x3d, - 0x96, 0x5e, 0xa7, 0xbc, 0xcb, 0xde, 0x23, 0x3d, 0x0f, 0x0d, 0x8f, 0x3e, - 0x9b, 0x54, 0x04, 0x3e, 0x7d, 0x1b, 0x78, 0xbd, 0x06, 0x2f, 0x2a, 0x3e, - 0x3e, 0x9a, 0xce, 0xbe, 0xa2, 0xa0, 0xa7, 0xbe, 0x58, 0x1f, 0x5e, 0x3e, - 0x23, 0xe4, 0xc5, 0x3c, 0x25, 0xc4, 0x1f, 0xbe, 0x9a, 0x80, 0x56, 0x3b, - 0x69, 0xb9, 0x17, 0x3e, 0x4d, 0xd6, 0xf1, 0x3d, 0xf3, 0x63, 0x4a, 0xbe, - 0x2b, 0xa0, 0xcd, 0x3d, 0x14, 0xc3, 0x28, 0xbe, 0x61, 0x71, 0x05, 0x3e, - 0x6a, 0xb8, 0x07, 0x3d, 0x55, 0xf9, 0x2b, 0x3e, 0x39, 0x86, 0x0f, 0xbd, - 0x38, 0xd5, 0x61, 0x3e, 0xdb, 0x24, 0xa6, 0xbe, 0x20, 0xde, 0x2c, 0xbe, - 0x15, 0x3f, 0xc8, 0x3d, 0x9e, 0x9b, 0x45, 0x3d, 0xed, 0x09, 0x22, 0xbe, - 0x2a, 0x2c, 0x33, 0xbe, 0x37, 0x8a, 0x54, 0xbd, 0x48, 0x0c, 0x84, 0xba, - 0x2f, 0x23, 0x12, 0xbe, 0xc4, 0x7c, 0x48, 0x3e, 0x82, 0xd5, 0x95, 0xbe, - 0x79, 0xda, 0x06, 0x3c, 0x2d, 0xf1, 0x33, 0x3d, 0x55, 0x60, 0x77, 0xbd, - 0x1d, 0x61, 0x05, 0xbe, 0x81, 0xcb, 0x8d, 0x3e, 0x06, 0x08, 0x01, 0xbe, - 0x95, 0xc7, 0x51, 0xbd, 0xf0, 0x72, 0x99, 0xbd, 0x06, 0xb2, 0xc6, 0x3d, - 0xfb, 0x0f, 0xca, 0x3d, 0x8e, 0x73, 0x7e, 0x3b, 0xfd, 0x34, 0x67, 0xbd, - 0x70, 0x32, 0xdd, 0xbc, 0x2d, 0x99, 0xea, 0xbe, 0xc2, 0x73, 0x64, 0xbd, - 0x00, 0x94, 0x0f, 0xbe, 0x75, 0x7a, 0xba, 0xbd, 0x85, 0xc4, 0x2a, 0x3d, - 0x76, 0x88, 0x30, 0x3d, 0x53, 0x72, 0x9d, 0xbd, 0x30, 0x57, 0x81, 0x3e, - 0x6e, 0x96, 0x95, 0xbd, 0x3c, 0x35, 0x38, 0x3d, 0x9b, 0x98, 0x6f, 0xbb, - 0xf0, 0x78, 0x27, 0x3d, 0xdc, 0xa7, 0x13, 0x3d, 0x6d, 0x26, 0x01, 0x3e, - 0x0c, 0x56, 0x0d, 0x3c, 0x34, 0x73, 0x26, 0x3d, 0x9c, 0xd1, 0xe5, 0xbe, - 0x3a, 0x3d, 0x80, 0xbd, 0x12, 0x3e, 0xf8, 0xbd, 0xaf, 0x15, 0x82, 0xbd, - 0x50, 0x20, 0x49, 0xbe, 0x69, 0x45, 0x3e, 0x3e, 0xaa, 0xf1, 0x2d, 0xbe, - 0x46, 0x98, 0x5a, 0xbe, 0x3a, 0x09, 0xb8, 0xbe, 0x10, 0xa7, 0x6b, 0xbe, - 0xf4, 0x2a, 0x71, 0x3e, 0x34, 0xde, 0x1a, 0xbd, 0x14, 0x25, 0xc5, 0x3d, - 0xac, 0x10, 
0x0e, 0xbd, 0xdd, 0xd6, 0x6c, 0x3e, 0x4a, 0x10, 0x6d, 0x3d, - 0x8a, 0x02, 0x45, 0xbe, 0x79, 0x00, 0x82, 0x3d, 0xa0, 0x2d, 0x6d, 0x3e, - 0xcf, 0x3b, 0xaa, 0xbd, 0x62, 0xbc, 0x3b, 0x3d, 0x1d, 0x84, 0x85, 0x3e, - 0x74, 0x75, 0x49, 0xbe, 0x72, 0x3d, 0x81, 0xbe, 0xd5, 0xb1, 0x13, 0xbe, - 0x15, 0x6e, 0x95, 0xbe, 0x6f, 0x24, 0x95, 0x3e, 0x07, 0xc3, 0x22, 0x3e, - 0x71, 0x27, 0x1c, 0xbd, 0xf8, 0x56, 0x55, 0xbe, 0x4d, 0x4b, 0x50, 0xbe, - 0x4e, 0x36, 0x61, 0xbd, 0xb4, 0x21, 0x08, 0xbf, 0x18, 0x79, 0xe1, 0xbc, - 0x05, 0x0e, 0x58, 0x3e, 0xaf, 0xa3, 0xff, 0xbd, 0x20, 0x4e, 0x78, 0xbd, - 0xce, 0x7a, 0xfe, 0xbc, 0x8d, 0xca, 0x02, 0xbe, 0x0c, 0x26, 0xb8, 0xbe, - 0x64, 0x00, 0xab, 0xbe, 0xd2, 0x25, 0x2b, 0xbe, 0x1f, 0x46, 0x4e, 0xbd, - 0x30, 0x12, 0xa0, 0xbd, 0xea, 0xae, 0x29, 0x3e, 0x7f, 0xe0, 0x8f, 0xbe, - 0x9e, 0x18, 0x2b, 0x3d, 0xe2, 0xa8, 0x14, 0xbe, 0x23, 0xe3, 0x17, 0x3c, - 0xee, 0xc3, 0x21, 0x3e, 0x43, 0xdf, 0xce, 0x3d, 0x12, 0x1c, 0xd2, 0xbb, - 0x7f, 0xd5, 0xca, 0x3d, 0x3c, 0x5c, 0xf1, 0x3d, 0x95, 0x71, 0x44, 0x3d, - 0x93, 0xf3, 0xba, 0xbe, 0x68, 0x93, 0x5c, 0xbe, 0x10, 0x67, 0xf1, 0xbe, - 0x2a, 0x15, 0x18, 0x3d, 0x73, 0xe2, 0x82, 0x3b, 0xd6, 0x91, 0xbc, 0x3d, - 0x52, 0xcb, 0xb9, 0xbe, 0xcb, 0x4a, 0xd7, 0x3c, 0x6c, 0x72, 0xa3, 0xbe, - 0xf7, 0xb6, 0xed, 0xbc, 0xf2, 0x4b, 0x78, 0x3a, 0x22, 0x3b, 0x92, 0xbe, - 0x93, 0xd9, 0x90, 0x3e, 0x45, 0x47, 0x15, 0xbe, 0x15, 0xb5, 0x29, 0xbc, - 0x12, 0x00, 0xe3, 0xbd, 0xfb, 0xb2, 0xa7, 0xbe, 0x88, 0x19, 0x9b, 0xbe, - 0x18, 0x47, 0x29, 0xbe, 0x65, 0xe8, 0xec, 0xbb, 0xd7, 0x95, 0x5e, 0xbe, - 0x44, 0xf0, 0xae, 0xbd, 0x5e, 0xb2, 0x63, 0xbe, 0x8f, 0x8c, 0xda, 0xbd, - 0x21, 0xec, 0xce, 0x3c, 0x61, 0xec, 0xc9, 0xbd, 0xc4, 0xbc, 0xae, 0xbe, - 0x55, 0x77, 0xa7, 0xbe, 0x5b, 0x6f, 0x43, 0xbe, 0x09, 0x7c, 0x72, 0x3d, - 0x07, 0x9c, 0x9d, 0xbe, 0xa3, 0x3f, 0x50, 0x3c, 0x1c, 0xa9, 0x0c, 0xbe, - 0x67, 0x2d, 0x3b, 0xbd, 0xc4, 0xed, 0x10, 0x3d, 0x50, 0xa2, 0xd4, 0xbe, - 0x47, 0x29, 0x0c, 0xbe, 0x9d, 0x91, 0x12, 0x3d, 0x88, 0xdd, 0x67, 0xb8, - 0x38, 0x07, 
0x2f, 0xbe, 0x1a, 0x06, 0x37, 0xbc, 0x62, 0x98, 0xa3, 0xbe, - 0x98, 0xcc, 0x81, 0x3e, 0x6e, 0x97, 0xd6, 0xbd, 0x95, 0x8b, 0xdd, 0xbd, - 0x79, 0xa4, 0xf3, 0x3c, 0xb5, 0xc2, 0x11, 0xbe, 0xd3, 0xec, 0x66, 0xbe, - 0xa3, 0x56, 0x80, 0xbc, 0x01, 0x00, 0x63, 0x3c, 0x56, 0x89, 0x87, 0x3b, - 0xe2, 0x59, 0x39, 0xbe, 0x4d, 0x6d, 0xfb, 0x3d, 0xa0, 0x39, 0xcc, 0x3d, - 0x69, 0x45, 0x8e, 0xbd, 0x5f, 0x84, 0xd6, 0x3d, 0xa1, 0x84, 0xbd, 0x3d, - 0x10, 0x07, 0x38, 0xbe, 0x9e, 0xfa, 0x10, 0x3e, 0xb7, 0xa7, 0x29, 0xbe, - 0x97, 0x88, 0x6a, 0xbd, 0xbe, 0x44, 0x49, 0xbc, 0x6d, 0xaf, 0x52, 0xbe, - 0x1b, 0x47, 0x60, 0xbd, 0x2d, 0x4d, 0x09, 0x3e, 0xaf, 0x9b, 0x4d, 0x3d, - 0x0e, 0x61, 0x8d, 0x3b, 0x79, 0xf0, 0x43, 0x3e, 0xf1, 0xe4, 0x6e, 0x3e, - 0x30, 0x82, 0xb4, 0x3d, 0xb1, 0x9a, 0x1f, 0xbe, 0xf0, 0x6d, 0xe7, 0x3d, - 0xa4, 0x03, 0x72, 0x3d, 0xf6, 0x2b, 0xa5, 0x3b, 0xd0, 0x1c, 0xd9, 0x3d, - 0x46, 0x22, 0xaa, 0xbe, 0xd3, 0x08, 0x54, 0xbd, 0xcd, 0xb1, 0xc2, 0xba, - 0x07, 0xf2, 0xb6, 0xbd, 0xfe, 0x75, 0x73, 0x3c, 0xe4, 0x9b, 0x0a, 0xbd, - 0xc7, 0x90, 0x9e, 0xbd, 0xf1, 0xea, 0xa7, 0x3c, 0xc8, 0x26, 0x52, 0x3e, - 0xfc, 0x27, 0xc7, 0x3e, 0x97, 0x6f, 0xa9, 0x3d, 0x7f, 0xcb, 0x24, 0xbe, - 0x7d, 0x06, 0xb8, 0xbd, 0xa3, 0x40, 0x5e, 0x3d, 0x27, 0x47, 0x2b, 0xbe, - 0x44, 0x61, 0x8d, 0x3e, 0x04, 0x26, 0x8c, 0xbe, 0x5c, 0x59, 0xfa, 0xbd, - 0xcf, 0xf0, 0xa6, 0xbd, 0xc6, 0xdd, 0x9c, 0x3d, 0x44, 0xe8, 0x70, 0xbe, - 0xda, 0x14, 0x31, 0xbe, 0x38, 0xdb, 0xd2, 0xbd, 0x50, 0xa7, 0x19, 0x3d, - 0xf0, 0x2d, 0x3b, 0x3e, 0xfd, 0x9c, 0xe7, 0x3e, 0xb3, 0x62, 0xbb, 0x3d, - 0xe6, 0xe4, 0x95, 0x3c, 0xa4, 0x26, 0x10, 0x3e, 0xf6, 0x96, 0x4d, 0x3d, - 0xd8, 0x93, 0x34, 0xbd, 0x87, 0x9d, 0x81, 0xbd, 0x4f, 0xb4, 0x85, 0xbd, - 0x7a, 0xb6, 0x55, 0xbe, 0xbd, 0xfd, 0x84, 0x3d, 0x53, 0xa2, 0x15, 0x3e, - 0x10, 0x1e, 0x93, 0xbe, 0x4e, 0xab, 0xc5, 0xbd, 0x55, 0xd9, 0xdf, 0x3d, - 0xca, 0xf4, 0x6d, 0x3d, 0xdb, 0xfe, 0x84, 0x3e, 0x95, 0x98, 0x36, 0x3e, - 0x93, 0x3e, 0xfd, 0x3c, 0x61, 0x70, 0x04, 0xbd, 0x46, 0x72, 0xe4, 0x3c, - 0xa9, 0x0e, 
0x58, 0xbe, 0xe5, 0x86, 0x33, 0x3d, 0x2d, 0x46, 0x03, 0xbe, - 0x94, 0xdc, 0xf0, 0xbd, 0xaa, 0x48, 0x27, 0xbe, 0x34, 0xb0, 0x63, 0xbd, - 0x56, 0x1f, 0xb7, 0xbb, 0xcd, 0xce, 0xde, 0xbd, 0xc7, 0xb0, 0x4d, 0x3d, - 0x65, 0x11, 0x01, 0x3e, 0xf5, 0xab, 0x8a, 0x3d, 0x34, 0xe1, 0x10, 0xbe, - 0x49, 0x06, 0xff, 0xbd, 0xb6, 0xe4, 0x69, 0xbe, 0xc8, 0x48, 0x78, 0xbd, - 0x1a, 0x63, 0x2b, 0x3d, 0x7f, 0x73, 0x35, 0xbe, 0x1e, 0x01, 0xf5, 0xbd, - 0x89, 0xad, 0x6b, 0xbe, 0x6a, 0x85, 0x09, 0x3d, 0x74, 0x69, 0xf6, 0xbd, - 0x62, 0x39, 0xc9, 0xbe, 0x72, 0x00, 0x8e, 0x3d, 0xfc, 0xa5, 0x9c, 0xbe, - 0x53, 0x4e, 0x95, 0xbd, 0xc7, 0x7d, 0xae, 0xbb, 0xa7, 0x61, 0xb9, 0xba, - 0xb8, 0x9a, 0xce, 0xbe, 0x80, 0xcc, 0xd2, 0xbe, 0xcf, 0xeb, 0x5c, 0xbe, - 0x69, 0xa4, 0x0e, 0xbd, 0x12, 0xa9, 0x1e, 0xbe, 0xc9, 0x6d, 0x50, 0xbe, - 0xfc, 0xd5, 0xe6, 0xbd, 0x9f, 0x5c, 0xd0, 0xbe, 0x33, 0xcf, 0xbb, 0xbd, - 0x70, 0x66, 0xa5, 0xbe, 0x6b, 0x92, 0xfa, 0xbe, 0x27, 0xa0, 0x4b, 0x3d, - 0x5d, 0x26, 0x75, 0xbe, 0x46, 0x63, 0x8c, 0xbe, 0x57, 0x65, 0xaf, 0xbd, - 0xea, 0xd8, 0x1c, 0x3d, 0xcd, 0x4f, 0xc2, 0xbe, 0x87, 0xf0, 0x0b, 0xbf, - 0x25, 0xdf, 0xd9, 0xbe, 0x5d, 0x6a, 0x4e, 0x3d, 0xaa, 0x06, 0xf1, 0xbd, - 0x31, 0xe2, 0x60, 0xbe, 0x72, 0x6f, 0xf2, 0xbc, 0xb9, 0x4a, 0x9e, 0xbe, - 0xfb, 0xb0, 0xd7, 0xbd, 0x9f, 0xfa, 0x96, 0xbe, 0xc8, 0x14, 0x13, 0xbf, - 0xaa, 0x6a, 0x39, 0xbc, 0x7f, 0x24, 0x5a, 0xbd, 0x12, 0x36, 0xa9, 0x3d, - 0x9e, 0x86, 0x60, 0x3d, 0xbc, 0x33, 0x85, 0x3c, 0x4c, 0x72, 0xf2, 0xbe, - 0x39, 0x9f, 0xba, 0xbe, 0xc2, 0xa5, 0x68, 0xbe, 0x35, 0x06, 0xa1, 0x3c, - 0xb5, 0xad, 0x53, 0x3d, 0x3e, 0xd0, 0xa2, 0xbd, 0xfd, 0x9b, 0xaa, 0x3c, - 0x0a, 0x8f, 0xa8, 0x3e, 0x24, 0xca, 0x09, 0xbd, 0x14, 0x2c, 0x4c, 0xbe, - 0x56, 0x1b, 0xae, 0xbd, 0x9d, 0x45, 0x60, 0xbc, 0xb7, 0x16, 0x5f, 0xbe, - 0x57, 0x8f, 0x8d, 0x3e, 0x5d, 0xab, 0x50, 0xbd, 0xcc, 0x11, 0xb0, 0x3c, - 0x52, 0x21, 0xd1, 0xbe, 0xfb, 0xff, 0xb2, 0xbc, 0x9b, 0xd1, 0x14, 0xbe, - 0xfc, 0x2c, 0x7e, 0xbd, 0x39, 0x94, 0xfb, 0xb9, 0xca, 0x93, 0xca, 0xbd, - 0x0a, 0xa7, 
0xfc, 0xbd, 0xf6, 0xd3, 0x7f, 0x3e, 0xd3, 0xda, 0x0b, 0xbe, - 0x8e, 0xf9, 0x99, 0xbd, 0xc5, 0xe4, 0x73, 0xbd, 0xba, 0x7d, 0xae, 0xbd, - 0x5b, 0x0e, 0x24, 0x3d, 0xcf, 0x46, 0x90, 0x3d, 0x42, 0x80, 0x88, 0xbc, - 0x93, 0x44, 0xa0, 0x3c, 0x02, 0x2c, 0x53, 0xbb, 0xb3, 0x0d, 0xdf, 0x3e, - 0xf3, 0x46, 0xc8, 0x3c, 0xc5, 0x84, 0xa4, 0xbd, 0xf8, 0x13, 0xad, 0x3b, - 0x6d, 0x41, 0xb5, 0x3c, 0xeb, 0x74, 0x5f, 0xbe, 0x67, 0x82, 0xd0, 0x3e, - 0x3c, 0xd0, 0x9c, 0xbd, 0xa0, 0x97, 0x48, 0x3e, 0x1f, 0x0a, 0x36, 0xbe, - 0xf6, 0x51, 0x4a, 0xbd, 0x4f, 0xe5, 0x20, 0xbe, 0xa1, 0x8e, 0x0d, 0xbd, - 0x70, 0x13, 0x9c, 0xbd, 0xe8, 0x43, 0xb1, 0xbb, 0xc3, 0x59, 0xa4, 0x3e, - 0x06, 0xd6, 0x10, 0x3e, 0x8e, 0x8e, 0x88, 0x3e, 0xe8, 0xa8, 0xe1, 0xbd, - 0xb3, 0x6e, 0x6b, 0xbe, 0x28, 0x96, 0x7f, 0xbe, 0xde, 0xcc, 0xab, 0xbe, - 0xb0, 0xc9, 0xa6, 0xbe, 0x60, 0x75, 0xc5, 0x3d, 0x39, 0x36, 0xea, 0x3d, - 0x53, 0x43, 0xea, 0xbe, 0xb0, 0x9d, 0x9e, 0x3d, 0x06, 0x01, 0x7c, 0xbe, - 0xb8, 0x49, 0x51, 0xbe, 0x7c, 0xad, 0xe8, 0x3c, 0xe2, 0x4e, 0xc6, 0x3c, - 0x9f, 0x1f, 0x56, 0xbe, 0x5a, 0xd7, 0x79, 0x3d, 0x62, 0x73, 0x81, 0xbe, - 0xe6, 0xc1, 0xd1, 0x3c, 0x4e, 0x1c, 0x18, 0x3f, 0x2a, 0xe7, 0xa4, 0x3e, - 0xf6, 0x80, 0x24, 0x3e, 0x06, 0xf7, 0x00, 0x3f, 0x34, 0x8e, 0x91, 0xbe, - 0x17, 0x9f, 0xf3, 0x3c, 0xc5, 0x69, 0x15, 0x3f, 0x18, 0xd8, 0xab, 0x3d, - 0x98, 0xd2, 0x55, 0x3d, 0xf1, 0x5f, 0xdb, 0x3e, 0x21, 0x48, 0x05, 0xbf, - 0xf7, 0x44, 0x65, 0xbd, 0xbd, 0x27, 0xf6, 0x3e, 0x4e, 0x58, 0xaa, 0x3e, - 0x1f, 0x87, 0xd5, 0x3b, 0x4e, 0x6f, 0xa8, 0xbd, 0x1f, 0x08, 0xd0, 0xbe, - 0x06, 0xd5, 0x76, 0x3d, 0x15, 0xf8, 0x48, 0x3e, 0x38, 0xe0, 0x0e, 0xbf, - 0xaa, 0x5c, 0xaf, 0xbd, 0xfd, 0xe2, 0xbd, 0xbe, 0x54, 0x5b, 0x2f, 0xbf, - 0x37, 0xe6, 0xd9, 0x3d, 0xd4, 0x6d, 0x89, 0xbe, 0xe4, 0xa0, 0xc4, 0x3c, - 0x76, 0x37, 0xe2, 0xbe, 0xd2, 0xf0, 0x98, 0x3c, 0xe7, 0x7a, 0xcc, 0x3e, - 0xaa, 0x3f, 0x11, 0xbf, 0x86, 0x4a, 0x3e, 0xbf, 0xae, 0xa4, 0xe5, 0x3b, - 0x62, 0x98, 0x31, 0xbe, 0xff, 0x4e, 0x80, 0x3d, 0x79, 0xe7, 0xba, 0xbd, - 0x93, 0x5b, 
0xca, 0xbe, 0x3c, 0x97, 0x97, 0x3d, 0xf1, 0x41, 0x2d, 0xbf, - 0xc9, 0xa4, 0x4b, 0xbf, 0x9e, 0x2a, 0xde, 0x3d, 0xad, 0x4e, 0x84, 0xbe, - 0x01, 0x2f, 0x5c, 0x3b, 0xab, 0x1c, 0x45, 0xbe, 0x32, 0x41, 0x2b, 0x3c, - 0x73, 0x53, 0x50, 0x3e, 0xe2, 0xc4, 0x06, 0xbf, 0x62, 0xef, 0x13, 0xbf, - 0x6e, 0x3d, 0x86, 0x3c, 0x6d, 0x9a, 0xee, 0xbd, 0x49, 0xbc, 0xa8, 0x3d, - 0x68, 0xfe, 0x11, 0x3e, 0xde, 0x38, 0xfa, 0xbe, 0xbc, 0x06, 0x81, 0xbc, - 0xb7, 0x85, 0xc3, 0xbe, 0x26, 0x4f, 0x24, 0xbf, 0xe8, 0x94, 0xbb, 0xbb, - 0x16, 0x0e, 0x9b, 0xbe, 0xca, 0xe8, 0x52, 0xbe, 0xa2, 0xb7, 0x53, 0xbd, - 0x04, 0xef, 0x52, 0x3b, 0x08, 0xad, 0x1e, 0xbd, 0x72, 0xba, 0x01, 0xbf, - 0xd7, 0x49, 0x87, 0xbe, 0xf3, 0x7b, 0xf2, 0x3c, 0xfc, 0x8e, 0x84, 0x3d, - 0xcf, 0xcc, 0x3a, 0x3d, 0x35, 0xe0, 0xe4, 0xbc, 0x88, 0x88, 0x05, 0x3d, - 0x8d, 0x2f, 0xab, 0x3d, 0x33, 0x4f, 0x49, 0xbe, 0xc6, 0x68, 0xfc, 0xbe, - 0xfc, 0x90, 0x2f, 0xbb, 0x20, 0x38, 0xc1, 0xbd, 0x19, 0x53, 0x27, 0xbd, - 0xa2, 0xee, 0x09, 0xbe, 0x5e, 0x6f, 0x6b, 0x3d, 0x1e, 0xf9, 0x0d, 0x3d, - 0x76, 0xd5, 0xdd, 0xbe, 0xe3, 0xa8, 0xac, 0xbe, 0xaa, 0xad, 0x0d, 0x3d, - 0x91, 0x62, 0xce, 0xbd, 0x83, 0x52, 0xbb, 0xbc, 0xfd, 0x4e, 0x18, 0x3e, - 0x98, 0x73, 0x8f, 0xbd, 0x55, 0x1f, 0x55, 0x39, 0xb9, 0x85, 0x67, 0xbe, - 0x1a, 0xcf, 0xd3, 0xbe, 0x97, 0x08, 0x14, 0xbd, 0xca, 0xf1, 0x11, 0xbe, - 0x8b, 0x4b, 0x39, 0xbe, 0xe8, 0xe0, 0xd8, 0xbc, 0x0a, 0xf4, 0x26, 0xbc, - 0x0d, 0xda, 0x20, 0xbd, 0x52, 0xe7, 0xb1, 0xbe, 0xfc, 0x69, 0xd3, 0xbe, - 0x6f, 0x10, 0xc8, 0x3c, 0xb8, 0xb5, 0x20, 0xbe, 0xcd, 0xd6, 0x72, 0x3d, - 0xb4, 0x1b, 0xca, 0xbd, 0x1c, 0xa7, 0xfb, 0xbd, 0xf4, 0x0c, 0x91, 0xbd, - 0x95, 0xec, 0xda, 0xbc, 0x00, 0xff, 0x2b, 0xbe, 0x17, 0x4b, 0x45, 0xbd, - 0x45, 0x0c, 0x85, 0xbe, 0x97, 0x38, 0x88, 0xbd, 0x76, 0xfb, 0x0a, 0xbe, - 0xe3, 0xb5, 0xdf, 0x3c, 0x80, 0x2c, 0xd5, 0x3d, 0xc7, 0xb8, 0xb5, 0xbe, - 0x94, 0x5d, 0xba, 0xbe, 0x1e, 0x73, 0xba, 0x3c, 0xe6, 0x81, 0x25, 0xbe, - 0xff, 0x3d, 0x49, 0x3d, 0xc1, 0x10, 0x7d, 0x3e, 0x62, 0x4f, 0x55, 0xbe, - 0x3e, 0x29, 
0x2f, 0xbe, 0x8c, 0x31, 0x6c, 0xbe, 0xb3, 0x0f, 0xdf, 0xbe, - 0x63, 0x87, 0xb2, 0xbc, 0xf0, 0x8a, 0x3f, 0xbe, 0x24, 0xe6, 0x92, 0x3c, - 0xb3, 0x20, 0xa7, 0xbd, 0x89, 0xa6, 0xb4, 0x3b, 0x73, 0xe7, 0xfc, 0x3d, - 0xe7, 0x9d, 0xbc, 0xbe, 0x09, 0x19, 0x97, 0xbe, 0x7c, 0xfc, 0x78, 0x3d, - 0xae, 0x7b, 0x23, 0xbe, 0xe5, 0x86, 0x3a, 0x3d, 0x07, 0x56, 0x87, 0xbd, - 0x8e, 0x90, 0x87, 0xbe, 0xc4, 0x20, 0x4e, 0xbe, 0x22, 0x0e, 0x38, 0xbc, - 0xe7, 0x44, 0xae, 0xbe, 0xa8, 0xd5, 0x1a, 0x3d, 0x93, 0x4e, 0x93, 0xbe, - 0x00, 0xb9, 0x2f, 0xbc, 0x30, 0xcf, 0xb5, 0xbe, 0xfb, 0x54, 0x8b, 0xbb, - 0x65, 0xd0, 0x00, 0x3e, 0xd5, 0x24, 0xda, 0xbe, 0xcf, 0xc4, 0xd8, 0xbe, - 0x34, 0x93, 0xb9, 0x3c, 0x02, 0x97, 0x8a, 0xbe, 0x8f, 0xd4, 0x22, 0x3d, - 0x9a, 0xfc, 0xa8, 0xbe, 0x3c, 0x29, 0x5c, 0xbd, 0xab, 0x08, 0xf5, 0x3c, - 0x45, 0x1e, 0x42, 0xbe, 0xc4, 0x6e, 0xc9, 0xbe, 0x0a, 0xb0, 0xce, 0x3c, - 0x7c, 0xf9, 0x94, 0xbe, 0x09, 0xeb, 0x1f, 0xbe, 0x82, 0x6a, 0x9e, 0xbe, - 0x29, 0x1f, 0x9e, 0x3c, 0x00, 0x19, 0x0d, 0x3d, 0x47, 0x54, 0x77, 0xbe, - 0x31, 0x68, 0x44, 0xbf, 0x0a, 0x4c, 0x14, 0x3d, 0xdd, 0x90, 0xe0, 0x3d, - 0xd4, 0xc2, 0x8e, 0x3d, 0x64, 0xfb, 0xa7, 0xbe, 0x76, 0x35, 0x63, 0xbe, - 0xdf, 0xe6, 0x18, 0xbe, 0x82, 0x50, 0x08, 0xbf, 0xd5, 0x97, 0x1b, 0xbf, - 0x28, 0x8d, 0xce, 0xbc, 0x09, 0xa0, 0xaa, 0xbe, 0xbc, 0x7f, 0xce, 0xbd, - 0xae, 0x93, 0x7a, 0xbe, 0x6c, 0x9f, 0xec, 0x3b, 0xf2, 0x7c, 0x89, 0x3d, - 0xa7, 0x8e, 0x9b, 0xbe, 0x7d, 0x7e, 0x01, 0xbf, 0x0d, 0xae, 0x7e, 0x3d, - 0xfc, 0xe0, 0x09, 0xbd, 0xf4, 0x70, 0x5a, 0x3d, 0xd3, 0x33, 0xf9, 0xbd, - 0xc5, 0xf8, 0xbb, 0xbe, 0xd5, 0xc6, 0x8d, 0xbe, 0xc9, 0xb1, 0x5e, 0xbe, - 0x93, 0x6e, 0xd6, 0xbe, 0xae, 0x9a, 0xca, 0x3c, 0x59, 0x9d, 0x62, 0xbe, - 0x95, 0x6f, 0x62, 0xbe, 0xa8, 0x20, 0xac, 0xbe, 0x40, 0xb2, 0xb0, 0x3c, - 0x08, 0xf4, 0xed, 0x3d, 0x39, 0x3c, 0x42, 0xbe, 0x31, 0x1c, 0xe3, 0xbe, - 0x1a, 0x1a, 0x0e, 0x3c, 0x59, 0x79, 0xe9, 0x3b, 0xa2, 0x22, 0x10, 0xbe, - 0x30, 0xaa, 0xa5, 0x3d, 0x71, 0x8f, 0x06, 0xbf, 0x16, 0x24, 0xa0, 0xbe, - 0x67, 0xb2, 
0xc9, 0xbe, 0x52, 0x7b, 0x0f, 0xbf, 0xf9, 0xe9, 0x4f, 0x3d, - 0xac, 0x49, 0x17, 0xbe, 0xae, 0x48, 0x68, 0xbd, 0x7b, 0x00, 0xb6, 0xbe, - 0xeb, 0xb6, 0x68, 0x3c, 0xc9, 0xc1, 0xe9, 0x3d, 0xed, 0x2d, 0xc3, 0xbe, - 0xa0, 0x62, 0x41, 0xbf, 0x62, 0x09, 0x25, 0x3d, 0x9b, 0x06, 0x56, 0xbe, - 0x9f, 0x8b, 0x17, 0x3e, 0x34, 0xe7, 0x3c, 0x3c, 0x8a, 0xbb, 0x17, 0x3e, - 0x9d, 0xe9, 0x15, 0x3c, 0x24, 0x65, 0x04, 0xbf, 0x3e, 0x1f, 0x0c, 0xbe, - 0xd7, 0x33, 0x83, 0x3c, 0xc7, 0x55, 0xa0, 0xbc, 0xb4, 0xc6, 0xbf, 0xbd, - 0x5a, 0xb1, 0xfb, 0xbd, 0x64, 0x9e, 0x0b, 0x3d, 0x1a, 0xbc, 0x5f, 0x3e, - 0x3b, 0x78, 0xc8, 0x3d, 0x60, 0x10, 0xd6, 0xbd, 0x2d, 0xe4, 0x7d, 0x3d, - 0x9d, 0x40, 0x9b, 0xbe, 0xeb, 0x0c, 0xd1, 0xbc, 0x58, 0x00, 0x2e, 0xbe, - 0xb9, 0x75, 0xff, 0x3e, 0x4d, 0x20, 0xab, 0x3e, 0x26, 0x60, 0xed, 0xbd, - 0xc3, 0xb9, 0xdd, 0xbe, 0xb5, 0xda, 0xa8, 0x3e, 0x33, 0x68, 0xa2, 0x3d, - 0x43, 0xd0, 0xa9, 0x3c, 0xf1, 0x7f, 0x8b, 0x3b, 0x3f, 0x6b, 0xbf, 0x3e, - 0x6c, 0x69, 0x34, 0xbe, 0x79, 0x58, 0xf3, 0xbe, 0xf1, 0x2e, 0xdb, 0xbd, - 0x62, 0x48, 0x98, 0x3e, 0x59, 0x6d, 0x75, 0xbd, 0x37, 0x2c, 0x5a, 0x3c, - 0x13, 0xe7, 0x30, 0xbd, 0xa1, 0xac, 0xfc, 0x3c, 0xb2, 0xd8, 0x2a, 0x3e, - 0x22, 0x4b, 0xb6, 0xbd, 0xd1, 0xa3, 0x81, 0xbe, 0x73, 0x91, 0x15, 0xbd, - 0xe7, 0xfa, 0x76, 0x3d, 0x60, 0xf1, 0xdb, 0x3c, 0x07, 0xcf, 0xc2, 0x3c, - 0x7d, 0xaa, 0x8a, 0x3c, 0xa2, 0x13, 0xb1, 0x3e, 0xfa, 0x90, 0xa3, 0xbe, - 0x83, 0x37, 0x1f, 0x3e, 0xd3, 0xe8, 0xb0, 0x3d, 0xda, 0x49, 0x68, 0xbe, - 0x9d, 0x3c, 0x6b, 0x3e, 0xbc, 0x5a, 0x1d, 0xbc, 0xc9, 0x02, 0x25, 0x3d, - 0x9e, 0x16, 0x15, 0x3e, 0xb5, 0x40, 0x17, 0xbe, 0x19, 0x17, 0x1a, 0x3e, - 0x17, 0x95, 0x8c, 0xbd, 0xe5, 0x42, 0x0a, 0xbd, 0x61, 0x6f, 0xc9, 0x3d, - 0x16, 0x3c, 0x58, 0xbe, 0x19, 0x15, 0x7a, 0xbe, 0x2a, 0xb1, 0xa2, 0x3e, - 0x52, 0xa2, 0x5c, 0xbd, 0x92, 0x5f, 0x11, 0x3e, 0xb5, 0x1f, 0x2c, 0xbd, - 0xa5, 0xf8, 0x20, 0xbe, 0x0b, 0x75, 0xc9, 0x3d, 0x1e, 0x00, 0x82, 0x3e, - 0xfe, 0xa1, 0x61, 0x3d, 0xd5, 0x3f, 0x41, 0x3e, 0x09, 0x65, 0xcb, 0x3d, - 0xc5, 0x75, 
0x1e, 0x3c, 0xbb, 0x0f, 0x76, 0xbe, 0xbf, 0xb6, 0xa0, 0x3d, - 0xdf, 0x04, 0xae, 0x3d, 0x9b, 0xda, 0x06, 0xbf, 0x38, 0x8c, 0x84, 0xbe, - 0x92, 0x5c, 0x0c, 0x3e, 0xb8, 0x67, 0xfc, 0xbd, 0xb7, 0xec, 0x95, 0xbd, - 0x13, 0x50, 0x15, 0xbe, 0x75, 0xb1, 0x8d, 0xbd, 0x0a, 0x71, 0x1e, 0x3e, - 0x7d, 0x79, 0xf8, 0x3d, 0x40, 0x97, 0x77, 0x3d, 0x3b, 0xe7, 0x9d, 0xbe, - 0xb2, 0x88, 0xaa, 0x3c, 0x79, 0x16, 0x44, 0x3d, 0xb9, 0xdc, 0x02, 0xbe, - 0xe1, 0xcb, 0xa3, 0xbc, 0x29, 0xda, 0xff, 0xbb, 0xb2, 0x4a, 0xb8, 0xbe, - 0xd9, 0x15, 0xfb, 0xbe, 0xf4, 0x0b, 0x43, 0xbd, 0x3e, 0xc0, 0x17, 0xbd, - 0x04, 0xa3, 0xae, 0xbc, 0xd0, 0xff, 0xdf, 0xbe, 0x72, 0xcb, 0x0f, 0x3e, - 0x03, 0x79, 0xff, 0x3d, 0x91, 0x65, 0x40, 0x3e, 0xb6, 0x77, 0xbb, 0xbd, - 0xa5, 0x8f, 0x15, 0xbf, 0x30, 0x61, 0x1e, 0xbe, 0x8b, 0xa9, 0xc2, 0x3d, - 0x6e, 0xdf, 0x06, 0xbe, 0x1e, 0x89, 0xdf, 0x3d, 0x8a, 0x8f, 0xf2, 0x3c, - 0xfd, 0xcd, 0x43, 0xbc, 0x9c, 0x4c, 0xa2, 0xbe, 0x30, 0xb4, 0x19, 0xbe, - 0xb3, 0x94, 0x45, 0x3d, 0x6c, 0xaf, 0xc1, 0xbd, 0xd4, 0x46, 0x12, 0xbf, - 0xbb, 0xde, 0x5b, 0x3e, 0x30, 0xb4, 0xb0, 0x3d, 0xce, 0x2e, 0x84, 0x3d, - 0x3e, 0x76, 0xce, 0xbd, 0x82, 0x5a, 0x93, 0xbe, 0x2d, 0x20, 0xc8, 0x3d, - 0x00, 0x41, 0x07, 0xbd, 0xb1, 0x5a, 0x36, 0xbe, 0x40, 0x9e, 0xce, 0xbc, - 0x04, 0x6a, 0x0a, 0x3e, 0x1d, 0xc6, 0x05, 0xbe, 0x48, 0xb8, 0x22, 0xbd, - 0xf7, 0x49, 0x95, 0xbe, 0xfb, 0xbd, 0xca, 0xbd, 0x25, 0x34, 0x31, 0xbe, - 0xc3, 0xe3, 0x0f, 0xbf, 0xcf, 0xb1, 0x32, 0x3d, 0x73, 0x0d, 0x86, 0x3d, - 0x05, 0xbb, 0xcf, 0xbc, 0x4a, 0xd5, 0x8d, 0xbd, 0xb3, 0x6e, 0x0d, 0xbf, - 0x64, 0xec, 0x2f, 0xbe, 0x52, 0xf8, 0x15, 0xbe, 0xd3, 0xa2, 0x80, 0x3d, - 0xac, 0x9f, 0x6a, 0xbd, 0x5e, 0xc7, 0xf9, 0xbd, 0xc8, 0x90, 0x81, 0xbd, - 0xe7, 0x9d, 0x49, 0x3d, 0xd4, 0x4a, 0x06, 0xbe, 0x43, 0x04, 0x00, 0x3d, - 0x27, 0xa5, 0xc2, 0xbe, 0x90, 0x68, 0xfa, 0xbe, 0xe9, 0xb1, 0x1c, 0x3e, - 0x6c, 0x40, 0x94, 0xbd, 0xee, 0xe3, 0x39, 0x3c, 0x40, 0xd9, 0xd7, 0xbd, - 0xe8, 0x19, 0x46, 0xbe, 0x72, 0x04, 0xaa, 0xbe, 0x80, 0xd8, 0xd3, 0xbd, - 0xe6, 0x8e, 
0x04, 0xbe, 0x80, 0xea, 0xf8, 0xbc, 0xa2, 0x17, 0x51, 0x3e, - 0xd9, 0x2b, 0x59, 0x3d, 0x0c, 0x8f, 0x75, 0xbb, 0xdd, 0x53, 0x82, 0xbd, - 0x06, 0xc3, 0x50, 0x3d, 0xf2, 0x35, 0x54, 0xbe, 0xce, 0xf5, 0xea, 0xbe, - 0x22, 0x31, 0x61, 0xbc, 0x03, 0x0f, 0x4b, 0xbe, 0x1a, 0x66, 0x1e, 0x3d, - 0xb2, 0x6b, 0x22, 0xbe, 0xd7, 0x96, 0xe0, 0xbe, 0x08, 0x6f, 0xb3, 0xbe, - 0x30, 0x04, 0xf9, 0xbd, 0xea, 0x57, 0x23, 0xbe, 0x79, 0x3f, 0x8b, 0xbd, - 0xea, 0xa2, 0x62, 0x3d, 0xdc, 0x5b, 0xfb, 0x3c, 0x91, 0x51, 0x17, 0x3d, - 0xe7, 0x8d, 0x58, 0xbe, 0xab, 0x9a, 0x4f, 0xbd, 0x7c, 0xab, 0xad, 0xbe, - 0xa8, 0x66, 0x06, 0xbf, 0xbf, 0xfb, 0x9e, 0x3c, 0x48, 0xf1, 0x1e, 0xbd, - 0xad, 0xaf, 0x94, 0x3c, 0xf7, 0xdc, 0x81, 0xbe, 0xaa, 0x56, 0xda, 0xbe, - 0x41, 0xca, 0x1a, 0xbe, 0xaf, 0x06, 0xab, 0xbe, 0x9e, 0xb7, 0xa3, 0xbd, - 0x08, 0x79, 0xb6, 0x3d, 0x19, 0x31, 0x04, 0xbd, 0x90, 0xb5, 0xed, 0xbc, - 0x5d, 0xf7, 0x8a, 0x3c, 0x16, 0x86, 0x37, 0xbe, 0xa6, 0x12, 0xcd, 0x3d, - 0x82, 0x94, 0x42, 0xbe, 0xd7, 0x01, 0xad, 0xbe, 0xd1, 0xec, 0xb4, 0x3d, - 0x43, 0xf6, 0x2d, 0x3c, 0xeb, 0x80, 0xcc, 0x3d, 0x65, 0x14, 0x85, 0xbe, - 0x83, 0xc6, 0xdb, 0xbe, 0xf4, 0xd0, 0x8b, 0xbe, 0x97, 0x01, 0xfd, 0xbc, - 0x99, 0xdc, 0x9e, 0xbe, 0xf9, 0xc5, 0x09, 0x3e, 0x12, 0x1a, 0xc0, 0x3d, - 0x95, 0x7d, 0x2c, 0x3e, 0x40, 0x16, 0x88, 0xbd, 0xd0, 0x85, 0x3b, 0xbe, - 0xea, 0xef, 0x51, 0x3d, 0x63, 0xb3, 0x5b, 0xbe, 0x50, 0x49, 0xc5, 0xbe, - 0xc3, 0xa6, 0x2c, 0x3e, 0x3e, 0x64, 0x47, 0x3b, 0xfd, 0xe2, 0x22, 0x3d, - 0x1e, 0xf2, 0x52, 0xbe, 0x9d, 0x8f, 0xd6, 0xbe, 0x39, 0xf8, 0x0e, 0xbe, - 0x60, 0x67, 0x72, 0xbd, 0x05, 0x60, 0x0a, 0xbe, 0x43, 0x19, 0x8f, 0x3c, - 0x84, 0xe1, 0x3d, 0x3e, 0x7d, 0x8b, 0x24, 0x3d, 0x17, 0xf5, 0x3e, 0xbd, - 0xab, 0xf2, 0xe3, 0xbd, 0x8c, 0xc2, 0x9c, 0x3d, 0xf6, 0x55, 0x21, 0xbd, - 0x64, 0x77, 0x8a, 0xbe, 0xca, 0x5e, 0x97, 0xbb, 0x57, 0x52, 0x08, 0x3e, - 0x82, 0xb2, 0xd1, 0x3d, 0x7c, 0x53, 0x0b, 0xbd, 0x9b, 0x78, 0x9e, 0x3c, - 0xe5, 0xe2, 0xa5, 0xbd, 0x1a, 0x31, 0xbe, 0xbd, 0x37, 0x59, 0x8a, 0xbd, - 0xb5, 0x48, 
0x5e, 0x3e, 0x6f, 0xd3, 0xe2, 0x3d, 0x7f, 0x74, 0x26, 0xbd, - 0x8d, 0xfe, 0x8c, 0xbe, 0x35, 0xbc, 0x75, 0xbe, 0xdf, 0xa9, 0x07, 0x3d, - 0x2c, 0x90, 0xf4, 0x3d, 0x63, 0xb0, 0x9e, 0x3d, 0x38, 0x84, 0x78, 0x3e, - 0xa4, 0x5c, 0xd1, 0x3e, 0x0c, 0x9d, 0xa8, 0xbd, 0x08, 0x4f, 0x8e, 0x3e, - 0x12, 0xc1, 0x96, 0xbe, 0x9a, 0x4c, 0xee, 0xbb, 0xe2, 0x69, 0x9a, 0x3e, - 0x71, 0x14, 0xc7, 0xbe, 0xc8, 0xaa, 0xe9, 0xbc, 0x1a, 0x81, 0xae, 0x3e, - 0x0d, 0x0f, 0x0b, 0xbf, 0x9f, 0x40, 0x15, 0xbf, 0x99, 0xec, 0x9f, 0x3e, - 0xfe, 0x81, 0x42, 0x3e, 0x74, 0xb0, 0x42, 0xbe, 0x25, 0x93, 0x96, 0xbe, - 0xee, 0x36, 0x36, 0x3d, 0xa5, 0x9f, 0xa2, 0x3d, 0x07, 0x8e, 0x3e, 0x3e, - 0x89, 0x2a, 0x9f, 0x3d, 0xe5, 0x83, 0x58, 0xbd, 0x72, 0xf7, 0x01, 0xbe, - 0x9c, 0x8b, 0x67, 0xbc, 0x28, 0xc1, 0xf7, 0xbd, 0x3b, 0x5d, 0x86, 0xbd, - 0x8c, 0x2d, 0x34, 0xbd, 0x6f, 0x8f, 0xde, 0xbe, 0x8a, 0xf3, 0xf2, 0xbe, - 0x3b, 0x49, 0xb9, 0x3e, 0xfb, 0x63, 0x83, 0xbd, 0xda, 0x2f, 0x93, 0xbe, - 0xd6, 0x44, 0xa8, 0xbe, 0x3d, 0xec, 0xff, 0x3b, 0xb5, 0xd6, 0x04, 0xbd, - 0xe3, 0xc9, 0x19, 0x3d, 0x6c, 0xdb, 0x87, 0x3d, 0x70, 0xbc, 0x9e, 0xbc, - 0x76, 0x16, 0x24, 0x3e, 0x63, 0xb6, 0x6e, 0x3d, 0xb7, 0x58, 0x9a, 0xbe, - 0x79, 0xd0, 0x21, 0x3e, 0x16, 0xe7, 0x22, 0x3b, 0x4c, 0x44, 0x4a, 0xbd, - 0x78, 0x34, 0xcc, 0xbe, 0xfd, 0x92, 0x82, 0x3d, 0x3d, 0x22, 0x9b, 0xbd, - 0x01, 0xba, 0xcb, 0xbd, 0xf0, 0xac, 0xed, 0xbc, 0x92, 0x5f, 0x09, 0x3e, - 0x76, 0x6e, 0x97, 0xbd, 0x6e, 0x7d, 0x18, 0x3d, 0xed, 0x55, 0xf8, 0x3c, - 0x01, 0xaa, 0x4a, 0x3e, 0x49, 0xf7, 0x77, 0x3e, 0xa9, 0xd2, 0xd4, 0x3d, - 0x6b, 0x7a, 0xac, 0xbd, 0xb1, 0x67, 0x90, 0xbd, 0x30, 0xf5, 0x94, 0xbd, - 0xdd, 0xb9, 0xd7, 0x3d, 0xc3, 0x2e, 0x04, 0xbe, 0xfd, 0xc1, 0xa8, 0x3d, - 0x80, 0xf0, 0x41, 0xbe, 0xae, 0x23, 0x8e, 0x3c, 0x03, 0x8a, 0x05, 0xbc, - 0x50, 0x33, 0x26, 0x3e, 0xdd, 0x04, 0xca, 0x3d, 0x11, 0xd9, 0x82, 0xbe, - 0xc9, 0x2b, 0x42, 0x3e, 0xbb, 0x94, 0x72, 0x3e, 0xdb, 0x8f, 0xa5, 0x3c, - 0xde, 0x89, 0xd0, 0x3d, 0x24, 0xb6, 0x86, 0xbe, 0x1b, 0xa7, 0x54, 0xbe, - 0x48, 0x2e, 
0xd1, 0xbd, 0x5d, 0x47, 0x00, 0x3e, 0x8c, 0xd7, 0xa9, 0x3d, - 0xc9, 0x22, 0x9b, 0xbd, 0x9c, 0xa0, 0xe8, 0x3d, 0xc0, 0x7b, 0x1c, 0xbd, - 0x27, 0xd2, 0xdd, 0x3d, 0x24, 0x8e, 0xa0, 0x3d, 0x62, 0xf4, 0x53, 0xbc, - 0x16, 0x38, 0xba, 0xbe, 0x7c, 0x6e, 0xe3, 0x3d, 0x7b, 0x0d, 0x44, 0x3e, - 0xae, 0x3c, 0x8d, 0x3d, 0x1b, 0x4e, 0x4f, 0x3d, 0x73, 0x75, 0x4d, 0xbe, - 0x98, 0x9b, 0x44, 0xbe, 0xd2, 0xe8, 0x74, 0xbe, 0x8f, 0xd9, 0x56, 0x3e, - 0x74, 0x2a, 0x4e, 0x3e, 0xda, 0xb1, 0xd8, 0x3d, 0x38, 0xd7, 0x85, 0x3d, - 0x09, 0xbe, 0x3c, 0x3d, 0x24, 0x28, 0x6d, 0x3c, 0xbb, 0xcd, 0x1f, 0x3d, - 0xfb, 0x77, 0x3f, 0xbe, 0x33, 0xde, 0xbe, 0xbc, 0x5c, 0xa1, 0xb3, 0x3c, - 0x6b, 0xd1, 0x9d, 0x3d, 0xf1, 0x7b, 0x3b, 0x3e, 0x4c, 0x2c, 0x75, 0x3d, - 0x70, 0xef, 0xb2, 0xbe, 0xb5, 0x17, 0xe0, 0xbd, 0x61, 0x87, 0x65, 0xbd, - 0x3a, 0x09, 0x3e, 0x3e, 0x48, 0x49, 0xbf, 0xbb, 0x2f, 0x30, 0xaa, 0x3d, - 0x65, 0x75, 0x07, 0x3d, 0xbd, 0xc2, 0x1f, 0x3e, 0xe9, 0x8c, 0xf8, 0xbd, - 0x7c, 0x97, 0xe8, 0x3c, 0x9d, 0xc7, 0x51, 0xbb, 0x40, 0x8d, 0x89, 0xbd, - 0xd5, 0x27, 0x87, 0x3c, 0x92, 0xa1, 0x65, 0xbd, 0xf7, 0x7a, 0xa6, 0xbc, - 0xec, 0x7c, 0xd6, 0x3c, 0xf7, 0x47, 0x14, 0xbe, 0xd5, 0x0d, 0x92, 0x3c, - 0x3a, 0x11, 0x01, 0xbb, 0x9a, 0x3b, 0x03, 0x3e, 0xe0, 0xde, 0x22, 0xbe, - 0x37, 0xad, 0xe4, 0xbc, 0x4e, 0xb3, 0x03, 0xbb, 0x3f, 0xe8, 0xf6, 0xbd, - 0x87, 0x10, 0xf7, 0xbb, 0xe2, 0xc3, 0x9a, 0xbd, 0x0e, 0x42, 0x0b, 0x3e, - 0x92, 0x26, 0x9d, 0xbe, 0x4f, 0xe3, 0x32, 0xbc, 0x26, 0x0a, 0x03, 0x3e, - 0xf3, 0x60, 0xa7, 0x3c, 0x0c, 0x24, 0x05, 0x3d, 0x3e, 0xc8, 0x94, 0xbe, - 0x50, 0x31, 0x02, 0xbe, 0xff, 0xd4, 0x69, 0xbd, 0x1d, 0x42, 0x72, 0x3d, - 0xe7, 0x8c, 0x7f, 0xbe, 0x33, 0x87, 0x16, 0x3d, 0x93, 0x2c, 0xa2, 0xbd, - 0x88, 0xf0, 0xe3, 0xbb, 0xa8, 0x96, 0x84, 0x3d, 0xda, 0xc0, 0x40, 0x3e, - 0x8a, 0x68, 0x58, 0x3d, 0xad, 0xb0, 0x19, 0xbe, 0x97, 0x4e, 0x26, 0xbc, - 0x1c, 0x26, 0xe6, 0x3d, 0x48, 0x68, 0x55, 0x3d, 0xc6, 0xe3, 0xc3, 0x3d, - 0xa7, 0xe6, 0xc8, 0xbb, 0xf3, 0x00, 0x99, 0xbd, 0x2d, 0x63, 0xda, 0x3c, - 0xb4, 0xbd, 
0x81, 0x3e, 0xdf, 0xb9, 0x92, 0xbe, 0x48, 0x3a, 0xb2, 0xbd, - 0x9f, 0xcb, 0xd2, 0x3d, 0x0b, 0x38, 0x97, 0x3c, 0xe2, 0x95, 0xb7, 0xbc, - 0xf6, 0x82, 0x13, 0x39, 0xc0, 0x98, 0x1a, 0x3d, 0xc2, 0xf4, 0x51, 0x3d, - 0x0d, 0xeb, 0xf5, 0x3c, 0x1d, 0xda, 0x52, 0x3d, 0xad, 0xb0, 0x77, 0x3d, - 0xca, 0x58, 0x51, 0x3d, 0x5a, 0x2d, 0xb0, 0x3c, 0xf0, 0x8b, 0xeb, 0xbd, - 0x0d, 0xd8, 0x50, 0x3e, 0x65, 0x25, 0x18, 0xbe, 0xb0, 0x18, 0xfd, 0xbe, - 0xc4, 0x02, 0x3b, 0xbd, 0xfa, 0x7b, 0x82, 0x3d, 0xce, 0xa0, 0x4d, 0xbe, - 0x0b, 0xc6, 0x32, 0xbd, 0x84, 0x62, 0xc0, 0x3d, 0x60, 0x6e, 0x1e, 0x3d, - 0x31, 0x01, 0x28, 0x3e, 0x31, 0x76, 0x04, 0x3e, 0x91, 0x6b, 0x60, 0xbd, - 0x14, 0xf5, 0x20, 0x3e, 0x5c, 0x38, 0x67, 0xbd, 0x36, 0x21, 0xc2, 0x3d, - 0x13, 0x10, 0x7e, 0xbe, 0x19, 0xd1, 0x9f, 0x3e, 0xd0, 0x1a, 0x16, 0xbe, - 0x34, 0xb5, 0xaf, 0xbe, 0x86, 0x67, 0x16, 0x3d, 0x11, 0x05, 0x58, 0xbe, - 0xfb, 0x0d, 0xd0, 0x3c, 0x90, 0x88, 0x20, 0xbd, 0xdc, 0xcc, 0x9a, 0x3d, - 0x11, 0x29, 0x7a, 0x3e, 0x05, 0x44, 0xbf, 0xbe, 0x54, 0x1a, 0x0a, 0x3e, - 0xff, 0x6f, 0xb4, 0xbd, 0xeb, 0xa4, 0x86, 0x3d, 0x5e, 0x43, 0x00, 0x3e, - 0xfa, 0x4f, 0xd9, 0xbc, 0xad, 0x7b, 0xa2, 0xbd, 0x0e, 0xf6, 0x01, 0x3e, - 0x90, 0xf0, 0xb4, 0xbd, 0xd0, 0x21, 0x9a, 0xbe, 0x74, 0x43, 0x14, 0x3e, - 0x4d, 0xe8, 0x0b, 0xbe, 0x80, 0x2b, 0x93, 0xbd, 0x87, 0x39, 0x35, 0x3e, - 0x90, 0x63, 0xb8, 0x3d, 0xcf, 0x2c, 0x83, 0x3e, 0xbd, 0xe5, 0x0b, 0xbf, - 0x5f, 0xf7, 0x90, 0x3c, 0xa0, 0x61, 0x52, 0xbc, 0x8f, 0x88, 0xac, 0xbc, - 0x0c, 0x64, 0xd3, 0x3d, 0x9c, 0xd9, 0x8b, 0x3d, 0x80, 0xb7, 0x39, 0xbe, - 0x4e, 0x30, 0x95, 0x3e, 0x73, 0x3e, 0xda, 0xbe, 0x51, 0x56, 0x84, 0x3d, - 0xe6, 0x28, 0x85, 0x3d, 0xb9, 0xe9, 0x87, 0xbe, 0x46, 0x01, 0x6c, 0xbd, - 0x4b, 0x64, 0xd5, 0x3d, 0x56, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 
0x76, 0x32, 0x64, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, - 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00, 0x00, 0x00, 0x68, 0xfc, 0xff, 0xff, - 0x20, 0x00, 0x00, 0x00, 0xe4, 0xe5, 0x3e, 0xc0, 0x22, 0xe7, 0x92, 0x3f, - 0x57, 0x04, 0xde, 0xbf, 0xda, 0x8f, 0x1c, 0x3e, 0x47, 0xbf, 0x05, 0xc0, - 0x53, 0xab, 0xcb, 0xbf, 0x68, 0x6a, 0x6a, 0xbf, 0xd2, 0x0b, 0xe4, 0xbf, - 0xbe, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, - 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x5f, 0x62, - 0x69, 0x61, 0x73, 0x00, 0xd0, 0xfc, 0xff, 0xff, 0x40, 0x00, 0x00, 0x00, - 0xa3, 0x15, 0xb7, 0x3e, 0x29, 0x67, 0x95, 0x3f, 0x4b, 0x96, 0x62, 0xbe, - 0x61, 0x5f, 0xfc, 0x3e, 0xa2, 0xd4, 0x3e, 0xbf, 0x45, 0x1c, 0x0d, 0xbf, - 0x29, 0xdd, 0x70, 0xbe, 0x9a, 0x75, 0x97, 0xbf, 0xfc, 0x0a, 0x6f, 0xbe, - 0xcc, 0x56, 0x25, 0x3f, 0xdf, 0xac, 0x98, 0xbf, 0x0e, 0x1c, 0x8b, 0xbf, - 0xa5, 0xd8, 0x8c, 0x3f, 0xa5, 0x42, 0xd5, 0x3c, 0xa9, 0x8e, 0x7a, 0xbf, - 0x9e, 0xdb, 0x71, 0xbe, 0x46, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x31, 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, - 0x6c, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00, 0x00, 0x58, 0xfd, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0xc3, 0x99, 0xb6, 0xbf, 0xe9, 0x87, 0x8b, 0x3f, - 0xac, 0x83, 0x9a, 0xbf, 0x4c, 0x49, 0x3d, 0xbe, 0x9e, 0xfe, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 
0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, 0x65, 0x6e, 0x73, 0x65, - 0x5f, 0x31, 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, 0x6c, 0x2f, 0x52, 0x65, - 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, - 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x73, 0x65, 0x00, 0x00, - 0xc8, 0xfd, 0xff, 0xff, 0x00, 0x01, 0x00, 0x00, 0x40, 0x5c, 0x8a, 0xbe, - 0xc5, 0x88, 0xaa, 0x3e, 0xb0, 0x76, 0x1f, 0xbe, 0x35, 0x3a, 0x58, 0x3e, - 0x84, 0xa5, 0xa3, 0x3e, 0x38, 0xd9, 0x49, 0x3d, 0x58, 0x63, 0x78, 0xbc, - 0x82, 0x94, 0x2e, 0xbe, 0x97, 0xde, 0x6e, 0xbe, 0xea, 0x27, 0x9f, 0xbe, - 0x1d, 0x24, 0xc0, 0x3d, 0x21, 0x31, 0x66, 0x3c, 0x80, 0xf8, 0x88, 0xbe, - 0xdd, 0x06, 0x19, 0xbe, 0x3f, 0x4b, 0xb3, 0xbe, 0x70, 0xdc, 0x8d, 0x3e, - 0x20, 0xee, 0x93, 0xbe, 0xde, 0x7c, 0xbf, 0xbe, 0xda, 0x3a, 0x50, 0xbe, - 0x0e, 0x91, 0x6e, 0x3e, 0x18, 0xbc, 0x81, 0x3e, 0x18, 0x9c, 0xfe, 0xb9, - 0x11, 0x2d, 0x9b, 0xbe, 0xa2, 0x73, 0x3f, 0xbe, 0x0c, 0x6c, 0xa3, 0xbe, - 0x37, 0x4b, 0x8c, 0xbe, 0x91, 0x26, 0xa0, 0x3d, 0xb3, 0x04, 0xbd, 0x3e, - 0x01, 0x3e, 0x70, 0xbe, 0xd1, 0xdb, 0x69, 0xbe, 0xb4, 0xc0, 0x98, 0xbe, - 0xd4, 0xd9, 0x80, 0x3e, 0x62, 0xa9, 0x74, 0xbe, 0x8a, 0xe9, 0x83, 0xbe, - 0x7a, 0x92, 0x54, 0xbe, 0x92, 0x5d, 0x43, 0x3e, 0xe3, 0x35, 0x7b, 0x3e, - 0xee, 0x81, 0x2e, 0x3d, 0xbb, 0x68, 0xec, 0x3d, 0x70, 0x72, 0x1b, 0xbe, - 0x64, 0x20, 0xa4, 0xbe, 0x4f, 0x1f, 0x8d, 0xbd, 0xee, 0xd6, 0xf8, 0x3d, - 0xdb, 0x83, 0xb0, 0x3e, 0xd1, 0x99, 0x8c, 0xbe, 0x99, 0x21, 0x45, 0xbe, - 0x97, 0x04, 0x82, 0xbe, 0x25, 0xdf, 0x88, 0x3e, 0xe2, 0xe6, 0x5b, 0xbe, - 0xe5, 0x53, 0x68, 0x3d, 0x0b, 0xcd, 0x40, 0xbe, 0x4e, 0xea, 0x55, 0x3e, - 0x54, 0xd8, 0x85, 0x3e, 0x3c, 0xba, 0x82, 0x3d, 0x58, 0xc0, 0xe9, 0x3e, - 0x0c, 0xcc, 0x29, 0xbe, 0x3a, 0x6e, 0xa2, 0xbe, 0x84, 0x4a, 0x12, 0x3e, - 0x32, 0xd9, 0xcb, 0x3d, 0xad, 0x16, 0xd4, 0x3e, 0xf5, 0xa8, 0x85, 0xbe, - 0x5c, 0x34, 0xc8, 0xbd, 0x31, 0x02, 0xac, 0xbe, 0x3e, 0x15, 0x9e, 0x3e, - 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, - 0x0c, 0x00, 
0x10, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, 0x6c, 0x5f, - 0x62, 0x69, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x20, 0xff, 0xff, 0xff, - 0x40, 0x00, 0x00, 0x00, 0xa9, 0xa9, 0x93, 0x3f, 0x3c, 0xda, 0x21, 0xbe, - 0x33, 0xe3, 0x3d, 0x3d, 0xd8, 0xed, 0x33, 0xbf, 0x4c, 0x9d, 0x3e, 0xbf, - 0xba, 0x40, 0xa6, 0x3f, 0x6e, 0x4e, 0x25, 0xbe, 0x6e, 0x5e, 0x93, 0x3f, - 0x48, 0xa2, 0xc5, 0xbe, 0x67, 0xaa, 0x33, 0x3e, 0xc0, 0x66, 0x20, 0x3f, - 0xb3, 0x25, 0x7e, 0xbe, 0x8a, 0x2f, 0xd9, 0xbe, 0xda, 0x64, 0xc8, 0x3f, - 0x1a, 0x14, 0x89, 0xbd, 0xea, 0x48, 0x89, 0x3e, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x90, 0x01, 0x00, 0x00, 0x3c, 0x01, 0x00, 0x00, - 0xd0, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x30, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x70, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb0, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x1c, 0x00, 
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0xf0, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x05, - 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x14, 0x00, 0x18, 0x00, 0x00, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x07, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa8, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x05, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x18, 0x00, 0x07, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x1c, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, - 0x14, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, - 0xd6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xde, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x11, - 0xfa, 0xff, 0xff, 0xff, 0x00, 0x03, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x11, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}; -unsigned int model_tflite_len = 19616; diff --git a/examples/magic_wand/magic_wand_model_data.h b/examples/magic_wand/magic_wand_model_data.h deleted file mode 100644 index 40a0b4d..0000000 --- a/examples/magic_wand/magic_wand_model_data.h +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This is a standard TensorFlow Lite model file that has been converted into a -// C data array, so it can be easily compiled into a binary for devices that -// don't have a file system. It was created using the command: -// xxd -i magic_wand_model.tflite > magic_wand_model_data.cc - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ - -extern const unsigned char g_magic_wand_model_data[]; -extern const int g_magic_wand_model_data_len; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAGIC_WAND_MODEL_DATA_H_ diff --git a/examples/magic_wand/magic_wand_test.cpp b/examples/magic_wand/magic_wand_test.cpp deleted file mode 100644 index be8b82d..0000000 --- a/examples/magic_wand/magic_wand_test.cpp +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "magic_wand_model_data.h" -#include "ring_micro_features_data.h" -#include "slope_micro_features_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" -#include "tensorflow/lite/micro/testing/micro_test.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { - // Set up logging - tflite::MicroErrorReporter micro_error_reporter; - - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. - const tflite::Model* model = ::tflite::GetModel(g_magic_wand_model_data); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.\n", - model->version(), TFLITE_SCHEMA_VERSION); - } - - // Pull in only the operation implementations we need. - // This relies on a complete list of all the ops needed by this graph. - // An easier approach is to just use the AllOpsResolver, but this will - // incur some penalty in code space for op implementations that are not - // needed by this graph. - static tflite::MicroMutableOpResolver<5> micro_op_resolver; // NOLINT - micro_op_resolver.AddConv2D(); - micro_op_resolver.AddDepthwiseConv2D(); - micro_op_resolver.AddFullyConnected(); - micro_op_resolver.AddMaxPool2D(); - micro_op_resolver.AddSoftmax(); - - // Create an area of memory to use for input, output, and intermediate arrays. - // Finding the minimum value for your model may require some trial and error. 
- const int tensor_arena_size = 60 * 1024; - uint8_t tensor_arena[tensor_arena_size]; - - // Build an interpreter to run the model with - tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, - µ_error_reporter); - - // Allocate memory from the tensor_arena for the model's tensors - interpreter.AllocateTensors(); - - // Obtain a pointer to the model's input tensor - TfLiteTensor* input = interpreter.input(0); - - // Make sure the input has the properties we expect - TF_LITE_MICRO_EXPECT_NE(nullptr, input); - TF_LITE_MICRO_EXPECT_EQ(4, input->dims->size); - // The value of each element gives the length of the corresponding tensor. - TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(128, input->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(3, input->dims->data[2]); - TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[3]); - // The input is a 32 bit floating point value - TF_LITE_MICRO_EXPECT_EQ(kTfLiteFloat32, input->type); - - // Provide an input value - const float* ring_features_data = g_ring_micro_f9643d42_nohash_4_data; - TF_LITE_REPORT_ERROR(µ_error_reporter, "%d", input->bytes); - for (size_t i = 0; i < (input->bytes / sizeof(float)); ++i) { - input->data.f[i] = ring_features_data[i]; - } - - // Run the model on this input and check that it succeeds - TfLiteStatus invoke_status = interpreter.Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); - } - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); - - // Obtain a pointer to the output tensor and make sure it has the - // properties we expect. - TfLiteTensor* output = interpreter.output(0); - TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteFloat32, output->type); - - // There are four possible classes in the output, each with a score. 
- const int kWingIndex = 0; - const int kRingIndex = 1; - const int kSlopeIndex = 2; - const int kNegativeIndex = 3; - - // Make sure that the expected "Ring" score is higher than the other - // classes. - float wing_score = output->data.f[kWingIndex]; - float ring_score = output->data.f[kRingIndex]; - float slope_score = output->data.f[kSlopeIndex]; - float negative_score = output->data.f[kNegativeIndex]; - TF_LITE_MICRO_EXPECT_GT(ring_score, wing_score); - TF_LITE_MICRO_EXPECT_GT(ring_score, slope_score); - TF_LITE_MICRO_EXPECT_GT(ring_score, negative_score); - - // Now test with a different input, from a recording of "Slope". - const float* slope_features_data = g_slope_micro_f2e59fea_nohash_1_data; - for (size_t i = 0; i < (input->bytes / sizeof(float)); ++i) { - input->data.f[i] = slope_features_data[i]; - } - - // Run the model on this "Slope" input. - invoke_status = interpreter.Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); - } - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); - - // Get the output from the model, and make sure it's the expected size and - // type. - output = interpreter.output(0); - TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteFloat32, output->type); - - // Make sure that the expected "Slope" score is higher than the other classes. 
- wing_score = output->data.f[kWingIndex]; - ring_score = output->data.f[kRingIndex]; - slope_score = output->data.f[kSlopeIndex]; - negative_score = output->data.f[kNegativeIndex]; - TF_LITE_MICRO_EXPECT_GT(slope_score, wing_score); - TF_LITE_MICRO_EXPECT_GT(slope_score, ring_score); - TF_LITE_MICRO_EXPECT_GT(slope_score, negative_score); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/magic_wand/main.cpp b/examples/magic_wand/main.cpp deleted file mode 100644 index e08c300..0000000 --- a/examples/magic_wand/main.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "main_functions.h" - -// This is the default main used on systems that have the standard C entry -// point. Other devices (for example FreeRTOS or ESP32) that have different -// requirements for entry code (like an app_main function) should specialize -// this main.cc file in a target-specific subfolder. -int main(int argc, char* argv[]) { - setup(); - while (true) { - loop(); - } -} diff --git a/examples/magic_wand/main_functions.cpp b/examples/magic_wand/main_functions.cpp deleted file mode 100644 index 7368ecf..0000000 --- a/examples/magic_wand/main_functions.cpp +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "main_functions.h" - -#include "accelerometer_handler.h" -#include "constants.h" -#include "gesture_predictor.h" -#include "magic_wand_model_data.h" -#include "output_handler.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" - -// Globals, used for compatibility with Arduino-style sketches. -namespace { -tflite::ErrorReporter* error_reporter = nullptr; -const tflite::Model* model = nullptr; -tflite::MicroInterpreter* interpreter = nullptr; -TfLiteTensor* model_input = nullptr; -int input_length; - -// Create an area of memory to use for input, output, and intermediate arrays. -// The size of this will depend on the model you're using, and may need to be -// determined by experimentation. -constexpr int kTensorArenaSize = 60 * 1024; -uint8_t tensor_arena[kTensorArenaSize]; -} // namespace - -// The name of this function is important for Arduino compatibility. -void setup() { - // Set up logging. Google style is to avoid globals or statics because of - // lifetime uncertainty, but since this has a trivial destructor it's okay. 
- static tflite::MicroErrorReporter micro_error_reporter; // NOLINT - error_reporter = µ_error_reporter; - - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. - model = tflite::GetModel(g_magic_wand_model_data); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.", - model->version(), TFLITE_SCHEMA_VERSION); - return; - } - - // Pull in only the operation implementations we need. - // This relies on a complete list of all the ops needed by this graph. - // An easier approach is to just use the AllOpsResolver, but this will - // incur some penalty in code space for op implementations that are not - // needed by this graph. - static tflite::MicroMutableOpResolver<5> micro_op_resolver; // NOLINT - micro_op_resolver.AddConv2D(); - micro_op_resolver.AddDepthwiseConv2D(); - micro_op_resolver.AddFullyConnected(); - micro_op_resolver.AddMaxPool2D(); - micro_op_resolver.AddSoftmax(); - - // Build an interpreter to run the model with. - static tflite::MicroInterpreter static_interpreter( - model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter); - interpreter = &static_interpreter; - - // Allocate memory from the tensor_arena for the model's tensors. - interpreter->AllocateTensors(); - - // Obtain pointer to the model's input tensor. 
- model_input = interpreter->input(0); - if ((model_input->dims->size != 4) || (model_input->dims->data[0] != 1) || - (model_input->dims->data[1] != 128) || - (model_input->dims->data[2] != kChannelNumber) || - (model_input->type != kTfLiteFloat32)) { - TF_LITE_REPORT_ERROR(error_reporter, - "Bad input tensor parameters in model"); - return; - } - - input_length = model_input->bytes / sizeof(float); - - TfLiteStatus setup_status = SetupAccelerometer(error_reporter); - if (setup_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Set up failed\n"); - } -} - -void loop() { - // Attempt to read new data from the accelerometer. - bool got_data = - ReadAccelerometer(error_reporter, model_input->data.f, input_length); - // If there was no new data, wait until next time. - if (!got_data) return; - - // Run inference, and report any error. - TfLiteStatus invoke_status = interpreter->Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on index: %d\n", - begin_index); - return; - } - // Analyze the results to obtain a prediction - int gesture_index = PredictGesture(interpreter->output(0)->data.f); - - // Produce an output - HandleOutput(error_reporter, gesture_index); -} diff --git a/examples/magic_wand/main_functions.h b/examples/magic_wand/main_functions.h deleted file mode 100644 index d69755b..0000000 --- a/examples/magic_wand/main_functions.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_ - -// Expose a C friendly interface for main functions. -#ifdef __cplusplus -extern "C" { -#endif - -// Initializes all data needed for the example. The name is important, and needs -// to be setup() for Arduino compatibility. -void setup(); - -// Runs one iteration of data gathering and inference. This should be called -// repeatedly from the application code. The name needs to be loop() for Arduino -// compatibility. -void loop(); - -#ifdef __cplusplus -} -#endif - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_MAIN_FUNCTIONS_H_ diff --git a/examples/magic_wand/output_handler.cpp b/examples/magic_wand/output_handler.cpp deleted file mode 100644 index 1cdc9d8..0000000 --- a/examples/magic_wand/output_handler.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "output_handler.h" - -void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) { - // light (red: wing, blue: ring, green: slope) - if (kind == 0) { - TF_LITE_REPORT_ERROR( - error_reporter, - "WING:\n\r* * *\n\r * * * " - "*\n\r * * * *\n\r * * * *\n\r * * " - "* *\n\r * *\n\r"); - } else if (kind == 1) { - TF_LITE_REPORT_ERROR( - error_reporter, - "RING:\n\r *\n\r * *\n\r * *\n\r " - " * *\n\r * *\n\r * *\n\r " - " *\n\r"); - } else if (kind == 2) { - TF_LITE_REPORT_ERROR( - error_reporter, - "SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r " - "*\n\r *\n\r *\n\r * * * * * * * *\n\r"); - } -} diff --git a/examples/magic_wand/output_handler.h b/examples/magic_wand/output_handler.h deleted file mode 100644 index 7b85254..0000000 --- a/examples/magic_wand/output_handler.h +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -void HandleOutput(tflite::ErrorReporter* error_reporter, int kind); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_OUTPUT_HANDLER_H_ diff --git a/examples/magic_wand/output_handler_test.cpp b/examples/magic_wand/output_handler_test.cpp deleted file mode 100644 index 6868fb7..0000000 --- a/examples/magic_wand/output_handler_test.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "output_handler.h" - -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestCallability) { - tflite::MicroErrorReporter micro_error_reporter; - HandleOutput(µ_error_reporter, 0); - HandleOutput(µ_error_reporter, 1); - HandleOutput(µ_error_reporter, 2); - HandleOutput(µ_error_reporter, 3); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/magic_wand/ring_micro_features_data.cpp b/examples/magic_wand/ring_micro_features_data.cpp deleted file mode 100644 index 54c9992..0000000 --- a/examples/magic_wand/ring_micro_features_data.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "ring_micro_features_data.h" - -const int g_ring_micro_f9643d42_nohash_4_length = 128; -const int g_ring_micro_f9643d42_nohash_4_dim = 3; -// Raw accelerometer data with a sample rate of 25Hz -const float g_ring_micro_f9643d42_nohash_4_data[] = { - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -665.0, 228.0, 827.0, -680.0, 339.0, 716.0, - -680.0, 564.0, 812.0, -679.0, 552.0, 818.0, -665.0, 528.0, 751.0, - -658.0, 432.0, 618.0, -655.0, 445.0, 592.0, -667.0, 484.0, 556.0, - -684.0, 590.0, 510.0, -674.0, 672.0, 475.0, -660.0, 786.0, 390.0, - -562.0, 1124.0, 128.0, -526.0, 1140.0, 111.0, -486.0, 1044.0, 33.0, - -416.0, 652.0, -134.0, -390.0, 534.0, -143.0, -365.0, 381.0, -117.0, - -314.0, 60.0, 94.0, -322.0, 7.0, 190.0, -338.0, -95.0, 342.0, - -360.0, -106.0, 842.0, -351.0, -41.0, 965.0, -352.0, 12.0, 960.0, - -366.0, 42.0, 1124.0, -322.0, 56.0, 1178.0, -312.0, 15.0, 1338.0, - -254.0, 10.0, 1532.0, -241.0, 5.0, 1590.0, -227.0, 60.0, 1565.0, - -204.0, 282.0, 1560.0, -180.0, 262.0, 
1524.0, -138.0, 385.0, 1522.0, - -84.0, 596.0, 1626.0, -55.0, 639.0, 1604.0, -19.0, 771.0, 1511.0, - 16.0, 932.0, 1132.0, 15.0, 924.0, 1013.0, 1.0, 849.0, 812.0, - -88.0, 628.0, 500.0, -114.0, 609.0, 463.0, -155.0, 559.0, 382.0, - -234.0, 420.0, 278.0, -254.0, 390.0, 272.0, -327.0, 200.0, 336.0, - -558.0, -556.0, 630.0, -640.0, -607.0, 740.0, -706.0, -430.0, 868.0, - -778.0, 42.0, 1042.0, -763.0, 84.0, 973.0, -735.0, 185.0, 931.0, - -682.0, 252.0, 766.0, -673.0, 230.0, 757.0, -671.0, 218.0, 757.0, - -656.0, 222.0, 714.0, -659.0, 238.0, 746.0, -640.0, 276.0, 731.0, - -634.0, 214.0, 754.0, -637.0, 207.0, 735.0, -637.0, 194.0, 742.0, - -634.0, 248.0, 716.0, -631.0, 265.0, 697.0, -628.0, 252.0, 797.0, - -592.0, 204.0, 816.0, -618.0, 218.0, 812.0, -633.0, 231.0, 828.0, - -640.0, 222.0, 736.0, -634.0, 221.0, 787.0, -}; diff --git a/examples/magic_wand/ring_micro_features_data.h b/examples/magic_wand/ring_micro_features_data.h deleted file mode 100644 index 9cd02cd..0000000 --- a/examples/magic_wand/ring_micro_features_data.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_RING_MICRO_FEATURES_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_RING_MICRO_FEATURES_DATA_H_ - -extern const int g_ring_micro_f9643d42_nohash_4_length; -extern const int g_ring_micro_f9643d42_nohash_4_dim; -extern const float g_ring_micro_f9643d42_nohash_4_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_RING_MICRO_FEATURES_DATA_H_ diff --git a/examples/magic_wand/slope_micro_features_data.cpp b/examples/magic_wand/slope_micro_features_data.cpp deleted file mode 100644 index ff09138..0000000 --- a/examples/magic_wand/slope_micro_features_data.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "slope_micro_features_data.h" - -const int g_slope_micro_f2e59fea_nohash_1_length = 128; -const int g_slope_micro_f2e59fea_nohash_1_dim = 3; -// Raw accelerometer data with a sample rate of 25Hz -const float g_slope_micro_f2e59fea_nohash_1_data[] = { - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -766.0, 132.0, 709.0, -751.0, 249.0, 659.0, - -714.0, 314.0, 630.0, -709.0, 244.0, 623.0, -707.0, 230.0, 659.0, - -704.0, 202.0, 748.0, -714.0, 219.0, 728.0, -722.0, 239.0, 710.0, - -744.0, 116.0, 612.0, -753.0, -49.0, 570.0, -748.0, -279.0, 527.0, - -668.0, -664.0, 592.0, -601.0, -635.0, 609.0, -509.0, -559.0, 606.0, - -286.0, -162.0, 536.0, -255.0, -144.0, 495.0, -209.0, -85.0, 495.0, - 6.0, 416.0, 698.0, -33.0, 304.0, 1117.0, -82.0, 405.0, 1480.0, - -198.0, 1008.0, 1908.0, -229.0, 990.0, 1743.0, 
-234.0, 934.0, 1453.0, - -126.0, 838.0, 896.0, -78.0, 792.0, 911.0, -27.0, 741.0, 918.0, - 114.0, 734.0, 960.0, 135.0, 613.0, 959.0, 152.0, 426.0, 1015.0, - 106.0, -116.0, 1110.0, 63.0, -314.0, 1129.0, -12.0, -486.0, 1179.0, - -118.0, -656.0, 1510.0, -116.0, -558.0, 1553.0, -126.0, -361.0, 1367.0, - -222.0, -76.0, 922.0, -210.0, -26.0, 971.0, -194.0, 50.0, 1053.0, - -178.0, 72.0, 1082.0, -169.0, 100.0, 1073.0, -162.0, 133.0, 1050.0, - -156.0, 226.0, 976.0, -154.0, 323.0, 886.0, -130.0, 240.0, 1154.0, - -116.0, 124.0, 916.0, -132.0, 124.0, 937.0, -153.0, 115.0, 981.0, - -184.0, 94.0, 962.0, -177.0, 85.0, 1017.0, -173.0, 92.0, 1027.0, - -168.0, 158.0, 1110.0, -181.0, 101.0, 1030.0, -180.0, 139.0, 1054.0, - -152.0, 10.0, 1044.0, -169.0, 74.0, 1007.0, -}; diff --git a/examples/magic_wand/slope_micro_features_data.h b/examples/magic_wand/slope_micro_features_data.h deleted file mode 100644 index 6ed0c3c..0000000 --- a/examples/magic_wand/slope_micro_features_data.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_SLOPE_MICRO_FEATURES_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_SLOPE_MICRO_FEATURES_DATA_H_ - -extern const int g_slope_micro_f2e59fea_nohash_1_length; -extern const int g_slope_micro_f2e59fea_nohash_1_dim; -extern const float g_slope_micro_f2e59fea_nohash_1_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MAGIC_WAND_SLOPE_MICRO_FEATURES_DATA_H_ diff --git a/examples/micro_speech/CMakeLists.txt b/examples/micro_speech/CMakeLists.txt deleted file mode 100644 index 1fe2398..0000000 --- a/examples/micro_speech/CMakeLists.txt +++ /dev/null @@ -1,647 +0,0 @@ - -cmake_minimum_required(VERSION 3.12) - -project(micro_speech C CXX ASM) -set(CMAKE_C_STANDARD 11) -set(CMAKE_CXX_STANDARD 11) - - -add_executable(audio_provider_mock_test "") - -target_include_directories(audio_provider_mock_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - audio_provider_mock_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(audio_provider_mock_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/audio_provider_mock.cpp - ${CMAKE_CURRENT_LIST_DIR}/audio_provider_mock_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.h -) - -target_link_libraries( - audio_provider_mock_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(audio_provider_mock_test) - - -add_executable(feature_provider_mock_test "") - 
-target_include_directories(feature_provider_mock_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - feature_provider_mock_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(feature_provider_mock_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.c - ${CMAKE_CURRENT_LIST_DIR}/audio_provider_mock.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - 
${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/bits.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.h - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.h - 
${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h -) - -target_link_libraries( - feature_provider_mock_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(feature_provider_mock_test) - - -add_executable(micro_speech "") - -target_include_directories(micro_speech - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - micro_speech - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(micro_speech - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.c - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/command_responder.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/main.cpp - ${CMAKE_CURRENT_LIST_DIR}/main_functions.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.cpp 
- ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/bits.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.h - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/command_responder.h - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.h - ${CMAKE_CURRENT_LIST_DIR}/main_functions.h - 
${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h -) - -target_link_libraries( - micro_speech - pico-tflmicro - hardware_pwm -) - -pico_add_extra_outputs(micro_speech) - - -add_executable(simple_features_generator_test "") - -target_include_directories(simple_features_generator_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - simple_features_generator_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(simple_features_generator_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/no_30ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/simple_features/no_power_spectrum_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/simple_features/simple_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/simple_features/simple_features_generator_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/simple_features/yes_power_spectrum_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/yes_30ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/no_30ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/simple_features/no_power_spectrum_data.h - ${CMAKE_CURRENT_LIST_DIR}/simple_features/simple_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/simple_features/simple_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/simple_features/yes_power_spectrum_data.h - ${CMAKE_CURRENT_LIST_DIR}/yes_30ms_sample_data.h -) - -target_link_libraries( - simple_features_generator_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(simple_features_generator_test) - - -add_executable(command_responder_test "") - -target_include_directories(command_responder_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - command_responder_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(command_responder_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/command_responder.cpp - ${CMAKE_CURRENT_LIST_DIR}/command_responder_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/command_responder.h -) - -target_link_libraries( - command_responder_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(command_responder_test) - - -add_executable(micro_speech_mock "") - -target_include_directories(micro_speech_mock - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - micro_speech_mock - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(micro_speech_mock - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c - 
${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.c - ${CMAKE_CURRENT_LIST_DIR}/audio_provider_mock.cpp - ${CMAKE_CURRENT_LIST_DIR}/command_responder.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/main.cpp - ${CMAKE_CURRENT_LIST_DIR}/main_functions.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.cpp - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/bits.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.h - 
${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.h - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/command_responder.h - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.h - ${CMAKE_CURRENT_LIST_DIR}/main_functions.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/no_1000ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.h - ${CMAKE_CURRENT_LIST_DIR}/yes_1000ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h -) - -target_link_libraries( - micro_speech_mock - pico-tflmicro - hardware_pwm -) - -pico_add_extra_outputs(micro_speech_mock) - - 
-add_executable(micro_features_generator_test "") - -target_include_directories(micro_features_generator_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - micro_features_generator_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(micro_features_generator_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.c - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - 
${CMAKE_CURRENT_LIST_DIR}/micro_features/no_feature_data_slice.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_feature_data_slice.cpp - ${CMAKE_CURRENT_LIST_DIR}/no_30ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/yes_30ms_sample_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/bits.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - 
${CMAKE_CURRENT_LIST_DIR}/micro_features/no_feature_data_slice.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_feature_data_slice.h - ${CMAKE_CURRENT_LIST_DIR}/no_30ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/yes_30ms_sample_data.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h -) - -target_link_libraries( - micro_features_generator_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(micro_features_generator_test) - - -add_executable(feature_provider_test "") - -target_include_directories(feature_provider_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - feature_provider_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(feature_provider_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c - 
${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.c - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/feature_provider_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.c - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/bits.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/fft_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_lut.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h - 
${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/experimental/microfrontend/lib/window_util.h - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/feature_provider.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_features_generator.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h -) - -target_link_libraries( - feature_provider_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(feature_provider_test) - - -add_executable(micro_speech_test "") - -target_include_directories(micro_speech_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - micro_speech_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(micro_speech_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_speech_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/model.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/no_micro_features_data.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/yes_micro_features_data.h -) - -target_link_libraries( - micro_speech_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(micro_speech_test) - - -add_executable(audio_provider_test "") - -target_include_directories(audio_provider_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. -) - -set_target_properties( - audio_provider_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(audio_provider_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.cpp - ${CMAKE_CURRENT_LIST_DIR}/audio_provider_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/audio_provider.h - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h -) - -target_link_libraries( - audio_provider_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(audio_provider_test) - - -add_executable(recognize_commands_test "") - -target_include_directories(recognize_commands_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/. 
-) - -set_target_properties( - recognize_commands_test - PROPERTIES - COMPILE_FLAGS -fno-rtti - COMPILE_FLAGS -fno-exceptions - COMPILE_FLAGS -fno-threadsafe-statics - COMPILE_FLAGS -nostdlib -) - -target_sources(recognize_commands_test - PRIVATE - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.cpp - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/micro_features/micro_model_settings.h - ${CMAKE_CURRENT_LIST_DIR}/recognize_commands.h -) - -target_link_libraries( - recognize_commands_test - pico-tflmicro - hardware_pwm - pico-tflmicro_test -) - -pico_add_extra_outputs(recognize_commands_test) - diff --git a/examples/micro_speech/audio_provider.cpp b/examples/micro_speech/audio_provider.cpp deleted file mode 100644 index 5bb7d38..0000000 --- a/examples/micro_speech/audio_provider.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "audio_provider.h" - -#include "micro_features/micro_model_settings.h" - -namespace { -int16_t g_dummy_audio_data[kMaxAudioSampleSize]; -int32_t g_latest_audio_timestamp = 0; -} // namespace - -TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter, - int start_ms, int duration_ms, - int* audio_samples_size, int16_t** audio_samples) { - for (int i = 0; i < kMaxAudioSampleSize; ++i) { - g_dummy_audio_data[i] = 0; - } - *audio_samples_size = kMaxAudioSampleSize; - *audio_samples = g_dummy_audio_data; - return kTfLiteOk; -} - -int32_t LatestAudioTimestamp() { - g_latest_audio_timestamp += 100; - return g_latest_audio_timestamp; -} diff --git a/examples/micro_speech/audio_provider.h b/examples/micro_speech/audio_provider.h deleted file mode 100644 index c51cc5f..0000000 --- a/examples/micro_speech/audio_provider.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// This is an abstraction around an audio source like a microphone, and is -// expected to return 16-bit PCM sample data for a given point in time. The -// sample data itself should be used as quickly as possible by the caller, since -// to allow memory optimizations there are no guarantees that the samples won't -// be overwritten by new data in the future. In practice, implementations should -// ensure that there's a reasonable time allowed for clients to access the data -// before any reuse. -// The reference implementation can have no platform-specific dependencies, so -// it just returns an array filled with zeros. For real applications, you should -// ensure there's a specialized implementation that accesses hardware APIs. -TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter, - int start_ms, int duration_ms, - int* audio_samples_size, int16_t** audio_samples); - -// Returns the time that audio data was last captured in milliseconds. There's -// no contract about what time zero represents, the accuracy, or the granularity -// of the result. Subsequent calls will generally not return a lower value, but -// even that's not guaranteed if there's an overflow wraparound. -// The reference implementation of this function just returns a constantly -// incrementing value for each call, since it would need a non-portable platform -// call to access time information. For real applications, you'll need to write -// your own platform-specific implementation. 
-int32_t LatestAudioTimestamp(); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_ diff --git a/examples/micro_speech/audio_provider_mock.cpp b/examples/micro_speech/audio_provider_mock.cpp deleted file mode 100644 index 79f1a99..0000000 --- a/examples/micro_speech/audio_provider_mock.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "audio_provider.h" -#include "micro_features/micro_model_settings.h" -#include "no_1000ms_sample_data.h" -#include "yes_1000ms_sample_data.h" - -namespace { -int16_t g_dummy_audio_data[kMaxAudioSampleSize]; -int32_t g_latest_audio_timestamp = 0; -} // namespace - -TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter, - int start_ms, int duration_ms, - int* audio_samples_size, int16_t** audio_samples) { - const int yes_start = (0 * kAudioSampleFrequency) / 1000; - const int yes_end = (1000 * kAudioSampleFrequency) / 1000; - const int no_start = (4000 * kAudioSampleFrequency) / 1000; - const int no_end = (5000 * kAudioSampleFrequency) / 1000; - const int wraparound = (8000 * kAudioSampleFrequency) / 1000; - const int start_sample = (start_ms * kAudioSampleFrequency) / 1000; - for (int i = 0; i < kMaxAudioSampleSize; ++i) { - const int sample_index = (start_sample + i) % wraparound; - int16_t sample; - if ((sample_index >= yes_start) && 
(sample_index < yes_end)) { - sample = g_yes_1000ms_sample_data[sample_index - yes_start]; - } else if ((sample_index >= no_start) && (sample_index < no_end)) { - sample = g_no_1000ms_sample_data[sample_index - no_start]; - } else { - sample = 0; - } - g_dummy_audio_data[i] = sample; - } - *audio_samples_size = kMaxAudioSampleSize; - *audio_samples = g_dummy_audio_data; - return kTfLiteOk; -} - -int32_t LatestAudioTimestamp() { - g_latest_audio_timestamp += 100; - return g_latest_audio_timestamp; -} diff --git a/examples/micro_speech/audio_provider_mock_test.cpp b/examples/micro_speech/audio_provider_mock_test.cpp deleted file mode 100644 index 21e7a23..0000000 --- a/examples/micro_speech/audio_provider_mock_test.cpp +++ /dev/null @@ -1,76 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include - -#include "tensorflow/lite/c/common.h" -#include "audio_provider.h" -#include "micro_features/micro_model_settings.h" -#include "no_1000ms_sample_data.h" -#include "yes_1000ms_sample_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestAudioProviderMock) { - tflite::MicroErrorReporter micro_error_reporter; - - int audio_samples_size = 0; - int16_t* audio_samples = nullptr; - TfLiteStatus get_status = - GetAudioSamples(µ_error_reporter, 0, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); - TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); - for (int i = 0; i < audio_samples_size; ++i) { - TF_LITE_MICRO_EXPECT_EQ(g_yes_1000ms_sample_data[i], audio_samples[i]); - } - - get_status = - GetAudioSamples(µ_error_reporter, 500, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); - TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); - for (int i = 0; i < audio_samples_size; ++i) { - TF_LITE_MICRO_EXPECT_EQ(g_yes_1000ms_sample_data[i + 8000], - audio_samples[i]); - } - - get_status = - GetAudioSamples(µ_error_reporter, 1500, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); - TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); - for (int i = 0; i < audio_samples_size; ++i) { - TF_LITE_MICRO_EXPECT_EQ(0, audio_samples[i]); - } - - get_status = - GetAudioSamples(µ_error_reporter, 12250, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); - 
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); - TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); - for (int i = 0; i < audio_samples_size; ++i) { - TF_LITE_MICRO_EXPECT_EQ(g_no_1000ms_sample_data[i + 4000], - audio_samples[i]); - } -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/audio_provider_test.cpp b/examples/micro_speech/audio_provider_test.cpp deleted file mode 100644 index 5416c23..0000000 --- a/examples/micro_speech/audio_provider_test.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "audio_provider.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "micro_features/micro_model_settings.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestAudioProvider) { - tflite::MicroErrorReporter micro_error_reporter; - - int audio_samples_size = 0; - int16_t* audio_samples = nullptr; - TfLiteStatus get_status = - GetAudioSamples(µ_error_reporter, 0, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); - TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); - - // Make sure we can read all of the returned memory locations. - int total = 0; - for (int i = 0; i < audio_samples_size; ++i) { - total += audio_samples[i]; - } -} - -TF_LITE_MICRO_TEST(TestTimer) { - // Make sure that the technically-undefined overflow behavior we rely on below - // works on this platform. It's still not guaranteed, but at least this is a - // smoke check. Turn off when running with ASan, as it will complain about - // the following undefined behavior. -#ifndef ADDRESS_SANITIZER - int32_t overflow_value = std::numeric_limits::max(); - overflow_value += 1; - TF_LITE_MICRO_EXPECT_EQ(std::numeric_limits::min(), overflow_value); -#endif - - const int32_t first_time = LatestAudioTimestamp(); - const int32_t second_time = LatestAudioTimestamp(); - - // It's possible that the timer may have wrapped around from +BIG_NUM to - // -BIG_NUM between the first and second calls, since we're storing - // milliseconds in a 32-bit integer. It's not reasonable that the call itself - // would have taken more than 2^31 milliseconds though, so look at the - // difference and rely on integer overflow to ensure it's accurate. 
- const int32_t time_delta = (second_time - first_time); - TF_LITE_MICRO_EXPECT_LE(0, time_delta); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/command_responder.cpp b/examples/micro_speech/command_responder.cpp deleted file mode 100644 index 09310e7..0000000 --- a/examples/micro_speech/command_responder.cpp +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "command_responder.h" - -// The default implementation writes out the name of the recognized command -// to the error console. Real applications will want to take some custom -// action instead, and should implement their own versions of this function. -void RespondToCommand(tflite::ErrorReporter* error_reporter, - int32_t current_time, const char* found_command, - uint8_t score, bool is_new_command) { - if (is_new_command) { - TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command, - score, current_time); - } -} diff --git a/examples/micro_speech/command_responder.h b/examples/micro_speech/command_responder.h deleted file mode 100644 index ac3f448..0000000 --- a/examples/micro_speech/command_responder.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Provides an interface to take an action based on an audio command. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// Called every time the results of an audio recognition run are available. The -// human-readable name of any recognized command is in the `found_command` -// argument, `score` has the numerical confidence, and `is_new_command` is set -// if the previous command was different to this one. -void RespondToCommand(tflite::ErrorReporter* error_reporter, - int32_t current_time, const char* found_command, - uint8_t score, bool is_new_command); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_ diff --git a/examples/micro_speech/command_responder_test.cpp b/examples/micro_speech/command_responder_test.cpp deleted file mode 100644 index 6ed8f67..0000000 --- a/examples/micro_speech/command_responder_test.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "command_responder.h" - -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestCallability) { - tflite::MicroErrorReporter micro_error_reporter; - - // This will have external side-effects (like printing to the debug console - // or lighting an LED) that are hard to observe, so the most we can do is - // make sure the call doesn't crash. - RespondToCommand(µ_error_reporter, 0, "foo", 0, true); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/feature_provider.cpp b/examples/micro_speech/feature_provider.cpp deleted file mode 100644 index 2050fb5..0000000 --- a/examples/micro_speech/feature_provider.cpp +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "feature_provider.h" - -#include "audio_provider.h" -#include "micro_features/micro_features_generator.h" -#include "micro_features/micro_model_settings.h" - -FeatureProvider::FeatureProvider(int feature_size, int8_t* feature_data) - : feature_size_(feature_size), - feature_data_(feature_data), - is_first_run_(true) { - // Initialize the feature data to default values. - for (int n = 0; n < feature_size_; ++n) { - feature_data_[n] = 0; - } -} - -FeatureProvider::~FeatureProvider() {} - -TfLiteStatus FeatureProvider::PopulateFeatureData( - tflite::ErrorReporter* error_reporter, int32_t last_time_in_ms, - int32_t time_in_ms, int* how_many_new_slices) { - if (feature_size_ != kFeatureElementCount) { - TF_LITE_REPORT_ERROR(error_reporter, - "Requested feature_data_ size %d doesn't match %d", - feature_size_, kFeatureElementCount); - return kTfLiteError; - } - - // Quantize the time into steps as long as each window stride, so we can - // figure out which audio data we need to fetch. - const int last_step = (last_time_in_ms / kFeatureSliceStrideMs); - const int current_step = (time_in_ms / kFeatureSliceStrideMs); - - int slices_needed = current_step - last_step; - // If this is the first call, make sure we don't use any cached information. 
- if (is_first_run_) { - TfLiteStatus init_status = InitializeMicroFeatures(error_reporter); - if (init_status != kTfLiteOk) { - return init_status; - } - is_first_run_ = false; - slices_needed = kFeatureSliceCount; - } - if (slices_needed > kFeatureSliceCount) { - slices_needed = kFeatureSliceCount; - } - *how_many_new_slices = slices_needed; - - const int slices_to_keep = kFeatureSliceCount - slices_needed; - const int slices_to_drop = kFeatureSliceCount - slices_to_keep; - // If we can avoid recalculating some slices, just move the existing data - // up in the spectrogram, to perform something like this: - // last time = 80ms current time = 120ms - // +-----------+ +-----------+ - // | data@20ms | --> | data@60ms | - // +-----------+ -- +-----------+ - // | data@40ms | -- --> | data@80ms | - // +-----------+ -- -- +-----------+ - // | data@60ms | -- -- | | - // +-----------+ -- +-----------+ - // | data@80ms | -- | | - // +-----------+ +-----------+ - if (slices_to_keep > 0) { - for (int dest_slice = 0; dest_slice < slices_to_keep; ++dest_slice) { - int8_t* dest_slice_data = - feature_data_ + (dest_slice * kFeatureSliceSize); - const int src_slice = dest_slice + slices_to_drop; - const int8_t* src_slice_data = - feature_data_ + (src_slice * kFeatureSliceSize); - for (int i = 0; i < kFeatureSliceSize; ++i) { - dest_slice_data[i] = src_slice_data[i]; - } - } - } - // Any slices that need to be filled in with feature data have their - // appropriate audio data pulled, and features calculated for that slice. - if (slices_needed > 0) { - for (int new_slice = slices_to_keep; new_slice < kFeatureSliceCount; - ++new_slice) { - const int new_step = (current_step - kFeatureSliceCount + 1) + new_slice; - const int32_t slice_start_ms = (new_step * kFeatureSliceStrideMs); - int16_t* audio_samples = nullptr; - int audio_samples_size = 0; - // TODO(petewarden): Fix bug that leads to non-zero slice_start_ms - GetAudioSamples(error_reporter, (slice_start_ms > 0 ? 
slice_start_ms : 0), - kFeatureSliceDurationMs, &audio_samples_size, - &audio_samples); - if (audio_samples_size < kMaxAudioSampleSize) { - TF_LITE_REPORT_ERROR(error_reporter, - "Audio data size %d too small, want %d", - audio_samples_size, kMaxAudioSampleSize); - return kTfLiteError; - } - int8_t* new_slice_data = feature_data_ + (new_slice * kFeatureSliceSize); - size_t num_samples_read; - TfLiteStatus generate_status = GenerateMicroFeatures( - error_reporter, audio_samples, audio_samples_size, kFeatureSliceSize, - new_slice_data, &num_samples_read); - if (generate_status != kTfLiteOk) { - return generate_status; - } - } - } - return kTfLiteOk; -} diff --git a/examples/micro_speech/feature_provider.h b/examples/micro_speech/feature_provider.h deleted file mode 100644 index d086e01..0000000 --- a/examples/micro_speech/feature_provider.h +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// Binds itself to an area of memory intended to hold the input features for an -// audio-recognition neural network model, and fills that data area with the -// features representing the current audio input, for example from a microphone. -// The audio features themselves are a two-dimensional array, made up of -// horizontal slices representing the frequencies at one point in time, stacked -// on top of each other to form a spectrogram showing how those frequencies -// changed over time. -class FeatureProvider { - public: - // Create the provider, and bind it to an area of memory. This memory should - // remain accessible for the lifetime of the provider object, since subsequent - // calls will fill it with feature data. The provider does no memory - // management of this data. - FeatureProvider(int feature_size, int8_t* feature_data); - ~FeatureProvider(); - - // Fills the feature data with information from audio inputs, and returns how - // many feature slices were updated. - TfLiteStatus PopulateFeatureData(tflite::ErrorReporter* error_reporter, - int32_t last_time_in_ms, int32_t time_in_ms, - int* how_many_new_slices); - - private: - int feature_size_; - int8_t* feature_data_; - // Make sure we don't try to use cached information if this is the first call - // into the provider. 
- bool is_first_run_; -}; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_ diff --git a/examples/micro_speech/feature_provider_test.cpp b/examples/micro_speech/feature_provider_test.cpp deleted file mode 100644 index 1b3076f..0000000 --- a/examples/micro_speech/feature_provider_test.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "feature_provider.h" - -#include "tensorflow/lite/c/common.h" -#include "micro_features/micro_model_settings.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestFeatureProvider) { - tflite::MicroErrorReporter micro_error_reporter; - - int8_t feature_data[kFeatureElementCount]; - FeatureProvider feature_provider(kFeatureElementCount, feature_data); - - int how_many_new_slices = 0; - TfLiteStatus populate_status = feature_provider.PopulateFeatureData( - µ_error_reporter, /* last_time_in_ms= */ 0, /* time_in_ms= */ 10000, - &how_many_new_slices); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status); - TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/main.cpp b/examples/micro_speech/main.cpp deleted file mode 100644 index e08c300..0000000 --- 
a/examples/micro_speech/main.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "main_functions.h" - -// This is the default main used on systems that have the standard C entry -// point. Other devices (for example FreeRTOS or ESP32) that have different -// requirements for entry code (like an app_main function) should specialize -// this main.cc file in a target-specific subfolder. -int main(int argc, char* argv[]) { - setup(); - while (true) { - loop(); - } -} diff --git a/examples/micro_speech/main_functions.cpp b/examples/micro_speech/main_functions.cpp deleted file mode 100644 index 83ac052..0000000 --- a/examples/micro_speech/main_functions.cpp +++ /dev/null @@ -1,175 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "main_functions.h" - -#include "audio_provider.h" -#include "command_responder.h" -#include "feature_provider.h" -#include "micro_features/micro_model_settings.h" -#include "micro_features/model.h" -#include "recognize_commands.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" - -// Globals, used for compatibility with Arduino-style sketches. -namespace { -tflite::ErrorReporter* error_reporter = nullptr; -const tflite::Model* model = nullptr; -tflite::MicroInterpreter* interpreter = nullptr; -TfLiteTensor* model_input = nullptr; -FeatureProvider* feature_provider = nullptr; -RecognizeCommands* recognizer = nullptr; -int32_t previous_time = 0; - -// Create an area of memory to use for input, output, and intermediate arrays. -// The size of this will depend on the model you're using, and may need to be -// determined by experimentation. -constexpr int kTensorArenaSize = 10 * 1024; -uint8_t tensor_arena[kTensorArenaSize]; -int8_t feature_buffer[kFeatureElementCount]; -int8_t* model_input_buffer = nullptr; -} // namespace - -// The name of this function is important for Arduino compatibility. -void setup() { - // Set up logging. Google style is to avoid globals or statics because of - // lifetime uncertainty, but since this has a trivial destructor it's okay. - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroErrorReporter micro_error_reporter; - error_reporter = µ_error_reporter; - - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. 
- model = tflite::GetModel(g_model); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.", - model->version(), TFLITE_SCHEMA_VERSION); - return; - } - - // Pull in only the operation implementations we need. - // This relies on a complete list of all the ops needed by this graph. - // An easier approach is to just use the AllOpsResolver, but this will - // incur some penalty in code space for op implementations that are not - // needed by this graph. - // - // tflite::AllOpsResolver resolver; - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroMutableOpResolver<4> micro_op_resolver(error_reporter); - if (micro_op_resolver.AddDepthwiseConv2D() != kTfLiteOk) { - return; - } - if (micro_op_resolver.AddFullyConnected() != kTfLiteOk) { - return; - } - if (micro_op_resolver.AddSoftmax() != kTfLiteOk) { - return; - } - if (micro_op_resolver.AddReshape() != kTfLiteOk) { - return; - } - - // Build an interpreter to run the model with. - static tflite::MicroInterpreter static_interpreter( - model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter); - interpreter = &static_interpreter; - - // Allocate memory from the tensor_arena for the model's tensors. - TfLiteStatus allocate_status = interpreter->AllocateTensors(); - if (allocate_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed"); - return; - } - - // Get information about the memory area to use for the model's input. 
- model_input = interpreter->input(0); - if ((model_input->dims->size != 2) || (model_input->dims->data[0] != 1) || - (model_input->dims->data[1] != - (kFeatureSliceCount * kFeatureSliceSize)) || - (model_input->type != kTfLiteInt8)) { - TF_LITE_REPORT_ERROR(error_reporter, - "Bad input tensor parameters in model"); - return; - } - model_input_buffer = model_input->data.int8; - - // Prepare to access the audio spectrograms from a microphone or other source - // that will provide the inputs to the neural network. - // NOLINTNEXTLINE(runtime-global-variables) - static FeatureProvider static_feature_provider(kFeatureElementCount, - feature_buffer); - feature_provider = &static_feature_provider; - - static RecognizeCommands static_recognizer(error_reporter); - recognizer = &static_recognizer; - - previous_time = 0; -} - -// The name of this function is important for Arduino compatibility. -void loop() { - // Fetch the spectrogram for the current time. - const int32_t current_time = LatestAudioTimestamp(); - int how_many_new_slices = 0; - TfLiteStatus feature_status = feature_provider->PopulateFeatureData( - error_reporter, previous_time, current_time, &how_many_new_slices); - if (feature_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Feature generation failed"); - return; - } - previous_time = current_time; - // If no new audio samples have been received since last time, don't bother - // running the network model. - if (how_many_new_slices == 0) { - return; - } - - // Copy feature buffer to input tensor - for (int i = 0; i < kFeatureElementCount; i++) { - model_input_buffer[i] = feature_buffer[i]; - } - - // Run the model on the spectrogram input and make sure it succeeds. 
- TfLiteStatus invoke_status = interpreter->Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed"); - return; - } - - // Obtain a pointer to the output tensor - TfLiteTensor* output = interpreter->output(0); - // Determine whether a command was recognized based on the output of inference - const char* found_command = nullptr; - uint8_t score = 0; - bool is_new_command = false; - TfLiteStatus process_status = recognizer->ProcessLatestResults( - output, current_time, &found_command, &score, &is_new_command); - if (process_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, - "RecognizeCommands::ProcessLatestResults() failed"); - return; - } - // Do something based on the recognized command. The default implementation - // just prints to the error console, but you should replace this with your - // own function for a real application. - RespondToCommand(error_reporter, current_time, found_command, score, - is_new_command); -} diff --git a/examples/micro_speech/main_functions.h b/examples/micro_speech/main_functions.h deleted file mode 100644 index 0ac0677..0000000 --- a/examples/micro_speech/main_functions.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_ - -// Expose a C friendly interface for main functions. -#ifdef __cplusplus -extern "C" { -#endif - -// Initializes all data needed for the example. The name is important, and needs -// to be setup() for Arduino compatibility. -void setup(); - -// Runs one iteration of data gathering and inference. This should be called -// repeatedly from the application code. The name needs to be loop() for Arduino -// compatibility. -void loop(); - -#ifdef __cplusplus -} -#endif - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MAIN_FUNCTIONS_H_ diff --git a/examples/micro_speech/micro_features/micro_features_generator.cpp b/examples/micro_speech/micro_features/micro_features_generator.cpp deleted file mode 100644 index f4cc1e4..0000000 --- a/examples/micro_speech/micro_features/micro_features_generator.cpp +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "micro_features/micro_features_generator.h" - -#include -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h" -#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h" -#include "micro_features/micro_model_settings.h" - -// Configure FFT to output 16 bit fixed point. -#define FIXED_POINT 16 - -namespace { - -FrontendState g_micro_features_state; -bool g_is_first_time = true; - -} // namespace - -TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter) { - FrontendConfig config; - config.window.size_ms = kFeatureSliceDurationMs; - config.window.step_size_ms = kFeatureSliceStrideMs; - config.noise_reduction.smoothing_bits = 10; - config.filterbank.num_channels = kFeatureSliceSize; - config.filterbank.lower_band_limit = 125.0; - config.filterbank.upper_band_limit = 7500.0; - config.noise_reduction.smoothing_bits = 10; - config.noise_reduction.even_smoothing = 0.025; - config.noise_reduction.odd_smoothing = 0.06; - config.noise_reduction.min_signal_remaining = 0.05; - config.pcan_gain_control.enable_pcan = 1; - config.pcan_gain_control.strength = 0.95; - config.pcan_gain_control.offset = 80.0; - config.pcan_gain_control.gain_bits = 21; - config.log_scale.enable_log = 1; - config.log_scale.scale_shift = 6; - if (!FrontendPopulateState(&config, &g_micro_features_state, - kAudioSampleFrequency)) { - TF_LITE_REPORT_ERROR(error_reporter, "FrontendPopulateState() failed"); - return kTfLiteError; - } - g_is_first_time = true; - return kTfLiteOk; -} - -// This is not exposed in any header, and is only used for testing, to ensure -// that the state is correctly set up before generating results. 
-void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets) { - for (int i = 0; i < g_micro_features_state.filterbank.num_channels; ++i) { - g_micro_features_state.noise_reduction.estimate[i] = estimate_presets[i]; - } -} - -TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter, - const int16_t* input, int input_size, - int output_size, int8_t* output, - size_t* num_samples_read) { - const int16_t* frontend_input; - if (g_is_first_time) { - frontend_input = input; - g_is_first_time = false; - } else { - frontend_input = input + 160; - } - FrontendOutput frontend_output = FrontendProcessSamples( - &g_micro_features_state, frontend_input, input_size, num_samples_read); - - for (size_t i = 0; i < frontend_output.size; ++i) { - // These scaling values are derived from those used in input_data.py in the - // training pipeline. - // The feature pipeline outputs 16-bit signed integers in roughly a 0 to 670 - // range. In training, these are then arbitrarily divided by 25.6 to get - // float values in the rough range of 0.0 to 26.0. This scaling is performed - // for historical reasons, to match up with the output of other feature - // generators. - // The process is then further complicated when we quantize the model. This - // means we have to scale the 0.0 to 26.0 real values to the -128 to 127 - // signed integer numbers. 
- // All this means that to get matching values from our integer feature - // output into the tensor input, we have to perform: - // input = (((feature / 25.6) / 26.0) * 256) - 128 - // To simplify this and perform it in 32-bit integer math, we rearrange to: - // input = (feature * 256) / (25.6 * 26.0) - 128 - constexpr int32_t value_scale = 256; - constexpr int32_t value_div = static_cast((25.6f * 26.0f) + 0.5f); - int32_t value = - ((frontend_output.values[i] * value_scale) + (value_div / 2)) / - value_div; - value -= 128; - if (value < -128) { - value = -128; - } - if (value > 127) { - value = 127; - } - output[i] = value; - } - - return kTfLiteOk; -} diff --git a/examples/micro_speech/micro_features/micro_features_generator.h b/examples/micro_speech/micro_features/micro_features_generator.h deleted file mode 100644 index 2930423..0000000 --- a/examples/micro_speech/micro_features/micro_features_generator.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// Sets up any resources needed for the feature generation pipeline. 
-TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter); - -// Converts audio sample data into a more compact form that's appropriate for -// feeding into a neural network. -TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter, - const int16_t* input, int input_size, - int output_size, int8_t* output, - size_t* num_samples_read); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_ diff --git a/examples/micro_speech/micro_features/micro_features_generator_test.cpp b/examples/micro_speech/micro_features/micro_features_generator_test.cpp deleted file mode 100644 index 8043170..0000000 --- a/examples/micro_speech/micro_features/micro_features_generator_test.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "micro_features/micro_features_generator.h" - -#include "tensorflow/lite/c/common.h" -#include "micro_features/no_feature_data_slice.h" -#include "micro_features/yes_feature_data_slice.h" -#include "no_30ms_sample_data.h" -#include "yes_30ms_sample_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -// This is a test-only API, not exposed in any public headers, so declare it. 
-void SetMicroFeaturesNoiseEstimates(const uint32_t* estimate_presets); - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) { - tflite::MicroErrorReporter micro_error_reporter; - - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - InitializeMicroFeatures(µ_error_reporter)); - - // The micro features pipeline retains state from previous calls to help - // estimate the background noise. Unfortunately this makes it harder to - // exactly reproduce results in a test environment, so use a known snapshot - // of the parameters at the point that the golden feature values were - // created. - const uint32_t yes_estimate_presets[] = { - 1062898, 2644477, 1257642, 1864718, 412722, 725703, 395721, 474082, - 173046, 255856, 158966, 153736, 69181, 199100, 144493, 227740, - 110573, 164330, 79666, 144650, 122947, 476799, 398553, 497493, - 322152, 1140005, 566716, 690605, 308902, 347481, 109891, 170457, - 73901, 100975, 42963, 72325, 34183, 20207, 6640, 9468, - }; - SetMicroFeaturesNoiseEstimates(yes_estimate_presets); - - int8_t yes_calculated_data[g_yes_feature_data_slice_size]; - size_t num_samples_read; - TfLiteStatus yes_status = GenerateMicroFeatures( - µ_error_reporter, g_yes_30ms_sample_data, - g_yes_30ms_sample_data_size, g_yes_feature_data_slice_size, - yes_calculated_data, &num_samples_read); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status); - - for (int i = 0; i < g_yes_feature_data_slice_size; ++i) { - const int expected = g_yes_feature_data_slice[i]; - const int actual = yes_calculated_data[i]; - TF_LITE_MICRO_EXPECT_EQ(expected, actual); - if (expected != actual) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Expected value %d but found %d", expected, actual); - } - } -} - -TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorNo) { - tflite::MicroErrorReporter micro_error_reporter; - - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - InitializeMicroFeatures(µ_error_reporter)); - // As we did for the previous features, set known good noise state - // parameters. 
- const uint32_t no_estimate_presets[] = { - 2563964, 1909393, 559801, 538670, 203643, 175959, 75088, 139491, - 59691, 95307, 43865, 129263, 52517, 80058, 51330, 100731, - 76674, 76262, 15497, 22598, 13778, 21460, 8946, 17806, - 10023, 18810, 8002, 10842, 7578, 9983, 6267, 10759, - 8946, 18488, 9691, 39785, 9939, 17835, 9671, 18512, - }; - SetMicroFeaturesNoiseEstimates(no_estimate_presets); - - int8_t no_calculated_data[g_no_feature_data_slice_size]; - size_t num_samples_read; - TfLiteStatus no_status = GenerateMicroFeatures( - µ_error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, - g_no_feature_data_slice_size, no_calculated_data, &num_samples_read); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status); - - for (size_t i = 0; i < g_no_feature_data_slice_size; ++i) { - const int expected = g_no_feature_data_slice[i]; - const int actual = no_calculated_data[i]; - TF_LITE_MICRO_EXPECT_EQ(expected, actual); - if (expected != actual) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Expected value %d but found %d", expected, actual); - } - } -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/micro_features/micro_model_settings.cpp b/examples/micro_speech/micro_features/micro_model_settings.cpp deleted file mode 100644 index 7080fe1..0000000 --- a/examples/micro_speech/micro_features/micro_model_settings.cpp +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "micro_features/micro_model_settings.h" - -const char* kCategoryLabels[kCategoryCount] = { - "silence", - "unknown", - "yes", - "no", -}; diff --git a/examples/micro_speech/micro_features/micro_model_settings.h b/examples/micro_speech/micro_features/micro_model_settings.h deleted file mode 100644 index e542213..0000000 --- a/examples/micro_speech/micro_features/micro_model_settings.h +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ - -// Keeping these as constant expressions allow us to allocate fixed-sized arrays -// on the stack for our working memory. - -// The size of the input time series data we pass to the FFT to produce the -// frequency information. This has to be a power of two, and since we're dealing -// with 30ms of 16KHz inputs, which means 480 samples, this is the next value. -constexpr int kMaxAudioSampleSize = 512; -constexpr int kAudioSampleFrequency = 16000; - -// The following values are derived from values used during model training. -// If you change the way you preprocess the input, update all these constants. 
-constexpr int kFeatureSliceSize = 40; -constexpr int kFeatureSliceCount = 49; -constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount); -constexpr int kFeatureSliceStrideMs = 20; -constexpr int kFeatureSliceDurationMs = 30; - -// Variables for the model's output categories. -constexpr int kSilenceIndex = 0; -constexpr int kUnknownIndex = 1; -// If you modify the output categories, you need to update the following values. -constexpr int kCategoryCount = 4; -extern const char* kCategoryLabels[kCategoryCount]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_ diff --git a/examples/micro_speech/micro_features/model.cpp b/examples/micro_speech/micro_features/model.cpp deleted file mode 100644 index b53d879..0000000 --- a/examples/micro_speech/micro_features/model.cpp +++ /dev/null @@ -1,1596 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This is a standard TensorFlow Lite FlatBuffer model file that has been -// converted into a C data array, so it can be easily compiled into a binary -// for devices that don't have a file system. It was created using the command: -// xxd -i model.tflite > model.cc - -#include "micro_features/model.h" - -// We need to keep the data array aligned on some architectures. 
-#ifdef __has_attribute -#define HAVE_ATTRIBUTE(x) __has_attribute(x) -#else -#define HAVE_ATTRIBUTE(x) 0 -#endif -#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) -#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4))) -#else -#define DATA_ALIGN_ATTRIBUTE -#endif - -const unsigned char g_model[] DATA_ALIGN_ATTRIBUTE = { - 0x20, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x12, 0x00, 0x1c, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x00, 0x12, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x94, 0x48, 0x00, 0x00, 0x34, 0x42, 0x00, 0x00, - 0x1c, 0x42, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x6d, 0x69, 0x6e, 0x5f, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x00, 0x0c, 0x00, 0x00, 0x00, 0xd4, 0x41, 0x00, 0x00, - 0xb4, 0x41, 0x00, 0x00, 0x24, 0x03, 0x00, 0x00, 0xf4, 0x02, 0x00, 0x00, - 0xec, 0x02, 0x00, 0x00, 0xe4, 0x02, 0x00, 0x00, 0xc4, 0x02, 0x00, 0x00, - 0xbc, 0x02, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x16, 0xbd, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x31, 0x2e, 0x35, 0x2e, - 0x30, 0x00, 0x00, 0x00, 0x94, 0xba, 0xff, 0xff, 0x98, 0xba, 0xff, 0xff, - 0x32, 0xbd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, - 0xfa, 0xee, 0x28, 0xc4, 0xee, 0xfe, 0xcf, 0x0f, 0x1e, 0xf7, 0x1f, 0x06, - 0x0d, 0xed, 0xe9, 0x83, 0x5c, 0xc9, 0x18, 0xe3, 0xf9, 0x14, 0x28, 0x2a, - 0x09, 0xf2, 0x18, 0x34, 0x62, 0xea, 0xef, 0xd6, 0x36, 0xb7, 0x1e, 0xf7, - 0x3b, 0x22, 0x28, 0x39, 0xc2, 0x9d, 0xf1, 0x07, 0x5e, 0x0b, 0x1e, 0x2c, - 0x07, 0xdd, 0xfd, 0xc3, 0xd8, 0x4a, 0xf3, 0x28, 0xa7, 0x16, 0xd5, 0xf1, - 0xc3, 0x05, 0xfd, 
0x27, 0xcc, 0xba, 0x1e, 0xcb, 0xd7, 0x3d, 0xd4, 0x29, - 0x00, 0xfd, 0x28, 0x44, 0xfb, 0xf2, 0xf3, 0xb6, 0x4f, 0xcf, 0x09, 0xf0, - 0xfa, 0x45, 0x41, 0x49, 0x05, 0xc5, 0x17, 0x5d, 0x64, 0x00, 0xf8, 0xee, - 0x48, 0x17, 0xf4, 0xe9, 0x2e, 0x4b, 0x2e, 0x3f, 0xdf, 0xee, 0xe4, 0x08, - 0x38, 0xf1, 0x16, 0x13, 0x2f, 0x2a, 0xed, 0xc2, 0xbf, 0x36, 0xf4, 0x02, - 0xcf, 0xaa, 0xd2, 0xfa, 0xac, 0x13, 0xf6, 0xe8, 0xb5, 0x68, 0x12, 0xb6, - 0xce, 0x0e, 0xdf, 0x58, 0xe4, 0x49, 0x14, 0x15, 0x03, 0xed, 0xfa, 0xd4, - 0x40, 0xa7, 0xf6, 0xca, 0xfb, 0x00, 0x4d, 0x5e, 0xe4, 0x55, 0x1d, 0x30, - 0x45, 0xe2, 0xfc, 0x01, 0x48, 0x81, 0xe9, 0xf1, 0x1e, 0xfc, 0x21, 0x32, - 0xed, 0x4b, 0xed, 0xfa, 0x2f, 0xd2, 0xfa, 0xfb, 0x4d, 0xa7, 0xed, 0xc7, - 0x92, 0xdf, 0xe6, 0xdb, 0xf8, 0x1f, 0xd9, 0xfa, 0x91, 0xf5, 0xe5, 0xc5, - 0x8c, 0x17, 0x0f, 0xb9, 0xd2, 0xc7, 0xfe, 0x68, 0xd3, 0x51, 0x2e, 0x49, - 0x1f, 0xbd, 0x01, 0xeb, 0x31, 0x17, 0xf0, 0xef, 0xff, 0xb8, 0x5d, 0x62, - 0x02, 0x0f, 0x1f, 0x78, 0x6a, 0xb0, 0xf9, 0xfe, 0x4f, 0xcc, 0xd3, 0xff, - 0x0a, 0x96, 0x1e, 0x2c, 0xed, 0xbc, 0xf4, 0x0b, 0x42, 0xc8, 0xf1, 0xea, - 0x6e, 0x58, 0xec, 0xc4, 0x99, 0xae, 0xdc, 0xd7, 0x12, 0x87, 0xd8, 0x06, - 0xa2, 0xc2, 0xe6, 0xa2, 0x81, 0x24, 0xe9, 0xac, 0xce, 0xb6, 0x15, 0x6b, - 0xba, 0x00, 0x19, 0x58, 0x29, 0xb6, 0xfe, 0x01, 0x25, 0x96, 0xd2, 0xec, - 0x0e, 0x9c, 0x60, 0x5f, 0xe9, 0xf4, 0xf5, 0x69, 0x6b, 0xb5, 0xe1, 0xf6, - 0x5e, 0xb7, 0xb1, 0xe5, 0x11, 0x9b, 0x18, 0x10, 0xe3, 0xe1, 0xe0, 0x0d, - 0x4f, 0xa5, 0xde, 0xe5, 0x6f, 0xe2, 0xfb, 0x99, 0x82, 0xa5, 0xc9, 0xb6, - 0x1f, 0x46, 0xf3, 0x04, 0xc6, 0xca, 0xd6, 0x97, 0x90, 0x1d, 0xc0, 0x95, - 0xf0, 0x19, 0x30, 0x77, 0xc2, 0x3c, 0xfa, 0x24, 0x02, 0x4d, 0x06, 0x07, - 0x15, 0x02, 0xb0, 0xe7, 0x27, 0x22, 0x67, 0x4d, 0xf1, 0xc2, 0xf4, 0x64, - 0x38, 0x40, 0xdf, 0xf6, 0x3a, 0x43, 0xb8, 0xe1, 0x0d, 0x15, 0x11, 0xfe, - 0xf5, 0xec, 0xf9, 0xe5, 0x22, 0x36, 0xe4, 0xfd, 0x6d, 0xbf, 0x0d, 0x8e, - 0xb7, 0x15, 0xbf, 0x9f, 0x16, 0xad, 0x0a, 0x02, 0x8e, 0x14, 0xda, 0x9b, - 0x8e, 0xc3, 0xa6, 
0xca, 0xf5, 0x7f, 0x51, 0x56, 0xc1, 0xb3, 0xd9, 0x35, - 0xf8, 0x7f, 0x04, 0x0a, 0x03, 0x3f, 0xbe, 0xee, 0x19, 0x68, 0x78, 0x50, - 0xf9, 0xa7, 0xf7, 0x7f, 0x1d, 0x76, 0xdb, 0xe8, 0x33, 0xb9, 0xd7, 0xe7, - 0xe8, 0x69, 0x15, 0xf7, 0xf5, 0xb2, 0xfe, 0xe8, 0xf3, 0x5b, 0xe2, 0x06, - 0x6e, 0x09, 0x36, 0xb7, 0xcc, 0x38, 0xbf, 0x8a, 0x28, 0x14, 0x2e, 0x18, - 0xa7, 0x26, 0xcb, 0xb2, 0x95, 0x37, 0xac, 0xcd, 0xd7, 0x51, 0x67, 0x44, - 0xcd, 0x31, 0xde, 0x04, 0xe9, 0x6a, 0x00, 0x13, 0x0a, 0x0c, 0xdd, 0x16, - 0xe0, 0x24, 0x7e, 0x49, 0xf1, 0xb5, 0x04, 0x52, 0x01, 0x50, 0xdd, 0xf5, - 0x26, 0xc9, 0xf4, 0xf8, 0xd6, 0x31, 0x1b, 0xd0, 0xef, 0x03, 0x0a, 0xc0, - 0xd4, 0x4f, 0xe2, 0xfd, 0x72, 0xf4, 0x5a, 0xc9, 0xd7, 0x31, 0xc0, 0x8e, - 0x17, 0x5e, 0x57, 0x00, 0xb4, 0x3a, 0xc8, 0xd2, 0x92, 0x32, 0xcb, 0xd8, - 0xc3, 0xa6, 0x63, 0x26, 0xcf, 0xbc, 0xe8, 0x57, 0x9b, 0xe9, 0xf7, 0x1c, - 0xea, 0x12, 0xf1, 0xf7, 0xdb, 0xb9, 0x7f, 0x16, 0xf6, 0xe0, 0x08, 0x70, - 0xa2, 0xed, 0xcc, 0xf1, 0x1e, 0x10, 0x04, 0xf7, 0xa9, 0xb7, 0x34, 0xaa, - 0x0a, 0xdb, 0x2a, 0xa6, 0xb6, 0x10, 0xea, 0xf8, 0x5e, 0x06, 0x72, 0xdd, - 0xd0, 0xb9, 0xd6, 0xa0, 0x10, 0x9f, 0x5a, 0x17, 0xb1, 0xe7, 0xc0, 0x01, - 0x9d, 0x01, 0xe0, 0xe0, 0xaf, 0x9c, 0x46, 0xd8, 0xaf, 0xe8, 0xce, 0x02, - 0x8a, 0xbb, 0xe4, 0xf6, 0xf3, 0x36, 0x07, 0xca, 0xcb, 0x87, 0x6e, 0xcc, - 0xd6, 0x9e, 0x0a, 0x2a, 0x81, 0xd7, 0xcf, 0xc0, 0x04, 0xeb, 0x24, 0xcc, - 0xc9, 0x95, 0x33, 0x81, 0xf7, 0xad, 0x1c, 0x9c, 0xa4, 0xd6, 0xf9, 0xe6, - 0x3d, 0x84, 0x7f, 0xcc, 0xd4, 0xb0, 0xf4, 0xa2, 0xe9, 0x3c, 0x36, 0xee, - 0xd5, 0xcf, 0xcd, 0x2d, 0x28, 0xbd, 0xff, 0xff, 0xc2, 0xbf, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x31, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x48, 0xbd, 0xff, 0xff, 0x4c, 0xbd, 0xff, 0xff, 0xe6, 0xbf, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x8a, 0xfe, 0xff, 0xff, - 0xa9, 0x00, 0x00, 0x00, 0xd0, 0xff, 0xff, 0xff, 0xd0, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x00, 
0x00, 0x06, 0x00, 0x00, 0x00, 0x4f, 0xfb, 0xff, 0xff, - 0x4a, 0xfd, 0xff, 0xff, 0x12, 0xc0, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x80, 0x3e, 0x00, 0x00, 0xff, 0xf9, 0xfd, 0x0a, 0x07, 0x08, 0x07, 0x03, - 0x07, 0xf2, 0xd1, 0x09, 0xf0, 0xe9, 0x28, 0x09, 0xdf, 0x05, 0xfa, 0xf0, - 0xe8, 0xe3, 0x13, 0x0e, 0x08, 0xef, 0xd3, 0xee, 0x0f, 0xe8, 0xeb, 0x14, - 0xf7, 0xed, 0xfd, 0x1f, 0xe8, 0xd5, 0xeb, 0xfc, 0x0e, 0xf4, 0xf7, 0x07, - 0x05, 0xea, 0xf6, 0x1f, 0xf8, 0xdb, 0xdc, 0x0b, 0x03, 0xdd, 0xd8, 0xf3, - 0x0f, 0x19, 0xe1, 0x09, 0xfc, 0xe4, 0x02, 0x04, 0xf1, 0x04, 0xeb, 0xf3, - 0x1e, 0x06, 0xfd, 0x11, 0xfc, 0xfa, 0xf6, 0x1f, 0x0f, 0x02, 0xf5, 0xf7, - 0xff, 0x24, 0xdf, 0xf7, 0xf8, 0xf3, 0xf6, 0xe9, 0xef, 0x03, 0xdd, 0xf2, - 0x28, 0xe1, 0xf2, 0x22, 0xf4, 0x09, 0xf7, 0xf9, 0xf0, 0xd4, 0xf9, 0xee, - 0xff, 0x14, 0xda, 0xf3, 0x11, 0xe2, 0xf6, 0x0c, 0xf2, 0xeb, 0xf8, 0xe8, - 0xe3, 0x08, 0x02, 0x17, 0xf4, 0x0b, 0x0c, 0x27, 0xe6, 0x02, 0x03, 0xf9, - 0x14, 0x18, 0xf6, 0xeb, 0x1f, 0x0c, 0xf1, 0xee, 0xfc, 0x08, 0xf0, 0xfe, - 0xfd, 0xee, 0x17, 0xfd, 0x1c, 0xef, 0xfd, 0xde, 0x04, 0x05, 0xf0, 0x31, - 0xfa, 0x0b, 0xdc, 0x0d, 0xed, 0xf5, 0xfa, 0xf4, 0x08, 0x0c, 0xd7, 0x1e, - 0x15, 0x03, 0xf5, 0x02, 0xf4, 0xfb, 0xed, 0x01, 0xfe, 0xd6, 0x1f, 0xfd, - 0xfd, 0x0e, 0xfa, 0x06, 0xf1, 0xf9, 0xe2, 0x16, 0xe9, 0xf1, 0x03, 0x0d, - 0x0d, 0xdf, 0xf9, 0x1a, 0x0e, 0xf6, 0xfc, 0x0a, 0x19, 0xe2, 0xe0, 0x09, - 0x15, 0xf0, 0xf1, 0x06, 0xf1, 0xe1, 0xef, 0x1a, 0x08, 0xe8, 0xfd, 0x12, - 0x14, 0x06, 0xf1, 0xfc, 0xea, 0xfb, 0xf7, 0xea, 0x1d, 0x09, 0xfa, 0xf6, - 0x08, 0xf2, 0xe7, 0xf8, 0xfc, 0x16, 0xf5, 0x0e, 0x08, 0xf9, 0x0a, 0x03, - 0x26, 0xd8, 0x02, 0xf5, 0xf6, 0xf6, 0xef, 0x1f, 0xe4, 0xe2, 0xfb, 0x02, - 0x1b, 0xe6, 0xde, 0x00, 0xf2, 0xed, 0xfb, 0x18, 0xe4, 0x16, 0x1a, 0x1d, - 0xf1, 0xf6, 0xea, 0x16, 0x05, 0xde, 0xfb, 0x18, 0xf5, 0xe4, 0xfe, 0xe2, - 0x1b, 0x1c, 0x0c, 0xe8, 0x02, 0xee, 0xfb, 0x07, 0x24, 0xf2, 0xe9, 0xfa, - 0x0d, 0x05, 0xf1, 0x03, 0xfe, 0xf6, 0x19, 0x06, 0xff, 0xf9, 0x04, 0xfb, - 0x15, 0xef, 0xf1, 
0xf8, 0xe9, 0xe1, 0x10, 0x04, 0xfc, 0xe6, 0x1f, 0xed, - 0x0b, 0xef, 0x00, 0x1e, 0xe6, 0x16, 0xf3, 0x09, 0xfd, 0x08, 0x08, 0x06, - 0x06, 0x23, 0xdf, 0xfc, 0x08, 0xf4, 0xea, 0x0c, 0xf2, 0xe6, 0x18, 0xf5, - 0x02, 0xf9, 0x50, 0x09, 0x01, 0xda, 0x0b, 0x05, 0x12, 0x18, 0xef, 0x04, - 0x0e, 0xd9, 0xff, 0xdc, 0xf6, 0x16, 0xf9, 0xf4, 0xec, 0xff, 0xea, 0xe6, - 0xfa, 0x0a, 0xed, 0xef, 0x02, 0xf0, 0x25, 0x21, 0xf1, 0x26, 0xf5, 0xed, - 0x09, 0xea, 0xea, 0x24, 0xfa, 0x11, 0xfc, 0xdf, 0xf3, 0x0a, 0x28, 0x0c, - 0x19, 0xff, 0xf5, 0xd6, 0x0e, 0xe2, 0x2a, 0x06, 0xfa, 0x03, 0xf9, 0xe6, - 0xef, 0x23, 0xf9, 0xfa, 0xe6, 0xfe, 0xfc, 0x03, 0x06, 0x1a, 0xf9, 0x08, - 0xe0, 0xe5, 0xff, 0x05, 0x01, 0xe7, 0x12, 0x02, 0x1d, 0x05, 0x03, 0x05, - 0x0b, 0xee, 0xed, 0xfc, 0x0f, 0xf3, 0x02, 0xe0, 0x15, 0xdf, 0x02, 0xed, - 0x10, 0x26, 0xef, 0x0d, 0x06, 0xee, 0xef, 0xf6, 0xeb, 0x11, 0x09, 0xf4, - 0xf7, 0x06, 0x0f, 0x01, 0x2a, 0x0b, 0x01, 0xdd, 0xfc, 0xf4, 0xf1, 0x17, - 0x03, 0x04, 0x07, 0xfc, 0x22, 0xfc, 0xde, 0xfe, 0x0b, 0x03, 0xf3, 0xfb, - 0x0c, 0x25, 0x04, 0x19, 0x04, 0x03, 0x01, 0xfa, 0xfb, 0xf7, 0xf6, 0x0e, - 0x15, 0x0e, 0x09, 0xff, 0x06, 0xfa, 0xfb, 0x1e, 0xfb, 0x05, 0x22, 0xf9, - 0xfe, 0xf7, 0x1d, 0xed, 0xdf, 0x18, 0x09, 0xeb, 0xef, 0x04, 0x12, 0xea, - 0xdf, 0xfb, 0xda, 0xf6, 0xdf, 0x17, 0xef, 0xef, 0xe1, 0x1a, 0xd9, 0xe2, - 0xe2, 0xfc, 0x05, 0x11, 0xf6, 0xee, 0xe8, 0xf2, 0xe1, 0x08, 0x26, 0x04, - 0xed, 0x03, 0xe0, 0xfb, 0xee, 0x0c, 0xee, 0xf6, 0x04, 0x2d, 0xf2, 0xd3, - 0xf4, 0xe0, 0xf8, 0x0c, 0xfe, 0x11, 0x0b, 0xd7, 0xfd, 0x18, 0x07, 0x0d, - 0x07, 0x08, 0xf4, 0xc6, 0x0a, 0x0a, 0x1f, 0x0c, 0xf4, 0x1d, 0x02, 0x0b, - 0x09, 0x0e, 0x21, 0xff, 0x17, 0x0b, 0x0d, 0xf2, 0xed, 0xd7, 0x0a, 0xf8, - 0x03, 0x06, 0xfa, 0xe5, 0xfd, 0x03, 0x14, 0x0f, 0xe9, 0x1a, 0xf4, 0xda, - 0x01, 0xe6, 0x09, 0x06, 0x11, 0x0d, 0xfd, 0xeb, 0x16, 0x23, 0xfa, 0x00, - 0x0b, 0x17, 0xf7, 0xda, 0xd7, 0x1b, 0xfa, 0x01, 0x03, 0x05, 0xfe, 0xd6, - 0x02, 0xee, 0xee, 0x02, 0xf3, 0x06, 0xed, 0x03, 0xec, 0x01, 0xf2, 0x0f, - 0x05, 0x17, 0x0b, 
0xfb, 0x0f, 0x05, 0x03, 0x13, 0xff, 0x06, 0x02, 0xf5, - 0xf4, 0x18, 0x2b, 0xf0, 0x00, 0x17, 0xfc, 0xfd, 0x05, 0x0b, 0x0e, 0x14, - 0xe1, 0x24, 0x08, 0x24, 0xe6, 0xeb, 0x21, 0x12, 0xfb, 0x12, 0xe7, 0xf4, - 0xe8, 0x0e, 0x18, 0xee, 0xf5, 0xf3, 0xd9, 0xf3, 0xdb, 0xec, 0x0c, 0x1e, - 0xcf, 0x14, 0xdb, 0xe3, 0xdc, 0x02, 0x0c, 0xfb, 0xdb, 0x1b, 0xd0, 0xfe, - 0xf9, 0xfe, 0x2a, 0xf5, 0x00, 0x0b, 0xcd, 0xe0, 0xe2, 0x0e, 0x04, 0xf8, - 0xda, 0x1c, 0xe5, 0x0f, 0xe8, 0xf4, 0xf7, 0x15, 0x06, 0xf8, 0x02, 0xf7, - 0x0f, 0xfb, 0x17, 0xf9, 0xda, 0x01, 0xda, 0xd1, 0xf6, 0x02, 0xfd, 0x16, - 0xf1, 0xe4, 0xfa, 0x07, 0xee, 0x0a, 0xf3, 0xfd, 0xf2, 0x23, 0xf0, 0xe1, - 0x0a, 0x1a, 0x12, 0x1f, 0xef, 0x27, 0x09, 0xf1, 0x0c, 0x13, 0x23, 0xfd, - 0xf5, 0x03, 0xfe, 0x09, 0xfd, 0x16, 0xf8, 0x07, 0x08, 0x25, 0x08, 0xf8, - 0xf6, 0x0a, 0xf1, 0xf5, 0x07, 0x09, 0x05, 0xcc, 0xf8, 0x08, 0x13, 0xf9, - 0x1d, 0x11, 0x0f, 0xdc, 0xee, 0xf3, 0x27, 0xf9, 0xf9, 0x22, 0xfa, 0x0d, - 0xe2, 0x13, 0xfb, 0x11, 0x03, 0x1e, 0xff, 0xfb, 0xed, 0xf1, 0x0e, 0x0b, - 0x0f, 0x00, 0x06, 0xe0, 0x15, 0xf3, 0x13, 0xfc, 0x18, 0xf9, 0xff, 0x09, - 0xfa, 0x1f, 0x12, 0xe5, 0xe2, 0x06, 0xf9, 0xf4, 0x07, 0x15, 0x0b, 0x04, - 0xdb, 0x0d, 0xeb, 0xf3, 0xe6, 0x06, 0xe5, 0xee, 0xd8, 0x22, 0xd8, 0x10, - 0xea, 0xf9, 0x1c, 0xf7, 0xd3, 0x11, 0xc3, 0xf8, 0xde, 0x05, 0x00, 0xe6, - 0x07, 0xfd, 0xd3, 0x03, 0xea, 0xe0, 0x13, 0x14, 0xcf, 0xeb, 0xcd, 0xd3, - 0xde, 0xf5, 0xf0, 0x0c, 0x0c, 0xfa, 0xeb, 0xd3, 0xfb, 0xfd, 0x08, 0xf9, - 0xf4, 0x10, 0xfa, 0xd3, 0xf4, 0x11, 0x11, 0xf8, 0xef, 0xf8, 0xf8, 0xf1, - 0xfc, 0xe1, 0xf7, 0x12, 0x04, 0xf4, 0xfb, 0xed, 0xef, 0x0c, 0xfd, 0x1c, - 0xfe, 0x0e, 0xfd, 0xe2, 0xfe, 0x0a, 0x02, 0xfe, 0xe6, 0x1f, 0xef, 0xe5, - 0xe6, 0xf8, 0x16, 0x27, 0xe8, 0x20, 0x05, 0xe3, 0xf1, 0xef, 0xee, 0xed, - 0x0d, 0x11, 0x16, 0xfb, 0xf3, 0xff, 0x14, 0x01, 0xff, 0x15, 0x10, 0x02, - 0xe5, 0x28, 0x29, 0x13, 0x13, 0x16, 0xe6, 0x00, 0xd2, 0x26, 0xfd, 0x03, - 0x04, 0x05, 0x07, 0x06, 0xf1, 0x0e, 0x05, 0x0d, 0xe2, 0x0f, 0x02, 0xe1, - 0x07, 0xf7, 0x1c, 
0xfa, 0x14, 0x30, 0xf7, 0xee, 0x00, 0xfa, 0x3d, 0x06, - 0x1c, 0x04, 0x06, 0x07, 0x05, 0x1a, 0x10, 0xf6, 0xee, 0x0a, 0xeb, 0x04, - 0xeb, 0xdf, 0x1d, 0x09, 0xd5, 0xe8, 0xd6, 0xf4, 0xf0, 0x0f, 0x1d, 0xea, - 0xf2, 0xf8, 0xa6, 0x0b, 0xdc, 0x09, 0x08, 0x24, 0xee, 0x24, 0xaa, 0xe4, - 0xcb, 0x15, 0xef, 0xe7, 0xe9, 0x0c, 0xcf, 0x06, 0xe3, 0x12, 0x11, 0x00, - 0x07, 0x14, 0xd7, 0xde, 0xf6, 0x0f, 0x0b, 0x04, 0xfb, 0x0d, 0xf8, 0x0d, - 0xf6, 0x1b, 0xf1, 0x21, 0xdd, 0xfc, 0xf4, 0xe9, 0xf8, 0xe8, 0xf7, 0x06, - 0x03, 0x1e, 0xce, 0xe1, 0xea, 0xf6, 0x05, 0xf9, 0x16, 0x15, 0x04, 0xe0, - 0x14, 0xf7, 0x1e, 0x1c, 0x0a, 0x27, 0xef, 0xf3, 0x0f, 0xf3, 0xee, 0x04, - 0xf8, 0xf1, 0x07, 0xe3, 0x05, 0x0b, 0x00, 0x1c, 0x15, 0x27, 0x07, 0xf7, - 0xfa, 0x0b, 0xfa, 0xfa, 0x17, 0x13, 0xe1, 0xf5, 0xfb, 0x0c, 0x21, 0x2f, - 0xd7, 0xfb, 0xf5, 0xfd, 0xd3, 0xf4, 0x07, 0x0e, 0xfd, 0x0b, 0xfc, 0xfa, - 0xf5, 0x0e, 0x02, 0xfa, 0xfa, 0x19, 0xfd, 0xfa, 0xfc, 0x13, 0x24, 0x0c, - 0xe4, 0x31, 0xf8, 0x12, 0xf4, 0x04, 0x18, 0x29, 0x27, 0x19, 0xfc, 0x08, - 0x11, 0xe3, 0x07, 0xfe, 0x26, 0x40, 0x05, 0x02, 0x04, 0x02, 0x0f, 0xee, - 0xf4, 0x27, 0xea, 0xf4, 0xf5, 0x11, 0x26, 0x0b, 0xe7, 0x05, 0xd2, 0xf6, - 0xea, 0xfa, 0x0b, 0xf9, 0xfa, 0x16, 0xba, 0x00, 0xfb, 0x0d, 0x0b, 0xf9, - 0xe6, 0xf6, 0xc5, 0xf8, 0xf6, 0x01, 0x0f, 0xed, 0xed, 0x13, 0xcd, 0x0d, - 0xda, 0x06, 0x17, 0xee, 0x07, 0x1d, 0xb8, 0xfa, 0xe2, 0xea, 0xf2, 0xee, - 0x04, 0x00, 0xdc, 0xd0, 0xfb, 0xf5, 0xec, 0xfe, 0xf1, 0x0d, 0xf0, 0xdb, - 0xf9, 0x0d, 0x03, 0x03, 0x0e, 0x0a, 0xda, 0xd6, 0x01, 0xf2, 0x06, 0x14, - 0x1c, 0x1f, 0xe8, 0xe8, 0x0e, 0xfd, 0x0c, 0xf5, 0xf3, 0x3d, 0xf3, 0x05, - 0x10, 0xfa, 0x1b, 0x18, 0x08, 0x36, 0x09, 0xf1, 0xeb, 0xf9, 0x22, 0x01, - 0xf3, 0xf7, 0xff, 0xf0, 0x0c, 0xe9, 0x01, 0x29, 0x21, 0x15, 0x03, 0xee, - 0xe9, 0x1a, 0xf7, 0x15, 0x06, 0x25, 0xfa, 0xf0, 0xe4, 0xf1, 0x1f, 0x01, - 0xdc, 0x2d, 0xce, 0xe9, 0xea, 0x0b, 0x06, 0x2c, 0x0a, 0x30, 0xe7, 0x09, - 0xf4, 0xf0, 0x10, 0x29, 0xf9, 0x3d, 0xe7, 0xdc, 0xe4, 0xf7, 0x3b, 0x27, - 0x23, 0x3a, 0x0a, 
0x06, 0x0e, 0xfd, 0x2c, 0x07, 0x2b, 0x1c, 0xfa, 0x00, - 0xf9, 0x11, 0xea, 0x14, 0xeb, 0xfc, 0x18, 0x03, 0xf1, 0x16, 0x12, 0x04, - 0xcf, 0x12, 0xdd, 0xe4, 0x0e, 0xf0, 0x09, 0xe8, 0xf3, 0xfb, 0xa8, 0xf9, - 0xee, 0xfb, 0x1e, 0x1d, 0xfd, 0x05, 0xab, 0xe5, 0xff, 0x01, 0xfe, 0x04, - 0xf9, 0x02, 0xb9, 0xdc, 0xdf, 0x05, 0xf1, 0xef, 0xf1, 0x1e, 0xc7, 0xee, - 0xf7, 0x1e, 0x00, 0x00, 0xf8, 0x10, 0xec, 0xe8, 0x04, 0x0f, 0xf6, 0xff, - 0x04, 0x09, 0xe0, 0x0a, 0x0e, 0xe4, 0xf0, 0xf1, 0x16, 0x2b, 0xd3, 0xe1, - 0x0a, 0xef, 0xf9, 0xfe, 0x0b, 0x22, 0xf5, 0x01, 0x0a, 0xf8, 0x02, 0x00, - 0x17, 0x19, 0xf3, 0x05, 0x21, 0xfa, 0xee, 0xee, 0x12, 0xf2, 0xfa, 0xf5, - 0x05, 0x12, 0xee, 0xe4, 0x28, 0xfa, 0xf1, 0x03, 0x15, 0x16, 0x18, 0xfd, - 0x0f, 0x21, 0x04, 0xf4, 0xe5, 0x0c, 0x06, 0x13, 0xde, 0x36, 0xe8, 0xfb, - 0xe7, 0xfd, 0xf6, 0x12, 0x0e, 0x1d, 0xea, 0xf8, 0xd4, 0xe8, 0x19, 0x07, - 0xe5, 0x1c, 0xf7, 0x0c, 0xef, 0x05, 0x0f, 0x09, 0xdd, 0x1a, 0xea, 0xd7, - 0xf9, 0xf9, 0x12, 0x17, 0x2e, 0x10, 0x08, 0xfe, 0x14, 0xf5, 0x1d, 0xfa, - 0x06, 0x33, 0xed, 0xfe, 0xf7, 0x11, 0xf0, 0x15, 0xe2, 0x24, 0xf6, 0x0a, - 0xe2, 0xfc, 0x23, 0x12, 0xdd, 0x11, 0xfd, 0xe5, 0x08, 0xff, 0x15, 0xf6, - 0xf1, 0x1b, 0xae, 0xfe, 0xe6, 0x15, 0x2c, 0x2d, 0x15, 0x15, 0xc5, 0xf8, - 0xea, 0xe7, 0x07, 0x04, 0xfe, 0x28, 0xa1, 0xf2, 0xe1, 0xf9, 0xf8, 0xff, - 0xf4, 0x22, 0xb4, 0xdb, 0x03, 0x20, 0xe6, 0xf3, 0x0e, 0x19, 0xe3, 0x0a, - 0xfa, 0xee, 0xf3, 0xe5, 0xd8, 0xf9, 0xf1, 0xde, 0x06, 0x05, 0xf2, 0xf5, - 0xe7, 0x16, 0xd8, 0xfe, 0x07, 0xea, 0xee, 0x0e, 0xfa, 0xff, 0xdb, 0xe7, - 0x03, 0xed, 0x01, 0xfd, 0x09, 0x1a, 0xfa, 0xe6, 0x05, 0x10, 0xe9, 0x01, - 0x1f, 0x13, 0xf7, 0xf6, 0xfb, 0x13, 0xff, 0xdb, 0xed, 0xfe, 0x0a, 0x10, - 0x09, 0x29, 0xf5, 0x04, 0xf5, 0x26, 0x0d, 0x0c, 0xf9, 0x16, 0xfa, 0x02, - 0xf4, 0x2e, 0xde, 0xf5, 0xe1, 0x1d, 0xfb, 0x02, 0x0b, 0x23, 0x07, 0xea, - 0xd9, 0x0a, 0xf3, 0x0a, 0x0f, 0x1e, 0xe7, 0xf1, 0xd7, 0x0b, 0xf6, 0xff, - 0x0d, 0x24, 0xcc, 0x0a, 0xee, 0xda, 0x14, 0x12, 0x11, 0x29, 0xf4, 0x1a, - 0xef, 0x0b, 0xfa, 
0xec, 0x0c, 0x1b, 0xf4, 0xff, 0xf5, 0xef, 0x0f, 0x10, - 0xd4, 0x04, 0xf9, 0xf8, 0xec, 0xf9, 0x21, 0x05, 0xd3, 0x27, 0xf3, 0x17, - 0xff, 0xf6, 0x15, 0xf9, 0xed, 0x0a, 0xac, 0x02, 0xfd, 0xfb, 0x04, 0x29, - 0x06, 0x03, 0xb8, 0xe6, 0xd5, 0x17, 0x09, 0x1b, 0xf6, 0x1b, 0xab, 0xdc, - 0xdf, 0xfd, 0x06, 0x09, 0x09, 0x37, 0xbb, 0xed, 0x19, 0xd7, 0xe2, 0xdd, - 0x05, 0x01, 0xec, 0xfb, 0xe4, 0x0e, 0xeb, 0xf0, 0x03, 0x17, 0x04, 0xeb, - 0x09, 0xee, 0xeb, 0xe7, 0x0c, 0x16, 0xcb, 0x0e, 0x17, 0xd8, 0xe1, 0xf8, - 0x2b, 0x19, 0xde, 0xeb, 0x10, 0xf2, 0xff, 0xf8, 0xee, 0x0e, 0xe7, 0xf0, - 0x15, 0x08, 0xf8, 0xdf, 0x06, 0x0d, 0xf9, 0x14, 0xfa, 0x0b, 0x04, 0xfd, - 0x15, 0x23, 0x20, 0xff, 0xfd, 0x1d, 0x0c, 0xf1, 0xfe, 0x15, 0x0a, 0x02, - 0xed, 0xfe, 0xfb, 0x04, 0xfb, 0x1e, 0xdd, 0x05, 0xe0, 0x16, 0xf9, 0xf6, - 0xfd, 0x32, 0xdc, 0xf2, 0xd3, 0x08, 0xf4, 0xec, 0x17, 0x25, 0xe2, 0xf0, - 0xee, 0xf1, 0x0d, 0xfe, 0x13, 0x2d, 0x01, 0x11, 0xd4, 0xe4, 0x07, 0xfb, - 0x32, 0x11, 0x14, 0x07, 0xd7, 0x02, 0x10, 0xeb, 0x2b, 0x1d, 0x01, 0xfc, - 0xf3, 0xf0, 0x13, 0x1a, 0xdb, 0x20, 0x00, 0xf0, 0xf0, 0x05, 0x16, 0x03, - 0xd4, 0xe3, 0xc2, 0xf0, 0x06, 0x02, 0x1e, 0x0a, 0xec, 0x1f, 0xab, 0xea, - 0xfa, 0xe3, 0x20, 0x22, 0x03, 0x1b, 0xb3, 0x0e, 0xe3, 0xf3, 0x1d, 0x27, - 0xe3, 0x10, 0xa7, 0xda, 0xf3, 0x00, 0x0a, 0x0a, 0x04, 0xfb, 0xb2, 0x0f, - 0x0c, 0xf5, 0x07, 0xff, 0x13, 0x1e, 0xdb, 0xf6, 0xf9, 0xef, 0xe8, 0xe7, - 0xfb, 0x18, 0xeb, 0xec, 0x09, 0xda, 0xf1, 0xf0, 0x0b, 0x04, 0xe1, 0xfa, - 0x1c, 0x25, 0xee, 0x01, 0x0b, 0x29, 0xd7, 0x0c, 0x04, 0x0b, 0xef, 0xfd, - 0x1c, 0xfc, 0xf1, 0xfb, 0x0b, 0x0f, 0xdf, 0xed, 0x17, 0x38, 0x0c, 0xd7, - 0xff, 0xfd, 0x01, 0xfc, 0xfb, 0xfb, 0x18, 0x1a, 0x18, 0xe3, 0xf9, 0xf4, - 0xfa, 0x20, 0x06, 0x09, 0x11, 0x08, 0x1d, 0xf8, 0xfa, 0x1d, 0xf5, 0x1c, - 0xf5, 0xfe, 0x03, 0x07, 0xe4, 0x33, 0xc8, 0x0c, 0xe1, 0x13, 0xff, 0xe5, - 0x10, 0x2c, 0xd3, 0xf0, 0xed, 0x04, 0x07, 0x01, 0xf1, 0x16, 0xe0, 0x13, - 0xfa, 0x11, 0x07, 0xfa, 0x19, 0x16, 0x01, 0x00, 0x07, 0x26, 0x00, 0xec, - 0x1d, 0x23, 0x05, 
0xf4, 0x07, 0x17, 0x2c, 0x1d, 0xee, 0xf0, 0x0c, 0x09, - 0xe3, 0x1a, 0x24, 0x0b, 0xf3, 0x1e, 0xce, 0xfe, 0xfe, 0x12, 0x21, 0x1a, - 0xf6, 0x23, 0xc3, 0x03, 0xf4, 0x10, 0x1a, 0x2a, 0xf4, 0x08, 0xbf, 0xff, - 0x04, 0xf4, 0x0b, 0x1d, 0x1a, 0xf8, 0xcc, 0x00, 0xf7, 0x13, 0xf4, 0xfd, - 0xf4, 0x19, 0xbd, 0xef, 0x0c, 0x0d, 0x02, 0xfc, 0x12, 0x13, 0xe9, 0xe7, - 0xf5, 0xfa, 0xfa, 0xf6, 0x1a, 0x2e, 0xce, 0xd4, 0x01, 0x12, 0xfd, 0xfc, - 0x26, 0x10, 0xcc, 0xe7, 0xee, 0x13, 0xee, 0xff, 0xef, 0xea, 0x00, 0x0e, - 0x1a, 0x17, 0x04, 0x0c, 0x04, 0x0c, 0xe6, 0xf3, 0xf6, 0xdb, 0xdd, 0x04, - 0xf4, 0x22, 0x11, 0x16, 0xf3, 0x07, 0xec, 0xf8, 0xf2, 0x07, 0x03, 0x02, - 0xf5, 0x0a, 0xf6, 0x02, 0x1d, 0x1b, 0x11, 0x06, 0xf8, 0x06, 0x02, 0xea, - 0xf3, 0x1d, 0xce, 0x00, 0xed, 0xf9, 0xef, 0xf6, 0xec, 0x22, 0xc7, 0xf0, - 0xed, 0xdb, 0xe0, 0x02, 0x11, 0x07, 0xe8, 0xf0, 0xd1, 0xed, 0xff, 0xfd, - 0x0c, 0x2e, 0xd4, 0xed, 0xec, 0x0e, 0xf1, 0x07, 0x01, 0x0e, 0x0e, 0xfe, - 0xda, 0x0b, 0x0a, 0x0a, 0x1f, 0x2e, 0x13, 0x07, 0x00, 0x07, 0x14, 0x21, - 0xe9, 0xfc, 0xf0, 0x1e, 0xd7, 0xea, 0x34, 0x07, 0xc6, 0x0c, 0xd4, 0xec, - 0xfd, 0x06, 0x24, 0x0a, 0xf3, 0x15, 0xaf, 0xff, 0xe9, 0xf1, 0x0d, 0x3e, - 0xe9, 0x18, 0xba, 0x13, 0xed, 0xd7, 0x0b, 0x31, 0x05, 0x0e, 0xaf, 0x13, - 0xd6, 0x0e, 0x10, 0x02, 0x02, 0x14, 0xcb, 0xd5, 0xf9, 0x0c, 0xf9, 0x0e, - 0x1f, 0x24, 0xd5, 0xeb, 0xff, 0xf1, 0xf5, 0x0c, 0x08, 0x07, 0xf4, 0xd7, - 0x06, 0x10, 0xe8, 0xef, 0xfc, 0x2f, 0xee, 0xf1, 0x18, 0xf8, 0xf4, 0x02, - 0x11, 0x21, 0xd3, 0x12, 0x14, 0xe4, 0xf4, 0x02, 0x05, 0x24, 0xca, 0xf2, - 0xf3, 0xeb, 0xe7, 0xf8, 0x16, 0x1a, 0xeb, 0x0d, 0x05, 0x16, 0xf1, 0xec, - 0x11, 0x1c, 0x09, 0x1e, 0xe0, 0xe6, 0xfa, 0x0e, 0x0d, 0x2a, 0xea, 0x2e, - 0xed, 0xf9, 0xf7, 0x16, 0x09, 0x05, 0xdd, 0xd6, 0x02, 0xeb, 0xf5, 0xf3, - 0xe4, 0x3b, 0xed, 0x04, 0xe0, 0x0e, 0xfd, 0x09, 0xfd, 0x35, 0xdc, 0x18, - 0xf3, 0x04, 0xfa, 0x05, 0x15, 0x34, 0xe5, 0xe1, 0xe4, 0xf4, 0xe0, 0xf9, - 0x08, 0x32, 0x04, 0x08, 0xf4, 0x0f, 0xff, 0x08, 0x09, 0x2f, 0x06, 0x02, - 0xfd, 0x05, 0x0c, 
0x24, 0xe3, 0x1e, 0xf5, 0x0c, 0xdd, 0xf8, 0x18, 0x20, - 0xd8, 0x14, 0xef, 0xf4, 0x17, 0x08, 0x25, 0x14, 0x04, 0x06, 0xb0, 0xf5, - 0xf5, 0x09, 0x0f, 0x3e, 0xff, 0x28, 0xb3, 0xf5, 0x19, 0xd8, 0x14, 0x21, - 0xd9, 0xf7, 0xb7, 0xe5, 0xfe, 0xe7, 0x07, 0x1e, 0x04, 0x15, 0xc5, 0xf9, - 0x14, 0x20, 0xeb, 0x01, 0x01, 0x18, 0xce, 0x00, 0xe6, 0xe2, 0xf7, 0xfb, - 0xf3, 0x0d, 0xd3, 0xf3, 0x04, 0xf8, 0xf0, 0x03, 0xf1, 0x25, 0xb5, 0xef, - 0x05, 0xe0, 0x01, 0xf6, 0x04, 0x16, 0xd1, 0x01, 0x0a, 0x21, 0x01, 0x05, - 0x0e, 0x01, 0xf0, 0x0a, 0xf3, 0x00, 0x03, 0xf8, 0xfa, 0x03, 0x0b, 0xde, - 0xfe, 0xff, 0xfb, 0xea, 0x09, 0x02, 0xf5, 0xe8, 0xe7, 0x08, 0x00, 0xf5, - 0xf8, 0x0f, 0x13, 0xfa, 0xeb, 0xe8, 0xfb, 0x1f, 0x08, 0x16, 0xe6, 0xfa, - 0xe1, 0x00, 0x03, 0xdd, 0xf1, 0x26, 0xe5, 0x1d, 0xd9, 0xff, 0xf2, 0xf8, - 0xff, 0x33, 0xea, 0xe5, 0x03, 0x0c, 0x07, 0xf9, 0xf8, 0x0f, 0xe1, 0x1e, - 0xdd, 0x0f, 0x00, 0xf1, 0x06, 0x21, 0x09, 0x05, 0xf3, 0xec, 0xe6, 0x04, - 0x07, 0x32, 0xf1, 0xf9, 0xf2, 0x01, 0x18, 0x1f, 0xd2, 0xe2, 0x0a, 0xf4, - 0xca, 0xfc, 0x28, 0x16, 0xc2, 0x10, 0xf2, 0xfc, 0x08, 0xe9, 0x2a, 0x0f, - 0xfa, 0xf5, 0xa9, 0x07, 0xec, 0xe9, 0x19, 0x43, 0x0b, 0x1c, 0xa6, 0xe9, - 0xf4, 0x16, 0x0d, 0x2b, 0xfc, 0x11, 0x9a, 0xe1, 0xf1, 0x1c, 0xf5, 0x0f, - 0xe4, 0x18, 0xc0, 0xd9, 0x14, 0x26, 0xe6, 0xf8, 0x0a, 0x17, 0xec, 0xfb, - 0xe1, 0x22, 0xdf, 0xf2, 0xfe, 0x1e, 0xd4, 0xeb, 0xd7, 0x0e, 0x08, 0xf6, - 0xef, 0xfc, 0xe6, 0xd4, 0xf7, 0x0b, 0xfb, 0xf5, 0x01, 0x25, 0xd7, 0xfb, - 0x0d, 0xfe, 0xff, 0xf3, 0x1d, 0x32, 0xfe, 0xee, 0x12, 0xf2, 0x0c, 0xec, - 0x02, 0x10, 0xef, 0x01, 0xf2, 0x0b, 0xf3, 0xf7, 0xfa, 0x25, 0xfb, 0x0d, - 0x11, 0x15, 0x04, 0xfc, 0x0c, 0x21, 0x12, 0x29, 0x00, 0xfa, 0xf6, 0xf5, - 0x06, 0x22, 0xea, 0xe2, 0xee, 0x00, 0xfd, 0xf0, 0x0b, 0x1d, 0xd3, 0xe4, - 0xe4, 0x0a, 0xfc, 0xe8, 0xea, 0x2c, 0xed, 0xed, 0xef, 0xe8, 0xf2, 0x05, - 0xfd, 0x15, 0xd8, 0xda, 0xca, 0xee, 0xfa, 0x00, 0xfe, 0x0e, 0xf2, 0xf0, - 0x0e, 0xf5, 0x04, 0x03, 0x1d, 0x2b, 0xee, 0x05, 0x0f, 0x10, 0x13, 0x35, - 0xe2, 0x04, 0x10, 
0xdf, 0xcf, 0xeb, 0x40, 0x26, 0xe4, 0x03, 0xf3, 0xf9, - 0xf5, 0x14, 0x24, 0x2a, 0xdf, 0xfe, 0xab, 0xe5, 0xfe, 0x1c, 0x27, 0x35, - 0xdb, 0xff, 0xac, 0x01, 0xf6, 0xfc, 0x19, 0x1a, 0x11, 0x1f, 0xa8, 0xf5, - 0x02, 0x0f, 0x1a, 0x1f, 0xf7, 0xf2, 0xa2, 0x00, 0x15, 0x22, 0xe4, 0x13, - 0x00, 0x09, 0xd9, 0xd5, 0x02, 0x19, 0xfd, 0xf8, 0xe7, 0xff, 0xfb, 0xe0, - 0xef, 0xf7, 0xee, 0xf3, 0xf3, 0x19, 0xb0, 0xdf, 0x00, 0x0f, 0x08, 0xf3, - 0x15, 0x17, 0xec, 0x0f, 0x11, 0x14, 0x02, 0x08, 0x10, 0x17, 0xe6, 0x08, - 0xf7, 0x00, 0xed, 0xf7, 0x29, 0x07, 0x10, 0x05, 0x05, 0xe7, 0xed, 0xf4, - 0xf9, 0x15, 0xf9, 0xf0, 0x08, 0x00, 0x03, 0x09, 0x21, 0x28, 0xf6, 0x0e, - 0xfb, 0xf3, 0x03, 0xf7, 0x0f, 0x0c, 0xf0, 0xf5, 0xe3, 0xd8, 0xf8, 0xf2, - 0x09, 0x1c, 0xe7, 0xfb, 0xe4, 0xf6, 0xfa, 0xf8, 0xf1, 0x42, 0xf6, 0xda, - 0xdd, 0xd7, 0xfa, 0xff, 0x2f, 0x2c, 0xda, 0x0a, 0xde, 0xec, 0xf1, 0x14, - 0xfb, 0x1d, 0xeb, 0xee, 0xf2, 0xeb, 0xf3, 0xed, 0x0e, 0x35, 0xf0, 0x06, - 0x19, 0x04, 0x2f, 0x23, 0xe2, 0x07, 0x13, 0x0f, 0xe9, 0xf0, 0x22, 0x2e, - 0xd9, 0x1a, 0xcb, 0xed, 0xfd, 0x04, 0x27, 0x1e, 0xf6, 0x07, 0x96, 0xd6, - 0xd8, 0x11, 0x18, 0x56, 0xd2, 0xfb, 0x92, 0xfc, 0x0b, 0x0a, 0x17, 0x2c, - 0xe5, 0x04, 0xa2, 0xf8, 0xe2, 0x04, 0x1a, 0x0d, 0xeb, 0x11, 0xa2, 0xe5, - 0xe5, 0xf8, 0x02, 0xf7, 0x17, 0x03, 0xca, 0xe9, 0x0c, 0x1f, 0xfe, 0xf5, - 0x18, 0x12, 0xdd, 0x08, 0x15, 0xff, 0xfc, 0xf6, 0xe1, 0x1d, 0xe2, 0xe1, - 0xfe, 0xfc, 0x03, 0xff, 0xf2, 0x23, 0xd2, 0x01, 0x13, 0xdd, 0xf3, 0xf4, - 0xf2, 0x07, 0xef, 0x03, 0x15, 0x21, 0xd8, 0xf8, 0x09, 0xf3, 0xe8, 0xea, - 0xe8, 0xf2, 0x08, 0xf0, 0x04, 0x1a, 0xf2, 0x19, 0xfb, 0x1b, 0x15, 0xfc, - 0x1d, 0x30, 0xe5, 0x1e, 0x09, 0xe8, 0xe9, 0x09, 0xf7, 0x2a, 0xe1, 0x0e, - 0x00, 0x21, 0xf3, 0xff, 0xfb, 0x01, 0xdf, 0xf2, 0xfe, 0xf4, 0xfc, 0xf0, - 0x0b, 0x0b, 0xdd, 0xe4, 0xd2, 0x14, 0xf7, 0xfe, 0x0b, 0x39, 0x01, 0xe6, - 0xe4, 0x27, 0xfa, 0xe4, 0x04, 0x2c, 0xe2, 0x04, 0xf5, 0x07, 0xf2, 0x03, - 0xf0, 0x10, 0xf5, 0xf6, 0xfc, 0x16, 0x22, 0x1b, 0xf8, 0x11, 0xe4, 0x09, - 0xf6, 0xf0, 0x41, 
0x1e, 0xcf, 0x04, 0xea, 0xee, 0x0e, 0xf6, 0x1b, 0x2f, - 0xc7, 0xf1, 0xba, 0xef, 0x0f, 0x16, 0x1e, 0x39, 0x05, 0x1e, 0x90, 0xe6, - 0x0d, 0xfa, 0x22, 0x3f, 0xe3, 0x23, 0xa5, 0xe3, 0xe9, 0x0f, 0x05, 0x27, - 0x02, 0x11, 0x99, 0x05, 0xfa, 0x05, 0x03, 0x01, 0xff, 0x26, 0xd3, 0xf7, - 0xf7, 0xf9, 0x05, 0xf4, 0xef, 0x23, 0xd2, 0xdd, 0x05, 0x08, 0xfa, 0xff, - 0x03, 0x04, 0xbd, 0xd7, 0x14, 0x06, 0xef, 0x06, 0xe5, 0x05, 0xea, 0xea, - 0x02, 0xfd, 0x0d, 0x00, 0x08, 0xff, 0xe7, 0xfb, 0xfe, 0x13, 0xfe, 0xec, - 0xf9, 0x02, 0xf3, 0xff, 0xff, 0x08, 0x04, 0xed, 0x19, 0x1d, 0xfa, 0x0a, - 0x0d, 0xf2, 0x0f, 0xec, 0x25, 0x1c, 0xec, 0x0b, 0x01, 0xff, 0x01, 0xf6, - 0x08, 0x09, 0xe8, 0xe2, 0xec, 0x23, 0xe5, 0xe9, 0xf0, 0x2e, 0xbd, 0xe1, - 0xef, 0x14, 0xe9, 0xf6, 0xf5, 0x1d, 0xdc, 0xe3, 0xd7, 0xfc, 0xf9, 0xf2, - 0xfe, 0x24, 0xf2, 0x05, 0xd5, 0xed, 0xe9, 0xf9, 0xfa, 0x2d, 0xf0, 0xfe, - 0xee, 0xf2, 0xe8, 0xf7, 0x06, 0x14, 0x01, 0x10, 0x06, 0xf3, 0x0e, 0x0e, - 0xc2, 0x1d, 0xf2, 0x1c, 0xed, 0xe3, 0x53, 0x21, 0xb8, 0x0c, 0xde, 0x03, - 0x15, 0xeb, 0x46, 0x39, 0xdf, 0xf6, 0xa3, 0xee, 0xf6, 0xe0, 0x33, 0x50, - 0xdd, 0x27, 0x9f, 0x07, 0x13, 0xe2, 0x1f, 0x35, 0xed, 0x1f, 0xb7, 0x07, - 0x11, 0xed, 0x17, 0x28, 0xf4, 0x20, 0xc1, 0xec, 0xef, 0x16, 0x02, 0xfa, - 0xe0, 0x1b, 0xf7, 0xdb, 0xfd, 0x0a, 0xe7, 0xfb, 0xe7, 0x25, 0xe2, 0xe7, - 0xf8, 0xf0, 0xee, 0xe9, 0x02, 0x06, 0xc9, 0xe4, 0x14, 0xe3, 0xe2, 0xf7, - 0xf8, 0xfd, 0xdd, 0xe2, 0x08, 0x0a, 0xe4, 0x05, 0xf5, 0x16, 0xe7, 0x01, - 0x00, 0x1c, 0xe7, 0xf0, 0xf6, 0x19, 0xfe, 0x0c, 0xf2, 0x06, 0x03, 0xe8, - 0x0b, 0xfe, 0xe3, 0x19, 0x08, 0x1a, 0x10, 0xfd, 0x00, 0x21, 0xf0, 0xeb, - 0x18, 0x02, 0xf3, 0x04, 0xf0, 0x18, 0xdb, 0x05, 0x01, 0xde, 0xed, 0xe9, - 0x23, 0x15, 0xaf, 0xe6, 0xf1, 0x0a, 0xe6, 0xea, 0x01, 0x18, 0xd8, 0xfd, - 0xf1, 0xe6, 0xec, 0xf5, 0x0e, 0x1e, 0xcc, 0xfc, 0xe7, 0x00, 0xe9, 0x11, - 0x00, 0x30, 0xf9, 0x14, 0xf4, 0x19, 0xdd, 0xf7, 0xf7, 0x2f, 0xf4, 0xf2, - 0xff, 0x27, 0x15, 0x1c, 0xbc, 0x2f, 0xe9, 0x14, 0xf5, 0xe8, 0x44, 0x30, - 0xe8, 0x1d, 0xe4, 
0x18, 0x11, 0x00, 0x0c, 0x2b, 0xf3, 0x29, 0x96, 0xe0, - 0x06, 0xee, 0x3e, 0x55, 0xdc, 0x13, 0x98, 0xdf, 0xf0, 0xfe, 0x17, 0x33, - 0xe8, 0x09, 0xa3, 0x07, 0xef, 0x0e, 0x1d, 0x37, 0xdd, 0xfe, 0xb5, 0x00, - 0xf7, 0xe0, 0xea, 0xfd, 0xfd, 0x19, 0xbc, 0xfd, 0x15, 0xfe, 0x01, 0xf3, - 0xd5, 0x20, 0xbf, 0xe3, 0x15, 0x0e, 0xf0, 0xf6, 0xf2, 0x14, 0xcc, 0xf0, - 0xf7, 0x04, 0xf2, 0xff, 0x0b, 0x02, 0xd2, 0xd8, 0xfa, 0xfc, 0xe5, 0x02, - 0x00, 0xfb, 0xf0, 0xdc, 0x1e, 0x10, 0x02, 0x01, 0x00, 0x18, 0xe9, 0xdb, - 0x1e, 0xf6, 0xfc, 0x03, 0xef, 0x0a, 0x00, 0x16, 0x00, 0x0f, 0xf4, 0x16, - 0xfa, 0x0b, 0xe2, 0xfa, 0xe0, 0x07, 0xfb, 0x02, 0x21, 0x0e, 0xdd, 0x0b, - 0xea, 0xf0, 0xeb, 0xfb, 0x19, 0x09, 0xd4, 0xf2, 0xef, 0x0b, 0x00, 0xeb, - 0x1a, 0x2f, 0xea, 0x06, 0x03, 0xf6, 0xf8, 0xfb, 0xfe, 0x1d, 0xea, 0xdd, - 0xed, 0xfd, 0xfb, 0xe7, 0xfe, 0x18, 0xf4, 0xfc, 0x0b, 0xf6, 0xfc, 0x0b, - 0xfb, 0x28, 0x07, 0xff, 0x07, 0x1e, 0x03, 0x21, 0xcf, 0x22, 0x05, 0xe6, - 0xea, 0xe7, 0x43, 0x2e, 0xe7, 0x14, 0xfb, 0x0a, 0x1e, 0xfe, 0x2c, 0x24, - 0xd5, 0xfd, 0x9e, 0xd1, 0xf2, 0x1c, 0x32, 0x51, 0x01, 0xf3, 0xac, 0xe1, - 0xf4, 0xe5, 0x1c, 0x37, 0xf1, 0x0f, 0xa7, 0xdb, 0x00, 0xf6, 0x0f, 0x18, - 0xe1, 0x10, 0xc9, 0xc5, 0xe8, 0xeb, 0xf2, 0xfd, 0xf6, 0x02, 0xc2, 0xff, - 0x00, 0x19, 0x03, 0x0f, 0x02, 0x22, 0xd4, 0xe7, 0x07, 0x0f, 0xe5, 0x1a, - 0x09, 0x0b, 0xdc, 0xd2, 0x00, 0x05, 0xee, 0xf8, 0xdc, 0x14, 0xd0, 0x0a, - 0x0a, 0xfa, 0xeb, 0x04, 0xf3, 0x06, 0xde, 0x05, 0xfb, 0xfd, 0xe3, 0xec, - 0xfd, 0x14, 0xd7, 0x11, 0x0e, 0xe6, 0x06, 0xec, 0xde, 0x22, 0xd7, 0x00, - 0x03, 0xf5, 0xf5, 0x0d, 0x01, 0x05, 0xea, 0x0b, 0x16, 0x04, 0xff, 0x13, - 0xf3, 0x12, 0xd2, 0xdf, 0x0b, 0xe4, 0x06, 0xf6, 0x08, 0x2d, 0xd3, 0xd6, - 0xe7, 0x0a, 0xec, 0xff, 0xfe, 0x01, 0xdf, 0xf4, 0xdf, 0x1c, 0xfe, 0xf9, - 0xf7, 0x13, 0xca, 0xff, 0x03, 0x06, 0xe9, 0xf7, 0x06, 0x08, 0xd7, 0xf3, - 0xed, 0x08, 0xe3, 0xfd, 0x0c, 0x11, 0x15, 0xfb, 0x15, 0x08, 0x28, 0x40, - 0xe7, 0x0d, 0x08, 0xec, 0xe8, 0x16, 0x67, 0x46, 0xc8, 0x16, 0xf1, 0x02, - 0x24, 0x00, 0x3a, 
0x43, 0xd6, 0x12, 0xae, 0xe7, 0xf4, 0xf8, 0x3a, 0x65, - 0xe4, 0x0c, 0xb2, 0xef, 0x1f, 0xe8, 0x29, 0x59, 0xf8, 0x11, 0xc4, 0xe1, - 0xfe, 0xfa, 0x27, 0x43, 0xc9, 0x1e, 0xbb, 0xfb, 0xf3, 0x13, 0x15, 0x0d, - 0xf1, 0x13, 0xcd, 0xf0, 0x07, 0x19, 0x07, 0x00, 0xd8, 0xeb, 0xbf, 0xf0, - 0xfc, 0xf6, 0xef, 0x16, 0x01, 0x02, 0xc1, 0xdf, 0xfd, 0xe9, 0x06, 0x06, - 0xf1, 0x08, 0xd7, 0xcc, 0xfb, 0x0e, 0xfc, 0x14, 0xf2, 0x1a, 0xe2, 0x0d, - 0xeb, 0x09, 0x07, 0x10, 0xe6, 0x13, 0xeb, 0xf5, 0x15, 0x14, 0xeb, 0xfe, - 0xf9, 0x17, 0xd2, 0xe3, 0x1e, 0xf5, 0x04, 0x0a, 0xf1, 0x0e, 0xde, 0xe7, - 0x01, 0x20, 0x0c, 0xfc, 0xdc, 0xf9, 0xe5, 0xe9, 0xff, 0x1d, 0x0a, 0xfe, - 0xec, 0x25, 0xaf, 0xd2, 0x01, 0x16, 0xfc, 0x17, 0xe8, 0x1e, 0xcd, 0xd9, - 0xe2, 0xf1, 0xeb, 0x08, 0xff, 0x33, 0xe5, 0xfb, 0xeb, 0x04, 0xfe, 0xf7, - 0xfd, 0x1f, 0xee, 0xff, 0xed, 0xf8, 0xe0, 0xff, 0xfd, 0x2b, 0x0a, 0xf5, - 0x15, 0x1d, 0xf3, 0x3f, 0x16, 0xf6, 0xf2, 0xee, 0xf4, 0xef, 0xf0, 0x56, - 0x0a, 0x1a, 0xbc, 0xfc, 0x2f, 0xfb, 0xf0, 0x56, 0x1e, 0x0e, 0xc6, 0xe8, - 0x06, 0x0b, 0x11, 0x62, 0x3e, 0xf9, 0xb8, 0xc9, 0xed, 0xeb, 0x02, 0x63, - 0x2c, 0xfd, 0xc5, 0xe9, 0x00, 0x17, 0x0f, 0x37, 0xfe, 0x20, 0xcc, 0xe0, - 0xe0, 0x0e, 0xe6, 0x20, 0x0a, 0xfd, 0xdf, 0xee, 0x0b, 0x02, 0xee, 0x1f, - 0xfb, 0x06, 0xd2, 0xed, 0xfe, 0xeb, 0xfc, 0x12, 0xfd, 0x14, 0x00, 0xd8, - 0x08, 0xf6, 0xec, 0x17, 0xf9, 0x10, 0x00, 0xd9, 0x18, 0xf1, 0xee, 0x0f, - 0xf4, 0x03, 0xee, 0xeb, 0xf0, 0xef, 0xf2, 0x06, 0x04, 0x00, 0xf4, 0x0f, - 0x09, 0x06, 0xf7, 0x0b, 0xfd, 0x01, 0x03, 0x03, 0xf4, 0xf6, 0xdd, 0x14, - 0x1c, 0xef, 0xf1, 0xdd, 0xf7, 0x13, 0xd9, 0x15, 0xef, 0x02, 0xd2, 0xe7, - 0x05, 0x05, 0xe2, 0x09, 0xf2, 0x11, 0xf5, 0xba, 0xf0, 0x04, 0xe0, 0x01, - 0x06, 0x10, 0xe6, 0xef, 0xfc, 0x12, 0xf9, 0xf4, 0x1b, 0x2f, 0xe3, 0x0f, - 0xd7, 0xf6, 0x0b, 0x11, 0xf7, 0x0c, 0x00, 0x06, 0x18, 0xef, 0x06, 0x03, - 0x0a, 0x09, 0xf6, 0x1a, 0x0d, 0xed, 0xfe, 0x2c, 0x43, 0xf4, 0xe5, 0xde, - 0xf5, 0x02, 0x25, 0x5a, 0x49, 0xd4, 0xe6, 0x24, 0x1e, 0xf7, 0x0e, 0x5c, - 0x5d, 0xf0, 0xf9, 
0xe4, 0x1c, 0xeb, 0x28, 0x7f, 0x5b, 0xec, 0xfa, 0xdb, - 0x0c, 0xf5, 0x20, 0x49, 0x51, 0xe1, 0xed, 0xe6, 0x0e, 0x26, 0x28, 0x33, - 0x35, 0x05, 0xe1, 0xe4, 0x1f, 0xfc, 0xf9, 0x39, 0x18, 0x04, 0xed, 0xed, - 0x01, 0xe7, 0xe6, 0x08, 0x09, 0x03, 0xe7, 0xf9, 0x0e, 0x06, 0xec, 0x08, - 0x12, 0x1a, 0xda, 0xef, 0xdf, 0xf9, 0xe2, 0x1e, 0x1c, 0x00, 0x12, 0xd7, - 0x01, 0xf7, 0x21, 0x17, 0x13, 0x19, 0xde, 0xe0, 0xec, 0x16, 0x01, 0x1b, - 0x06, 0x0c, 0xf0, 0xe8, 0x18, 0x03, 0x06, 0x0e, 0x09, 0xfa, 0x03, 0xf3, - 0xdd, 0x01, 0xfb, 0x0a, 0x2a, 0xf4, 0xf6, 0xda, 0xe9, 0xfe, 0xe9, 0x12, - 0x19, 0xe9, 0x05, 0xdf, 0x00, 0xeb, 0xf2, 0x10, 0x0c, 0xe1, 0xcd, 0xcb, - 0xf2, 0x1f, 0xd9, 0x0c, 0xfa, 0xfb, 0xe8, 0xde, 0x00, 0xfc, 0xe5, 0x00, - 0x11, 0x02, 0xe6, 0x17, 0x14, 0x00, 0xf2, 0xfd, 0x00, 0xe1, 0x10, 0x24, - 0x12, 0xec, 0xed, 0x1e, 0x09, 0x18, 0x03, 0x0c, 0x04, 0xf4, 0x15, 0x0f, - 0x10, 0x18, 0xd6, 0x29, 0x10, 0x04, 0x1c, 0xef, 0x0f, 0x0c, 0xc7, 0x04, - 0xfe, 0xeb, 0xff, 0xf5, 0xe3, 0x15, 0xfe, 0xcb, 0x10, 0xff, 0x12, 0xfb, - 0xe4, 0xeb, 0xf9, 0x00, 0x02, 0xf1, 0x14, 0x13, 0x01, 0x02, 0xf9, 0x01, - 0x06, 0x0c, 0xf5, 0x0a, 0x1e, 0x01, 0x19, 0x0e, 0x05, 0xf5, 0x0a, 0xff, - 0xff, 0xf2, 0xfb, 0xdb, 0xf8, 0x06, 0x17, 0xf2, 0xf7, 0x0d, 0x0e, 0xf4, - 0xfa, 0xf7, 0x14, 0xdb, 0xe0, 0xfd, 0x08, 0x16, 0xf7, 0x16, 0xfc, 0x09, - 0x27, 0x07, 0x09, 0xfb, 0x0a, 0xfc, 0x0c, 0xe4, 0xdb, 0xee, 0xff, 0x10, - 0xf3, 0x09, 0xfa, 0xf4, 0x23, 0xf3, 0xf4, 0x19, 0xff, 0xfa, 0xff, 0x19, - 0x0f, 0x11, 0xed, 0xec, 0xf8, 0x0f, 0x10, 0xf3, 0xff, 0x0b, 0xf7, 0x06, - 0x0b, 0x0e, 0x07, 0xe4, 0x18, 0x0a, 0x08, 0x0e, 0x02, 0x0a, 0x05, 0x19, - 0x02, 0xf3, 0xfe, 0xfe, 0x0b, 0x0f, 0xfc, 0xfa, 0x05, 0xf9, 0xe2, 0xf9, - 0x1b, 0xf7, 0x0f, 0x07, 0xfc, 0x12, 0xfe, 0x01, 0xfd, 0xf0, 0x04, 0xf4, - 0xfd, 0x07, 0xf2, 0x04, 0x04, 0x07, 0xef, 0x0c, 0xed, 0x0e, 0xf6, 0xef, - 0x08, 0x07, 0x04, 0xe9, 0xf3, 0x20, 0xda, 0x15, 0xf8, 0xff, 0xec, 0xe0, - 0xf6, 0xff, 0xe9, 0x08, 0x01, 0x10, 0xf0, 0xfc, 0xe9, 0x08, 0xe8, 0xf5, - 0xf8, 0xe5, 0x17, 
0xe6, 0x03, 0xfc, 0x09, 0xf5, 0xdd, 0xf2, 0xff, 0x05, - 0xf6, 0xf8, 0xf5, 0x07, 0xfc, 0xf1, 0x04, 0xf3, 0x13, 0xe1, 0x0f, 0xf2, - 0x0a, 0xf9, 0xfd, 0x1c, 0xe0, 0x11, 0x1b, 0xe6, 0xef, 0x05, 0x05, 0x0c, - 0x23, 0x10, 0x09, 0xfe, 0xf7, 0x1a, 0xf1, 0xfc, 0x11, 0x1d, 0xff, 0x03, - 0x03, 0xe6, 0x07, 0x11, 0x0c, 0x0d, 0x16, 0x05, 0x05, 0x25, 0xf3, 0x10, - 0x10, 0x06, 0x09, 0xe8, 0x1a, 0xf0, 0xee, 0x09, 0xff, 0x24, 0xf7, 0xfb, - 0xe6, 0x06, 0xfa, 0x08, 0x03, 0x00, 0xf2, 0x04, 0xf0, 0xeb, 0x14, 0x1c, - 0x03, 0x21, 0x14, 0x1d, 0xfe, 0x03, 0xf6, 0x02, 0x09, 0xff, 0x00, 0x13, - 0xef, 0x10, 0x1e, 0x0b, 0x1d, 0x1c, 0xf1, 0xf6, 0xe7, 0xfd, 0x14, 0x01, - 0xff, 0x13, 0xf7, 0xfc, 0x00, 0x21, 0xe3, 0xeb, 0x07, 0x0e, 0x09, 0xf1, - 0xf8, 0xfd, 0x03, 0xee, 0x19, 0xfd, 0xff, 0xfb, 0xff, 0xea, 0xfb, 0x07, - 0xf0, 0x0a, 0x04, 0x04, 0x0b, 0x12, 0xfe, 0x0b, 0xe0, 0xff, 0xf6, 0xe5, - 0xfc, 0x11, 0xed, 0xfd, 0x15, 0x03, 0xdd, 0xdb, 0x04, 0xfe, 0xff, 0x0e, - 0xff, 0xfa, 0xfb, 0xe5, 0xef, 0xf6, 0xfe, 0x22, 0x0f, 0xe8, 0xfe, 0xf4, - 0xfd, 0xd9, 0x03, 0x0a, 0xdf, 0xcf, 0xf1, 0x14, 0x05, 0xfd, 0xfb, 0xf3, - 0xfb, 0xfb, 0x0f, 0xf8, 0x05, 0x09, 0x03, 0xf7, 0x05, 0x05, 0x13, 0xfb, - 0xeb, 0x23, 0xe7, 0x18, 0xfb, 0x00, 0xfe, 0xdd, 0xe9, 0xea, 0xd3, 0xe8, - 0x1a, 0xef, 0x01, 0xf1, 0x09, 0x1d, 0xd8, 0xfc, 0xda, 0x19, 0x03, 0xec, - 0xe5, 0xf3, 0xed, 0x0a, 0xf4, 0x13, 0x0b, 0xf7, 0x0c, 0x00, 0xf9, 0xea, - 0xe3, 0xfe, 0xff, 0x0d, 0x0a, 0x1b, 0xd7, 0x17, 0xeb, 0xe9, 0x00, 0x0e, - 0xee, 0x24, 0xef, 0x09, 0x07, 0xf0, 0xf5, 0x07, 0xf5, 0xf5, 0x10, 0x17, - 0x06, 0xf7, 0xfc, 0x02, 0xfb, 0xf9, 0xe7, 0x0a, 0x26, 0xf3, 0x01, 0x01, - 0x09, 0x0b, 0x02, 0x27, 0xf8, 0xee, 0xfd, 0x1c, 0xf8, 0xf2, 0x0f, 0xfc, - 0x0d, 0xe0, 0xea, 0x02, 0x0b, 0x00, 0xe0, 0x08, 0xfe, 0x10, 0x04, 0xfe, - 0xeb, 0x13, 0x01, 0x0c, 0x0e, 0xed, 0x09, 0x01, 0x0c, 0xe3, 0x10, 0xdf, - 0xd1, 0x14, 0xf3, 0xef, 0x09, 0xf0, 0xee, 0xe5, 0x11, 0xf4, 0xf6, 0x00, - 0xe8, 0x20, 0x0a, 0xfc, 0xea, 0xf7, 0x02, 0x16, 0xe7, 0xf3, 0x0d, 0xe4, - 0x04, 0xe6, 0xef, 
0xf8, 0x0f, 0x23, 0x02, 0xe0, 0x01, 0x01, 0x01, 0x05, - 0xf5, 0x0d, 0xf5, 0xf5, 0xe1, 0xff, 0x04, 0x00, 0xf4, 0x0d, 0xee, 0xf1, - 0xef, 0xf7, 0x0b, 0xff, 0x1b, 0xec, 0x05, 0xe7, 0xf3, 0x13, 0x12, 0xf2, - 0xf3, 0xfc, 0xea, 0x06, 0xfe, 0x13, 0x12, 0xdb, 0x11, 0xe2, 0xfc, 0x0d, - 0x1c, 0xe8, 0x1d, 0xfc, 0xf2, 0xe2, 0x13, 0x1d, 0xda, 0xf6, 0x1c, 0x18, - 0x1e, 0xf4, 0xfa, 0x03, 0xdc, 0x0f, 0xff, 0xff, 0x18, 0x0b, 0xed, 0xf1, - 0xf8, 0x02, 0xf4, 0x10, 0xf9, 0xeb, 0x0b, 0x0e, 0x0f, 0x01, 0x02, 0x1b, - 0x06, 0x10, 0x00, 0xe7, 0x23, 0x0d, 0xf6, 0x11, 0x08, 0xf5, 0x0f, 0x05, - 0x13, 0xf7, 0x01, 0x01, 0x0c, 0xf6, 0xf9, 0xf0, 0x29, 0x01, 0xe9, 0x11, - 0x02, 0xfa, 0xeb, 0x16, 0x0e, 0x10, 0x09, 0x0e, 0x1c, 0x0a, 0xe3, 0xd3, - 0x01, 0xe3, 0x00, 0x06, 0xe2, 0xe9, 0x19, 0xef, 0x12, 0xf3, 0xfc, 0x02, - 0x0b, 0x0c, 0x0d, 0xed, 0xfd, 0xf6, 0xf9, 0xe9, 0xf2, 0x28, 0xfe, 0x03, - 0xec, 0x03, 0x00, 0xf8, 0xde, 0x0d, 0x25, 0x07, 0x1a, 0xe7, 0xfd, 0x29, - 0xd8, 0xf7, 0xfb, 0xde, 0x0c, 0x08, 0x06, 0x22, 0xee, 0x1d, 0x05, 0x07, - 0xf0, 0xfb, 0xfe, 0x07, 0xf1, 0x04, 0xe9, 0x01, 0xfc, 0xf1, 0x00, 0xeb, - 0xe3, 0x08, 0xec, 0xfe, 0x04, 0xeb, 0xfc, 0x01, 0xf6, 0x0e, 0xdf, 0xf8, - 0x12, 0xe3, 0x16, 0xdc, 0x21, 0x0a, 0xe6, 0x06, 0xe5, 0x10, 0x07, 0xf7, - 0x1e, 0xde, 0xe3, 0x07, 0x16, 0xed, 0x23, 0xf2, 0x12, 0x0d, 0xe9, 0xf9, - 0xe8, 0xfe, 0x0e, 0x02, 0x18, 0x0a, 0xea, 0xec, 0xfb, 0xfe, 0x0c, 0x1b, - 0x19, 0x20, 0xfa, 0x07, 0xe5, 0x0c, 0x04, 0x27, 0xdb, 0xe6, 0xfe, 0x0d, - 0x0a, 0x0a, 0xfe, 0x39, 0xdd, 0xde, 0x05, 0xec, 0x09, 0x05, 0x0a, 0x2c, - 0xf4, 0x02, 0x1f, 0xd3, 0x24, 0xee, 0x0f, 0x3c, 0xf5, 0xfd, 0xf8, 0xf8, - 0x12, 0xf5, 0xf3, 0x19, 0xf9, 0xda, 0xf6, 0x0a, 0x0a, 0xf4, 0x09, 0x0f, - 0xfc, 0x00, 0x01, 0x01, 0xf3, 0xf8, 0x05, 0xf3, 0x0c, 0x19, 0x0e, 0xfd, - 0xfa, 0xe1, 0xfc, 0x0c, 0x03, 0xfb, 0x1b, 0x06, 0xcc, 0xe4, 0x08, 0xf9, - 0x10, 0xe9, 0x06, 0x00, 0x17, 0xe8, 0x0d, 0x12, 0xca, 0xf5, 0x23, 0xe4, - 0x21, 0xf6, 0x19, 0x33, 0xdd, 0xfa, 0x0c, 0x01, 0x14, 0x07, 0x00, 0x34, - 0xda, 0x05, 0x07, 
0x01, 0x07, 0xe4, 0x06, 0x24, 0x02, 0xff, 0xf0, 0x09, - 0xfc, 0xf4, 0x03, 0x06, 0xee, 0x08, 0xe2, 0x1d, 0xfa, 0x0c, 0xfc, 0x02, - 0x03, 0xe5, 0xf0, 0xe2, 0x0a, 0x18, 0x12, 0x0c, 0x1e, 0x20, 0xed, 0x20, - 0xe4, 0x01, 0x2a, 0x09, 0x0d, 0x0e, 0xd0, 0xf4, 0xdd, 0xfd, 0x2b, 0xf2, - 0x08, 0x0c, 0xf8, 0xf7, 0xfc, 0xf9, 0x15, 0xef, 0x19, 0x1c, 0x01, 0xff, - 0xe2, 0x01, 0xf3, 0x30, 0x0e, 0xfb, 0x15, 0xe8, 0x1c, 0x00, 0xfa, 0x16, - 0xef, 0xea, 0xfb, 0x05, 0xf0, 0x0e, 0x02, 0x13, 0xf4, 0x01, 0x03, 0xe5, - 0x29, 0x07, 0x09, 0x24, 0xf9, 0xe3, 0xf8, 0xde, 0x2d, 0xf4, 0xf5, 0x40, - 0xed, 0xdf, 0x07, 0xef, 0x0f, 0x0a, 0x0b, 0x32, 0x0d, 0xe8, 0x00, 0xe6, - 0xf6, 0xfc, 0xfd, 0x19, 0x11, 0x09, 0xf3, 0x03, 0xea, 0xf1, 0xfb, 0x02, - 0xfd, 0x06, 0xff, 0xfe, 0x09, 0xec, 0x06, 0x0c, 0x15, 0xf9, 0x06, 0xd7, - 0xe3, 0xf7, 0xed, 0x01, 0x03, 0xfd, 0x14, 0x01, 0x0e, 0xe0, 0x37, 0x0d, - 0xd2, 0x18, 0x2f, 0xea, 0x12, 0x0d, 0x05, 0x3a, 0xd5, 0x07, 0x1e, 0xf2, - 0x21, 0x11, 0xf9, 0x36, 0xd3, 0xf5, 0x12, 0xf6, 0xfb, 0xf6, 0x06, 0x0f, - 0xde, 0xf9, 0x06, 0x09, 0xdf, 0xff, 0x0b, 0xf3, 0xf5, 0x01, 0xf1, 0xea, - 0xf2, 0x02, 0x12, 0xfc, 0x0e, 0xee, 0xf8, 0xeb, 0x00, 0xef, 0x21, 0x0f, - 0x09, 0xef, 0xeb, 0x1e, 0xef, 0xf2, 0x26, 0xf9, 0x17, 0xf1, 0xf1, 0xf0, - 0x0c, 0x10, 0x1d, 0xff, 0x1d, 0x06, 0x03, 0xf6, 0xfb, 0x14, 0x1b, 0x03, - 0x22, 0xfd, 0xec, 0x03, 0xfa, 0xf8, 0x01, 0x2b, 0x1e, 0x1b, 0x09, 0x09, - 0x07, 0xff, 0xf0, 0x20, 0xee, 0x14, 0xfb, 0xf6, 0xf8, 0x11, 0xd9, 0x29, - 0xf4, 0xfa, 0x07, 0xef, 0x20, 0xf9, 0xf2, 0x30, 0xee, 0xf0, 0xf3, 0xd6, - 0x0d, 0xfe, 0x03, 0x36, 0xf5, 0xd7, 0x01, 0xe6, 0x04, 0xf0, 0x05, 0x1f, - 0x0f, 0xdd, 0xff, 0xf8, 0x1f, 0xf2, 0x04, 0x37, 0xfa, 0x00, 0xfd, 0xf8, - 0x10, 0xe1, 0xfb, 0x0d, 0xed, 0xf6, 0xe2, 0xfe, 0x08, 0xfe, 0x07, 0x08, - 0x08, 0x11, 0x0a, 0xf0, 0xf8, 0xf5, 0x04, 0xea, 0x08, 0x12, 0x06, 0x0d, - 0x0f, 0x10, 0x40, 0x28, 0xc0, 0xfb, 0x3f, 0x08, 0x1d, 0x09, 0x1b, 0x3d, - 0xee, 0xf4, 0x29, 0x13, 0x20, 0xfc, 0x11, 0x4c, 0xdb, 0x02, 0x15, 0x05, - 0xec, 0xeb, 0x0a, 
0x22, 0xe7, 0x00, 0x02, 0x01, 0xd4, 0xea, 0x0a, 0xf3, - 0xe3, 0xf8, 0xf5, 0xfa, 0x01, 0x0d, 0x19, 0x06, 0x24, 0x13, 0x02, 0xf5, - 0xf1, 0xf1, 0x1b, 0x0f, 0x19, 0x04, 0xe3, 0xf9, 0xe7, 0x02, 0x29, 0xfc, - 0x29, 0xec, 0xe9, 0x04, 0xdc, 0x22, 0x1d, 0xfd, 0x1f, 0x01, 0xec, 0xe8, - 0xf5, 0x14, 0x1b, 0x19, 0x06, 0x0e, 0x02, 0x0d, 0xf9, 0x06, 0xfc, 0x15, - 0x07, 0xfa, 0x0c, 0xe1, 0x18, 0x1a, 0xe8, 0x1b, 0xe9, 0xef, 0x0a, 0x18, - 0xfc, 0x05, 0xf9, 0x14, 0xdc, 0x04, 0x01, 0xff, 0x07, 0xfd, 0xf0, 0x2c, - 0xf2, 0xec, 0x0e, 0xe7, 0x1a, 0x05, 0xe8, 0x35, 0x13, 0x09, 0xf9, 0x07, - 0xfe, 0xfa, 0x0d, 0x40, 0x0c, 0xea, 0xf4, 0x04, 0x01, 0x11, 0xfc, 0x23, - 0xeb, 0xf4, 0xe9, 0x04, 0xeb, 0xe7, 0x07, 0x09, 0xfb, 0xf1, 0xf6, 0xfd, - 0x02, 0xfa, 0x02, 0xff, 0x00, 0xff, 0xf1, 0xf1, 0x1a, 0xe9, 0x10, 0xe3, - 0x0b, 0x0c, 0x08, 0x04, 0x1b, 0x0a, 0x2b, 0x10, 0xe1, 0x01, 0x1f, 0x06, - 0x04, 0xec, 0x19, 0x49, 0xee, 0xf8, 0x22, 0x0c, 0x20, 0x02, 0x07, 0x31, - 0xe7, 0xff, 0x0f, 0xf0, 0xfd, 0xea, 0x13, 0x26, 0xce, 0xfa, 0xff, 0xee, - 0xe9, 0xfe, 0x15, 0x08, 0x04, 0x05, 0x0d, 0xfa, 0xdd, 0xf8, 0x07, 0x0b, - 0x33, 0xef, 0xec, 0xf9, 0xd9, 0xe6, 0x1d, 0x10, 0x41, 0xf6, 0xdf, 0x11, - 0xe3, 0x14, 0x1d, 0xfb, 0x2b, 0x15, 0xdc, 0x09, 0xf6, 0x05, 0x16, 0x00, - 0x1c, 0x27, 0xe4, 0xfc, 0xf7, 0x16, 0x08, 0x08, 0x2f, 0xdd, 0xf8, 0xfa, - 0xe9, 0x0e, 0x0b, 0x0b, 0x02, 0x12, 0x02, 0xfd, 0x19, 0x03, 0xeb, 0x11, - 0xf4, 0x09, 0x09, 0x15, 0x12, 0x0d, 0xef, 0x1c, 0xe4, 0xfe, 0x17, 0x0c, - 0x09, 0x04, 0xea, 0x2f, 0xf2, 0x1e, 0x02, 0xfb, 0xfe, 0xe3, 0x00, 0x2e, - 0x04, 0xf9, 0x0c, 0x05, 0x27, 0x0c, 0x07, 0x2d, 0xf7, 0x0b, 0xfb, 0xf9, - 0x1c, 0xdf, 0x11, 0x36, 0x05, 0xf2, 0x02, 0xf8, 0x0b, 0x07, 0x05, 0xfb, - 0xfc, 0x0e, 0x13, 0xfa, 0xfb, 0x09, 0xf5, 0xfd, 0x06, 0x15, 0xf9, 0x03, - 0x18, 0xfd, 0x1a, 0x0a, 0x03, 0xe2, 0xfb, 0x00, 0x1e, 0xfe, 0x4f, 0x27, - 0xe1, 0xf7, 0x31, 0xf0, 0x1b, 0xec, 0x07, 0x5f, 0xe2, 0xf8, 0x40, 0x05, - 0x17, 0x24, 0x0c, 0x3c, 0xf3, 0x10, 0x13, 0xf8, 0x0b, 0xf3, 0xf9, 0x36, - 0xe1, 0xf3, 0xf4, 
0xe8, 0xef, 0xf8, 0xfc, 0xeb, 0xe3, 0xfb, 0xf0, 0xee, - 0xdb, 0x06, 0x0c, 0x11, 0x1e, 0x10, 0xe2, 0xe9, 0xeb, 0x0d, 0x34, 0x0f, - 0x43, 0xd9, 0xef, 0x08, 0xec, 0x05, 0x1d, 0x02, 0x33, 0xef, 0xf4, 0xf7, - 0xe6, 0xf9, 0x22, 0x07, 0x04, 0x06, 0xe9, 0x02, 0xf0, 0xfc, 0x24, 0x20, - 0x24, 0x17, 0xe6, 0x0f, 0x05, 0xf6, 0xfc, 0x1f, 0xf2, 0x01, 0x0d, 0xe7, - 0xff, 0x1d, 0xf0, 0xfa, 0xd0, 0x00, 0xff, 0x0e, 0x23, 0xf9, 0xf3, 0x11, - 0xde, 0x0d, 0x05, 0x04, 0x0b, 0x0b, 0xfb, 0x26, 0x0d, 0x0d, 0xff, 0xe8, - 0x16, 0xe8, 0x0b, 0x3c, 0x18, 0xe4, 0x04, 0xff, 0xfa, 0xf3, 0xff, 0x40, - 0xee, 0x06, 0xfc, 0x0d, 0x00, 0xf7, 0x13, 0x3f, 0xf7, 0x13, 0x06, 0x08, - 0xf9, 0x13, 0xf2, 0x19, 0xfd, 0xf9, 0xf3, 0xe6, 0xfc, 0x07, 0xf6, 0xfd, - 0x0a, 0x22, 0x00, 0x01, 0x19, 0xff, 0xe7, 0xff, 0x08, 0xfd, 0x03, 0xfd, - 0x1f, 0xe7, 0x28, 0x08, 0xde, 0xf3, 0x43, 0xf6, 0x0c, 0xfe, 0x1e, 0x52, - 0xf2, 0x04, 0x17, 0xf2, 0x08, 0x0d, 0x04, 0x38, 0xde, 0x0c, 0x10, 0xef, - 0xdf, 0x0f, 0x01, 0x24, 0xde, 0xe1, 0x0d, 0xfd, 0xd4, 0xf6, 0x12, 0x0e, - 0xed, 0x01, 0xf0, 0xf3, 0xfd, 0xff, 0x18, 0xf3, 0x36, 0xda, 0xf6, 0xef, - 0xe8, 0xef, 0x37, 0x27, 0x4e, 0xf8, 0xf4, 0xff, 0xe5, 0xf3, 0x32, 0x0b, - 0x36, 0x08, 0xe9, 0xf6, 0xe2, 0x13, 0x21, 0xfe, 0x12, 0xed, 0xdd, 0xfb, - 0xf8, 0x05, 0x0f, 0x03, 0x1c, 0x04, 0xfc, 0xf2, 0x23, 0x0e, 0x03, 0xfc, - 0xf9, 0x18, 0xf7, 0x01, 0x1b, 0x03, 0xf5, 0xfd, 0xde, 0xf3, 0x19, 0xfc, - 0x11, 0x02, 0xe7, 0x13, 0xde, 0xd8, 0xf2, 0x05, 0x28, 0x02, 0x02, 0x27, - 0x07, 0x08, 0xff, 0x07, 0x27, 0x0e, 0x19, 0x40, 0xfb, 0x02, 0x0c, 0xf6, - 0x0d, 0x07, 0x0f, 0x47, 0xf8, 0x05, 0x0e, 0xfd, 0x03, 0x1e, 0x07, 0x32, - 0xe7, 0xf6, 0x24, 0x01, 0x01, 0x02, 0x0a, 0xff, 0xf6, 0x26, 0x15, 0xf0, - 0x04, 0x13, 0x03, 0xfa, 0xfe, 0xf6, 0xf1, 0x09, 0x2a, 0xe6, 0xea, 0xf6, - 0x17, 0x13, 0xeb, 0xff, 0x15, 0xeb, 0x23, 0x06, 0xc8, 0xf6, 0x33, 0xeb, - 0xf4, 0xe7, 0x12, 0x2a, 0xe3, 0xe6, 0x32, 0xfa, 0x16, 0x15, 0x17, 0x40, - 0xf1, 0x08, 0x1a, 0xf3, 0xf6, 0x0c, 0x0c, 0x11, 0xd0, 0x22, 0x02, 0xee, - 0xea, 0xf4, 0xf8, 
0xf9, 0x13, 0x10, 0x17, 0xf5, 0xf1, 0x0a, 0x0e, 0xfd, - 0x32, 0xda, 0xf1, 0xe2, 0xdb, 0xf2, 0x34, 0x1f, 0x53, 0xfc, 0xe4, 0xf2, - 0xf6, 0xf2, 0x1d, 0x04, 0x4a, 0xec, 0xee, 0x06, 0xdf, 0x01, 0x1a, 0x04, - 0x27, 0xfc, 0xe6, 0xfd, 0xd9, 0xfd, 0x0e, 0x00, 0x0c, 0x16, 0xf3, 0x03, - 0xf7, 0xfc, 0x0e, 0x0f, 0x09, 0x06, 0x06, 0x04, 0x08, 0x02, 0xed, 0xf5, - 0xe4, 0xe6, 0x07, 0x06, 0x03, 0x18, 0xea, 0x13, 0xe2, 0xfa, 0x10, 0xf2, - 0x02, 0xec, 0x03, 0x3c, 0xf6, 0xf6, 0x0a, 0x10, 0x09, 0xf8, 0x15, 0x24, - 0xfd, 0x0d, 0x09, 0x01, 0x00, 0xff, 0x00, 0x1a, 0xf0, 0xee, 0x08, 0x03, - 0x1d, 0x05, 0x16, 0x46, 0xe6, 0xf8, 0x08, 0x00, 0x09, 0x09, 0xff, 0x01, - 0xfc, 0x20, 0xfc, 0xec, 0x05, 0x1b, 0x03, 0xf1, 0x12, 0xe4, 0xfa, 0x24, - 0x1c, 0xf5, 0xf2, 0x05, 0x11, 0xe7, 0xfa, 0x02, 0x20, 0xea, 0x31, 0x10, - 0xcf, 0xd8, 0x33, 0xee, 0xff, 0x09, 0x20, 0x3f, 0xe2, 0x0a, 0x29, 0xee, - 0x3a, 0xf2, 0x1e, 0x39, 0x02, 0x1e, 0xfe, 0xf2, 0xef, 0xe2, 0x0d, 0x0f, - 0xf1, 0x19, 0x02, 0xe7, 0xec, 0xff, 0xfe, 0xe4, 0xfe, 0xfb, 0x02, 0xf6, - 0xf1, 0xf4, 0x07, 0x1a, 0x2a, 0xf9, 0x06, 0xf9, 0xda, 0xf4, 0x22, 0x02, - 0x4f, 0x0a, 0xf3, 0xfc, 0xf3, 0xf6, 0x25, 0x0a, 0x28, 0x01, 0xf7, 0x09, - 0xe6, 0x05, 0x28, 0xf7, 0x1e, 0xf2, 0xee, 0x13, 0xee, 0x05, 0x0f, 0x0a, - 0x09, 0xe8, 0xe8, 0x0e, 0x05, 0x12, 0x0f, 0x15, 0x02, 0xec, 0xf8, 0x02, - 0xf7, 0x05, 0xf8, 0xff, 0xdc, 0x00, 0x01, 0x00, 0x12, 0x17, 0xec, 0x19, - 0xfa, 0x09, 0xfa, 0xf3, 0x1d, 0x0b, 0x07, 0x25, 0xea, 0x0c, 0xf5, 0xfa, - 0x04, 0xf7, 0xfe, 0x33, 0xfe, 0x14, 0xef, 0x04, 0xf0, 0x00, 0x00, 0x3a, - 0xea, 0xfa, 0x10, 0x01, 0xe4, 0x00, 0xff, 0x23, 0xe9, 0x26, 0x15, 0x10, - 0x04, 0x14, 0x0d, 0x08, 0xf8, 0xfd, 0x10, 0xfb, 0x00, 0x21, 0x06, 0xfa, - 0x0f, 0x08, 0xf1, 0x09, 0x28, 0xf0, 0xd8, 0x0d, 0x08, 0x09, 0x02, 0xfb, - 0x12, 0x03, 0x0e, 0xfb, 0xce, 0xf0, 0x39, 0xe5, 0x09, 0xf6, 0x1f, 0x35, - 0xdd, 0x1c, 0x25, 0xef, 0x17, 0x0c, 0xf6, 0x3e, 0xf0, 0x21, 0x08, 0xff, - 0xd7, 0xfc, 0xfd, 0x1f, 0xe5, 0x18, 0x12, 0xe9, 0xf5, 0xe9, 0x12, 0xf6, - 0x02, 0x13, 0xf4, 
0x0a, 0xfd, 0x03, 0x09, 0x08, 0x2f, 0x07, 0xee, 0xfd, - 0xd7, 0x00, 0x2b, 0x29, 0x3b, 0xdb, 0xde, 0xf1, 0xe1, 0xf7, 0x47, 0x12, - 0x35, 0x0c, 0xe4, 0x09, 0xef, 0x17, 0x2b, 0xea, 0x2d, 0xf8, 0xe8, 0x18, - 0xef, 0x03, 0x11, 0x0a, 0x10, 0xff, 0xe8, 0x07, 0x0c, 0x07, 0x03, 0x18, - 0x05, 0x08, 0xf8, 0xf8, 0x06, 0x18, 0xe9, 0xf9, 0xe0, 0x0f, 0x0d, 0x18, - 0x04, 0x01, 0xf0, 0x1c, 0xf6, 0x14, 0xfd, 0x12, 0x0c, 0x0c, 0x02, 0x34, - 0xf6, 0xe6, 0xfd, 0xf9, 0xf9, 0xfd, 0x00, 0x2a, 0xfc, 0xf9, 0xff, 0x0a, - 0xfe, 0x1b, 0xf5, 0x34, 0xdc, 0xf9, 0x15, 0x13, 0xe7, 0x1b, 0xf7, 0x25, - 0xfd, 0x09, 0x08, 0x0a, 0xf0, 0x17, 0x0f, 0x04, 0xf4, 0xe9, 0x06, 0x07, - 0xf5, 0x02, 0xfc, 0xf5, 0x09, 0xee, 0xf1, 0x07, 0x38, 0x03, 0x05, 0x0f, - 0x16, 0x0f, 0xed, 0xff, 0x21, 0xf8, 0x34, 0x07, 0xd1, 0xf9, 0x27, 0x00, - 0x0c, 0x21, 0x18, 0x42, 0xe6, 0x02, 0x1a, 0xf1, 0x2f, 0xf1, 0x0e, 0x3b, - 0xee, 0xf8, 0x08, 0xea, 0xfe, 0xf9, 0x03, 0x18, 0xf5, 0xf8, 0x0d, 0xeb, - 0x01, 0x10, 0x09, 0x02, 0x15, 0xfb, 0xf1, 0x0b, 0xf2, 0x06, 0x08, 0x09, - 0x2f, 0x19, 0x02, 0xfe, 0xe4, 0x06, 0x1f, 0x17, 0x49, 0xf2, 0xe2, 0x02, - 0xef, 0x04, 0x26, 0x16, 0x3f, 0x08, 0xf1, 0x0a, 0xfd, 0xf9, 0x28, 0x01, - 0x15, 0x0b, 0xf9, 0x10, 0xdc, 0x02, 0x20, 0xf7, 0x16, 0xe6, 0x09, 0x03, - 0xf1, 0xf5, 0x12, 0x1c, 0xfb, 0x2a, 0x08, 0xfa, 0x0a, 0x16, 0xf6, 0x15, - 0xf0, 0x06, 0x11, 0xfd, 0x0e, 0xf9, 0xf6, 0x12, 0xed, 0xf3, 0xfd, 0x1f, - 0x0b, 0xfa, 0x08, 0x30, 0xf8, 0xff, 0x0b, 0xeb, 0x10, 0xff, 0x07, 0x22, - 0x0d, 0x07, 0x09, 0x03, 0xf6, 0xf8, 0xfc, 0x26, 0xf8, 0xee, 0x11, 0x02, - 0x03, 0x0a, 0xef, 0x38, 0xfe, 0x13, 0x1b, 0x09, 0xfe, 0x06, 0x05, 0xf3, - 0x04, 0xdf, 0xfc, 0x00, 0xe7, 0x15, 0xec, 0xf1, 0xf8, 0xfc, 0xed, 0x05, - 0x0e, 0xf3, 0x15, 0x09, 0x01, 0x0d, 0xfd, 0x00, 0x24, 0xe2, 0x31, 0x13, - 0xd5, 0x1b, 0x2b, 0xe8, 0x03, 0x08, 0x1d, 0x33, 0xdc, 0xfd, 0x24, 0xe4, - 0x20, 0xfa, 0x07, 0x33, 0x01, 0x12, 0x06, 0xf5, 0xef, 0xf7, 0xfa, 0x13, - 0x01, 0xec, 0xee, 0xe0, 0xfd, 0x0d, 0xff, 0x09, 0xf6, 0x00, 0xed, 0x07, - 0xea, 0x0e, 0xff, 
0x0e, 0x26, 0xfc, 0xf0, 0xe7, 0xe7, 0xfe, 0x30, 0xff, - 0x24, 0x04, 0x06, 0xf4, 0xf5, 0xf8, 0x23, 0x0e, 0x3d, 0xf2, 0xfd, 0x04, - 0xe8, 0xfb, 0x23, 0xfe, 0x33, 0xe1, 0x01, 0xfd, 0xdc, 0xfb, 0x0e, 0xfa, - 0x22, 0xfb, 0x11, 0xfa, 0xff, 0x08, 0x21, 0x30, 0x13, 0x03, 0xf2, 0x03, - 0xf8, 0x0f, 0xec, 0x0d, 0xef, 0x0f, 0x10, 0x10, 0x0f, 0xf6, 0xf9, 0x1e, - 0xf7, 0xe5, 0x08, 0xfa, 0x09, 0xff, 0x00, 0x15, 0x02, 0x00, 0x08, 0xfe, - 0xfb, 0x0e, 0x15, 0x28, 0xfa, 0xfb, 0x13, 0x06, 0xfb, 0x05, 0xf6, 0x11, - 0xf6, 0x0b, 0x06, 0x15, 0xe1, 0x00, 0xe9, 0x0f, 0xe1, 0x1d, 0x18, 0xfd, - 0x0b, 0x0f, 0xff, 0xf2, 0xf5, 0xfd, 0x14, 0xff, 0xf4, 0xfe, 0xe2, 0xf8, - 0x14, 0x0b, 0xeb, 0x07, 0x35, 0xe2, 0xeb, 0x0b, 0x04, 0x22, 0xfe, 0x0e, - 0x1d, 0xf2, 0x24, 0x11, 0xcc, 0xec, 0x25, 0xf7, 0xff, 0xf9, 0x06, 0x29, - 0xe4, 0x07, 0x1c, 0xdb, 0xf8, 0x1d, 0xfa, 0x44, 0xf2, 0x01, 0x0f, 0xe6, - 0x11, 0x03, 0xee, 0x17, 0x06, 0xe0, 0x0c, 0xd8, 0xe9, 0xfd, 0x11, 0xfe, - 0x07, 0xdd, 0xea, 0xff, 0xde, 0xdd, 0x0a, 0x09, 0x30, 0xf2, 0x01, 0xe4, - 0xe0, 0xeb, 0x2d, 0x12, 0x2d, 0xeb, 0xfc, 0xf0, 0xe8, 0xf9, 0x1f, 0x08, - 0x3f, 0xeb, 0x0e, 0x13, 0xf9, 0x0c, 0x1c, 0x02, 0x25, 0xec, 0xf6, 0x05, - 0xf3, 0xf4, 0x18, 0x08, 0x12, 0xe9, 0xfb, 0xfd, 0xf9, 0x08, 0x13, 0x1c, - 0x08, 0xec, 0xfe, 0x02, 0xf1, 0x19, 0xf3, 0x1d, 0xf1, 0x07, 0x11, 0x12, - 0xfa, 0xf2, 0xf6, 0x0d, 0xff, 0x17, 0x0a, 0xfb, 0x1f, 0xf8, 0x11, 0x24, - 0xf6, 0xfc, 0xfe, 0x07, 0xed, 0x05, 0x1c, 0x21, 0xfe, 0xfe, 0x16, 0x0d, - 0x08, 0x0f, 0x09, 0x33, 0xf4, 0x1f, 0x14, 0x0c, 0xfe, 0xf5, 0xeb, 0x2a, - 0xee, 0xf3, 0x12, 0x19, 0xec, 0x01, 0x06, 0xf7, 0x05, 0x22, 0x0b, 0xeb, - 0xeb, 0x06, 0xe1, 0xf5, 0x0d, 0xee, 0xfb, 0x0a, 0x31, 0xff, 0xe3, 0xea, - 0x18, 0x09, 0xe3, 0x07, 0x1a, 0xf8, 0x15, 0xfc, 0xcc, 0xf2, 0x2a, 0xe5, - 0x01, 0xea, 0x10, 0x1f, 0xd9, 0x02, 0x13, 0xf6, 0x16, 0x01, 0x0e, 0x3c, - 0x02, 0x17, 0x04, 0xf1, 0xf7, 0x02, 0x07, 0x0c, 0x02, 0x1f, 0xf4, 0xe6, - 0xf0, 0xe9, 0x05, 0xf4, 0xfd, 0xe4, 0xf7, 0xe9, 0xfc, 0xef, 0x06, 0x02, - 0x26, 0xf1, 0xf1, 
0xeb, 0xe9, 0xe6, 0x30, 0x1c, 0x38, 0x0f, 0x03, 0xf1, - 0x10, 0x04, 0x30, 0x19, 0x1f, 0xfb, 0xfc, 0x05, 0xe2, 0xfe, 0x18, 0xf2, - 0x1c, 0xf2, 0xf5, 0x0e, 0xf2, 0x05, 0x1d, 0x28, 0x12, 0xf0, 0xf0, 0x0f, - 0x0a, 0x03, 0x1a, 0x1a, 0xf3, 0x08, 0x13, 0xef, 0xf5, 0x1c, 0x06, 0x00, - 0xee, 0x12, 0x1d, 0x03, 0x18, 0x06, 0x0a, 0x0e, 0xf0, 0xeb, 0xfa, 0x0d, - 0x08, 0xff, 0x06, 0x24, 0x0f, 0x03, 0x0a, 0x0f, 0x0e, 0xff, 0x08, 0x33, - 0xfc, 0x00, 0x0e, 0xfb, 0xfb, 0x05, 0x07, 0x19, 0xe8, 0xe7, 0x12, 0x11, - 0x15, 0xf7, 0x0c, 0x1a, 0xf6, 0x28, 0x08, 0xeb, 0xf2, 0x25, 0xee, 0x01, - 0x03, 0xec, 0xed, 0xfa, 0xf0, 0xf2, 0xef, 0xf1, 0x02, 0x23, 0xef, 0x01, - 0x41, 0xfa, 0xf4, 0xf4, 0x15, 0xf5, 0xf5, 0xf9, 0x28, 0xde, 0x20, 0xf6, - 0xc7, 0xde, 0x21, 0xe4, 0xfe, 0xec, 0x0d, 0x2c, 0xee, 0x24, 0x10, 0xf0, - 0x1d, 0x12, 0x0e, 0x2b, 0x06, 0xf8, 0xfd, 0x01, 0x08, 0xef, 0xfd, 0x0f, - 0xeb, 0xed, 0xe1, 0xdf, 0xf1, 0xe5, 0x16, 0xe3, 0x08, 0xfc, 0xf6, 0xf6, - 0xd8, 0xf0, 0x23, 0xfc, 0x2b, 0xf5, 0xff, 0xe7, 0xf4, 0xe9, 0x29, 0x09, - 0x2b, 0x0c, 0xff, 0x08, 0x0b, 0xed, 0x29, 0x14, 0x3c, 0xf5, 0xeb, 0x18, - 0xf6, 0x10, 0x22, 0xf9, 0x17, 0x23, 0x02, 0x0c, 0xf6, 0xfa, 0x2f, 0xfe, - 0x1e, 0xeb, 0xfd, 0x03, 0xf0, 0x07, 0x1c, 0x09, 0xfa, 0xe1, 0x0d, 0x0f, - 0x18, 0x03, 0xfe, 0xf0, 0xec, 0x0b, 0x10, 0x02, 0x14, 0x06, 0xef, 0xf7, - 0xea, 0x0b, 0x05, 0xfe, 0x1f, 0x06, 0x0e, 0x07, 0x00, 0xe1, 0x01, 0x01, - 0x07, 0x05, 0x09, 0xf7, 0xef, 0x15, 0xf7, 0x12, 0x05, 0x03, 0x04, 0x1d, - 0x04, 0x10, 0x12, 0x06, 0x05, 0x00, 0x08, 0x18, 0xd6, 0xf2, 0xfa, 0x07, - 0xf8, 0x12, 0x07, 0xfd, 0xdd, 0x00, 0x04, 0xfb, 0xf8, 0x09, 0xf3, 0x09, - 0xfb, 0xf0, 0xe8, 0x09, 0x27, 0xf5, 0xf8, 0x06, 0x01, 0x02, 0x0e, 0xf6, - 0x1f, 0xfa, 0x29, 0xf8, 0xd6, 0x01, 0x22, 0xf8, 0x1d, 0xe3, 0x1a, 0x39, - 0x0a, 0x0d, 0x19, 0xf5, 0x12, 0xfb, 0x1d, 0x2a, 0x03, 0xf6, 0x0c, 0xf2, - 0xfd, 0xec, 0x18, 0x13, 0xfe, 0x1a, 0xe8, 0xdd, 0x01, 0xf8, 0x30, 0x01, - 0xf8, 0xfe, 0xe4, 0xe7, 0xff, 0xeb, 0x23, 0xfa, 0x2c, 0xf0, 0xfc, 0xe7, - 0x0a, 0xf8, 0x18, 
0x10, 0x23, 0x01, 0xfa, 0xe8, 0xf1, 0xfa, 0x1d, 0x0e, - 0x17, 0xe7, 0xe4, 0xf5, 0xf9, 0x0c, 0x17, 0x0c, 0x13, 0xe8, 0xe1, 0x17, - 0x19, 0x05, 0x0b, 0x0f, 0x23, 0xed, 0xff, 0xfe, 0xe0, 0x14, 0x16, 0x00, - 0x0d, 0x1c, 0x0b, 0xf5, 0xfb, 0x18, 0xee, 0xff, 0xff, 0xf3, 0x18, 0x0c, - 0x05, 0xfa, 0xf6, 0xfe, 0xfe, 0xf8, 0xf8, 0x09, 0xef, 0xf8, 0x0e, 0xf0, - 0x00, 0xf8, 0x0c, 0xf8, 0xf6, 0x07, 0x16, 0x11, 0xf8, 0xea, 0xff, 0xff, - 0x01, 0x20, 0x07, 0x08, 0xfd, 0x1c, 0xfc, 0x06, 0xed, 0x0d, 0x08, 0x15, - 0xf0, 0x25, 0x01, 0x1b, 0x00, 0x02, 0xfe, 0x01, 0x05, 0x01, 0xfd, 0xf1, - 0xe5, 0x0c, 0xe4, 0xe1, 0xf0, 0xfa, 0xee, 0x0e, 0x35, 0xee, 0x15, 0xef, - 0x0a, 0xf9, 0x01, 0xf5, 0x1f, 0x05, 0x1f, 0x0d, 0xe1, 0xf4, 0xff, 0xf5, - 0x23, 0x02, 0x18, 0x30, 0xfc, 0xf0, 0x0d, 0x04, 0x0d, 0x06, 0x29, 0x1d, - 0xf9, 0x08, 0x06, 0xe5, 0x13, 0xfd, 0x0d, 0x26, 0xef, 0x09, 0xdc, 0xf2, - 0x05, 0xdf, 0x0c, 0xf6, 0xf3, 0xd9, 0xf8, 0x08, 0xef, 0xeb, 0x0f, 0xf9, - 0x3a, 0x03, 0xff, 0xe0, 0xf7, 0xf0, 0x15, 0x12, 0x41, 0x0b, 0xf1, 0x04, - 0x04, 0xe2, 0x0e, 0x0b, 0x2c, 0x03, 0xea, 0x02, 0xfb, 0xe7, 0x08, 0xe9, - 0x22, 0xf3, 0xf2, 0x1c, 0xfa, 0xf3, 0x11, 0x04, 0x1f, 0xf5, 0x02, 0x0f, - 0x1a, 0x1f, 0x24, 0x0b, 0x06, 0x1f, 0xf3, 0x06, 0x00, 0x02, 0xe8, 0xf6, - 0xf4, 0xe8, 0x07, 0x2e, 0xfb, 0xf8, 0x10, 0x09, 0xf0, 0x0e, 0xff, 0xfe, - 0x1c, 0x14, 0x17, 0x06, 0xe2, 0xf1, 0xfa, 0x01, 0x11, 0x13, 0x12, 0x29, - 0xf1, 0x0f, 0x1f, 0xfa, 0xfd, 0xfd, 0x02, 0x07, 0x0e, 0xfb, 0x0e, 0x04, - 0x01, 0x01, 0xed, 0xfe, 0xde, 0xfd, 0x08, 0xef, 0xf6, 0x0a, 0xff, 0x0f, - 0xe7, 0xf2, 0x0f, 0x02, 0xea, 0x10, 0xf9, 0xec, 0xfd, 0x09, 0xea, 0x1f, - 0x46, 0xdd, 0xe2, 0xf7, 0x08, 0xf5, 0xf7, 0xe9, 0x33, 0xfb, 0x2f, 0xf6, - 0xb5, 0x1d, 0x15, 0xeb, 0x11, 0xf7, 0x2a, 0x2e, 0x08, 0x1d, 0xf4, 0xfb, - 0x15, 0xfa, 0x22, 0x34, 0xff, 0x06, 0xf6, 0xfd, 0xfa, 0xf9, 0x03, 0xf5, - 0xf4, 0xf4, 0xd5, 0xea, 0x01, 0x08, 0x22, 0xf1, 0xf2, 0x06, 0xd1, 0xe5, - 0x0c, 0xef, 0x12, 0x03, 0x08, 0x02, 0xf7, 0x05, 0x1b, 0x07, 0x39, 0x34, - 0x21, 0xe2, 0xe3, 
0x0b, 0x0c, 0xf6, 0x29, 0xf7, 0x24, 0x0a, 0xfc, 0xff, - 0x1a, 0xfd, 0x05, 0xff, 0xff, 0x0e, 0x0a, 0x1a, 0x09, 0xfb, 0x15, 0x04, - 0x03, 0xf7, 0xfe, 0x00, 0xfc, 0xfb, 0x11, 0xfa, 0x1d, 0x0e, 0x06, 0xed, - 0xfc, 0x23, 0xd8, 0xf2, 0x04, 0xe5, 0x0f, 0x16, 0x29, 0xfe, 0xf5, 0xec, - 0xe2, 0x0e, 0xeb, 0x09, 0x1d, 0x11, 0x05, 0x11, 0xe4, 0x29, 0x12, 0x02, - 0x12, 0x19, 0x0e, 0x1a, 0xee, 0xf9, 0x05, 0x09, 0xf5, 0xfd, 0x05, 0x04, - 0xe4, 0xf1, 0x17, 0x01, 0xf2, 0xfe, 0x0b, 0xf4, 0x0d, 0x04, 0x06, 0xfe, - 0xff, 0xec, 0xe9, 0x00, 0xff, 0x03, 0x03, 0xfd, 0xf1, 0x15, 0xfc, 0xf3, - 0xff, 0xfe, 0x09, 0xee, 0x3c, 0x01, 0xec, 0x02, 0xf0, 0xf6, 0x20, 0xeb, - 0x16, 0x07, 0x32, 0xf3, 0xce, 0xf0, 0x02, 0xd4, 0x11, 0xe6, 0x28, 0x0e, - 0xe3, 0x21, 0xee, 0xce, 0x1e, 0xd9, 0x23, 0x26, 0x06, 0xfa, 0xf9, 0xf1, - 0x01, 0xe6, 0x0b, 0x07, 0xdc, 0x21, 0xbc, 0xe3, 0xef, 0xf8, 0x12, 0xfc, - 0xe6, 0xfe, 0xf5, 0xd4, 0x15, 0x0a, 0x00, 0x13, 0xfc, 0xec, 0xf3, 0xd6, - 0x1a, 0xe3, 0x21, 0x36, 0x2a, 0x03, 0xe9, 0xe3, 0xff, 0x00, 0x13, 0x1c, - 0x0e, 0x20, 0xe5, 0xf5, 0x24, 0x0b, 0x20, 0x14, 0x13, 0xf8, 0x04, 0x1b, - 0x2f, 0x0a, 0x15, 0x00, 0xf4, 0x1a, 0x11, 0x0d, 0x03, 0x18, 0x0f, 0x18, - 0x04, 0x1f, 0xfb, 0xf2, 0x1f, 0x15, 0x03, 0xfb, 0x0b, 0x17, 0xfb, 0x0b, - 0x1b, 0x1f, 0xf4, 0x07, 0xf9, 0xf9, 0xf8, 0xf4, 0x14, 0x0f, 0xf6, 0xfe, - 0xdd, 0x0b, 0xff, 0x01, 0x18, 0x04, 0x1b, 0x0a, 0xed, 0xe7, 0xf9, 0x16, - 0x02, 0x01, 0x00, 0xf7, 0xf1, 0x07, 0xf0, 0x06, 0xf8, 0x0b, 0x02, 0xf3, - 0xff, 0x20, 0xfd, 0x01, 0x04, 0xf5, 0xd9, 0xf4, 0xf4, 0xf2, 0xe8, 0xff, - 0x04, 0x00, 0xf0, 0xe2, 0xfe, 0xed, 0x1b, 0xef, 0x20, 0xfa, 0xfb, 0xf4, - 0x02, 0x18, 0x07, 0xfb, 0xef, 0xe4, 0x08, 0x0d, 0xe1, 0x0e, 0x25, 0xc6, - 0xfd, 0x0c, 0x1c, 0x0b, 0xf0, 0x01, 0x1c, 0xd4, 0x11, 0xf5, 0x1b, 0x09, - 0xfb, 0xda, 0x13, 0xe3, 0xf9, 0x10, 0x14, 0xf0, 0xf0, 0xfd, 0x1f, 0xcf, - 0xf4, 0xe4, 0xfb, 0x0e, 0x0a, 0x11, 0xed, 0xdc, 0xfc, 0xe6, 0xf7, 0xfc, - 0x13, 0xe1, 0x0b, 0xe4, 0x04, 0x11, 0xee, 0x21, 0x14, 0xe1, 0x07, 0xe4, - 0xfb, 0x08, 0x03, 
0x2b, 0x27, 0xf6, 0x0d, 0x02, 0x1b, 0x09, 0x09, 0xf8, - 0x14, 0x19, 0x0f, 0x0b, 0x01, 0x10, 0x09, 0x12, 0x03, 0xf5, 0x18, 0xf3, - 0xfb, 0xf5, 0x02, 0x0e, 0x0d, 0x00, 0x07, 0xfc, 0x18, 0x25, 0x0b, 0xf0, - 0xf9, 0xe6, 0x08, 0x01, 0x24, 0x14, 0xfa, 0xed, 0xe5, 0x1f, 0x09, 0xfe, - 0x08, 0xee, 0x1a, 0x1a, 0x05, 0x00, 0xff, 0x0c, 0xfe, 0xf9, 0x11, 0x11, - 0xea, 0xfe, 0x08, 0xf9, 0xf0, 0xe4, 0x01, 0x0d, 0xf1, 0x00, 0x0b, 0xea, - 0x19, 0xea, 0xf3, 0xf8, 0x08, 0x12, 0x1c, 0x1f, 0xfb, 0xef, 0xf0, 0xf2, - 0x14, 0xe1, 0x03, 0xfa, 0xf9, 0xda, 0xe9, 0xfc, 0xf3, 0xff, 0x12, 0x04, - 0xf7, 0xfc, 0x17, 0x0f, 0xfc, 0x29, 0x03, 0xe5, 0xf2, 0xee, 0x1e, 0xfa, - 0x04, 0xed, 0x25, 0xf4, 0xe1, 0x15, 0x10, 0x1e, 0xef, 0x1c, 0x04, 0xde, - 0xe5, 0x08, 0x21, 0xfd, 0xfd, 0xea, 0x03, 0xca, 0xda, 0x26, 0x00, 0x0a, - 0xfd, 0x05, 0xf0, 0xd4, 0xe1, 0x1a, 0xe4, 0xf5, 0x07, 0xe7, 0xfa, 0xdf, - 0xd4, 0x03, 0xf0, 0x10, 0x15, 0x0c, 0xf4, 0xed, 0xe3, 0xfb, 0x0f, 0x1e, - 0x16, 0x09, 0x00, 0xec, 0xea, 0x13, 0x16, 0x0b, 0x01, 0xfb, 0xff, 0x00, - 0xfb, 0x07, 0x13, 0x08, 0xf4, 0xe4, 0x12, 0x00, 0xfb, 0xfa, 0xfc, 0x08, - 0xeb, 0x19, 0x02, 0x1c, 0xe8, 0x26, 0xf3, 0x10, 0x09, 0x0f, 0x19, 0x02, - 0xfb, 0xec, 0xf7, 0xe2, 0xfb, 0xfa, 0x11, 0xf3, 0x0b, 0x08, 0xff, 0xd9, - 0xf8, 0x12, 0x18, 0x06, 0x07, 0x22, 0xff, 0x19, 0xf5, 0x0b, 0x0a, 0x13, - 0xf2, 0xfa, 0x02, 0x21, 0xeb, 0x11, 0x17, 0x17, 0xec, 0xe1, 0x0e, 0xf7, - 0xe8, 0xd8, 0x0e, 0x01, 0xf1, 0xed, 0xed, 0xf0, 0x09, 0xf7, 0xe7, 0xfd, - 0xf0, 0xf9, 0xdb, 0xee, 0xdc, 0xfb, 0xf8, 0x0a, 0xf5, 0x0b, 0xd4, 0xd7, - 0x08, 0x06, 0x18, 0x06, 0x0c, 0x13, 0xfd, 0x09, 0x13, 0x26, 0x12, 0xf4, - 0xef, 0x00, 0xf5, 0x28, 0x18, 0xfe, 0x04, 0x0e, 0x21, 0x1a, 0x0a, 0x1e, - 0x09, 0xf0, 0x0d, 0x0f, 0xec, 0xf3, 0x17, 0x22, 0x00, 0xec, 0x0e, 0x01, - 0xe9, 0x08, 0x09, 0xf2, 0xf2, 0x08, 0xf0, 0x0b, 0xd9, 0x09, 0x14, 0xf5, - 0xf6, 0x04, 0x19, 0xf4, 0x11, 0xe9, 0xf2, 0x0d, 0x20, 0x17, 0x0a, 0x05, - 0x0c, 0x04, 0x01, 0xfd, 0xf4, 0xfb, 0x1b, 0x0c, 0xf2, 0x0b, 0xff, 0xfe, - 0x01, 0xd8, 0xfa, 
0x0e, 0xf5, 0x14, 0xf9, 0x01, 0x04, 0xf8, 0xfa, 0x02, - 0xe8, 0xf9, 0xf9, 0xea, 0xf1, 0x07, 0xff, 0x1e, 0x01, 0x0b, 0xf7, 0x0a, - 0xf7, 0x0c, 0xfd, 0xec, 0xf3, 0x05, 0xf8, 0xda, 0x0b, 0x15, 0xf6, 0xee, - 0xf9, 0x10, 0xfa, 0xfe, 0x08, 0xf0, 0xe6, 0xec, 0x05, 0xff, 0x15, 0x19, - 0x1f, 0x11, 0xfc, 0x09, 0x08, 0x01, 0x06, 0xfe, 0x04, 0x08, 0xfb, 0xfb, - 0x08, 0xf4, 0xf6, 0x28, 0x10, 0xf9, 0x28, 0x0b, 0xf8, 0x0d, 0x01, 0x00, - 0xff, 0x02, 0x05, 0x08, 0xea, 0xe9, 0xf4, 0xf6, 0x01, 0xea, 0xdf, 0x1f, - 0xfe, 0x0a, 0xf9, 0xf7, 0x0c, 0x1b, 0x06, 0xed, 0xf6, 0xf2, 0x03, 0x03, - 0xfd, 0x04, 0xf5, 0x10, 0x0a, 0x0b, 0xf4, 0xf8, 0xf1, 0xe7, 0x05, 0xfe, - 0xe7, 0x0b, 0xf1, 0xec, 0xf4, 0xec, 0x06, 0xee, 0xde, 0x05, 0x1b, 0xfe, - 0x13, 0xf3, 0xd9, 0xea, 0x04, 0x10, 0x05, 0xed, 0x15, 0x02, 0x0b, 0x10, - 0xfa, 0x02, 0x05, 0x0b, 0x02, 0x07, 0xfc, 0xf5, 0x15, 0x14, 0x05, 0xf7, - 0x0c, 0xfe, 0xf6, 0xf4, 0xfa, 0x06, 0xfc, 0x13, 0xdc, 0xe4, 0x09, 0xfa, - 0x02, 0x23, 0xec, 0x06, 0x11, 0x13, 0xf8, 0xfa, 0x27, 0x28, 0x0b, 0x23, - 0xec, 0xf1, 0x09, 0x17, 0x0f, 0x13, 0xff, 0xf2, 0xfc, 0x0a, 0xf5, 0x0d, - 0x03, 0x26, 0x01, 0x0f, 0xfe, 0xf1, 0xfb, 0xe6, 0xf0, 0x02, 0xf2, 0xff, - 0x02, 0x11, 0xff, 0xfd, 0x1c, 0x02, 0x0b, 0xf6, 0x14, 0x0c, 0x0b, 0x21, - 0x28, 0xf0, 0x11, 0x05, 0x06, 0xed, 0xf9, 0x0a, 0xf2, 0xef, 0xf8, 0xf1, - 0xfe, 0x0d, 0xf9, 0xf7, 0xea, 0x00, 0x08, 0xdb, 0x02, 0x0f, 0xfe, 0x04, - 0xef, 0x20, 0x16, 0x01, 0xe8, 0xed, 0xe4, 0x22, 0xf6, 0x19, 0x00, 0x04, - 0x01, 0x13, 0xeb, 0x0d, 0xec, 0x01, 0x08, 0x05, 0x0c, 0x0e, 0xfe, 0x02, - 0x12, 0xf7, 0x27, 0xf9, 0xfd, 0x18, 0xfe, 0x24, 0xf7, 0x13, 0xed, 0x1e, - 0x09, 0xff, 0xd8, 0xf4, 0x12, 0xf8, 0x04, 0x0c, 0x1c, 0x11, 0xfd, 0x17, - 0x1d, 0x01, 0x13, 0xee, 0x11, 0xf3, 0xf8, 0x06, 0xf6, 0x16, 0xfe, 0x15, - 0x16, 0xdc, 0x1f, 0x00, 0x25, 0xee, 0xff, 0xf7, 0xf6, 0x02, 0xdd, 0x15, - 0xf1, 0x14, 0x08, 0xe8, 0xe5, 0x21, 0xea, 0xf0, 0x1a, 0x07, 0xea, 0x08, - 0xea, 0xe4, 0x1e, 0x00, 0x13, 0x17, 0xec, 0x11, 0xd6, 0x11, 0x18, 0x17, - 0x04, 0x15, 0x03, 
0x3a, 0xd6, 0x02, 0x07, 0x04, 0xe6, 0xe5, 0xfe, 0x0e, - 0xff, 0xed, 0xfc, 0xfb, 0xff, 0x1c, 0x06, 0x0a, 0xfb, 0xf9, 0xea, 0x1a, - 0x21, 0xf5, 0x04, 0x06, 0x0a, 0xe3, 0x16, 0xea, 0x04, 0xe2, 0xf9, 0xf9, - 0xe6, 0xfb, 0x0f, 0xfc, 0x06, 0xfb, 0x10, 0x07, 0x07, 0x13, 0x07, 0xfc, - 0x16, 0xef, 0x07, 0xdc, 0x12, 0x1f, 0x08, 0xf4, 0xe9, 0x14, 0x06, 0xf7, - 0xf1, 0x0c, 0x01, 0x0c, 0xe6, 0x04, 0xf3, 0xf2, 0xe5, 0xf3, 0xef, 0x1d, - 0xf6, 0x20, 0x07, 0xfe, 0xf4, 0x05, 0xee, 0x10, 0xfd, 0x0e, 0x0b, 0x02, - 0x0d, 0xd8, 0x07, 0xfb, 0x26, 0x0a, 0x1c, 0x21, 0x06, 0x1f, 0xf4, 0x06, - 0x37, 0x18, 0xfa, 0x16, 0x1e, 0x24, 0xfb, 0xf0, 0x12, 0xf9, 0x02, 0x09, - 0x17, 0x16, 0xf3, 0xf9, 0x17, 0xf2, 0x02, 0x0a, 0x2d, 0xe7, 0xe3, 0x25, - 0xf0, 0xf9, 0x0f, 0xdd, 0x15, 0xe6, 0x04, 0xfc, 0xf1, 0x17, 0x0a, 0xea, - 0x24, 0x07, 0xf1, 0x11, 0x13, 0x29, 0xf4, 0xc5, 0xfb, 0x07, 0xef, 0x13, - 0x0b, 0xe1, 0xf1, 0xeb, 0xf8, 0x1b, 0x09, 0x08, 0x1f, 0x15, 0xf2, 0x05, - 0x02, 0xdd, 0x09, 0x0f, 0x16, 0x10, 0x01, 0x30, 0xf2, 0xe0, 0x27, 0xfe, - 0xf1, 0x0e, 0x0e, 0x07, 0xe6, 0x07, 0x0b, 0x18, 0xfe, 0x0f, 0x01, 0x07, - 0xf4, 0x07, 0x10, 0xe7, 0xfb, 0xf3, 0xf7, 0x0b, 0xf9, 0x15, 0x18, 0x25, - 0x0c, 0x14, 0x02, 0x08, 0x0a, 0x0f, 0x10, 0xec, 0xee, 0x1a, 0x03, 0x14, - 0x0f, 0xfa, 0x25, 0xff, 0x18, 0x0d, 0x0b, 0xea, 0x1f, 0x28, 0x10, 0x0c, - 0xe7, 0xee, 0xf7, 0xfa, 0x03, 0x15, 0x0c, 0x1d, 0x01, 0x00, 0x12, 0xee, - 0x01, 0xf1, 0xf8, 0x0b, 0xf3, 0xfd, 0x04, 0xf8, 0x02, 0x1e, 0x0e, 0xf3, - 0x02, 0x10, 0xfd, 0x07, 0x0b, 0x09, 0x03, 0x10, 0x3e, 0x08, 0x0e, 0x0c, - 0xf4, 0xe7, 0xfd, 0x1c, 0x27, 0x1a, 0xed, 0xe1, 0x08, 0xdc, 0xd9, 0xf1, - 0x1e, 0x07, 0x12, 0xf1, 0x10, 0xfb, 0xc8, 0x08, 0x0f, 0x03, 0x1d, 0xdc, - 0x23, 0x04, 0xf9, 0x0a, 0xff, 0x08, 0x0e, 0xc9, 0x39, 0x0a, 0x01, 0x07, - 0xec, 0xe0, 0x05, 0xe8, 0x14, 0xd8, 0xe1, 0xfa, 0xd6, 0xf8, 0xed, 0xdb, - 0xff, 0x1d, 0xf5, 0x17, 0x0f, 0x1c, 0xdc, 0xed, 0xff, 0xff, 0x04, 0x13, - 0xf5, 0xe7, 0xd2, 0x12, 0xdb, 0xe1, 0x13, 0x11, 0x23, 0x0e, 0xf9, 0x31, - 0xdc, 0xef, 0x07, 
0x0a, 0x20, 0xf2, 0xf9, 0x13, 0xff, 0x1c, 0x2a, 0xdf, - 0xdb, 0xe7, 0x11, 0xf2, 0xfd, 0xfb, 0x28, 0x00, 0x15, 0x03, 0x02, 0x20, - 0x07, 0xf7, 0x19, 0x13, 0x13, 0xf6, 0x09, 0xfe, 0xfd, 0x20, 0x14, 0xf5, - 0xf5, 0xfc, 0x14, 0x0e, 0x17, 0xfe, 0x15, 0x04, 0xf9, 0xf6, 0x1d, 0xf6, - 0x1b, 0xe4, 0xee, 0xfd, 0x00, 0xe9, 0xee, 0xce, 0x0f, 0x20, 0x05, 0x02, - 0x0d, 0x06, 0x05, 0xf8, 0xef, 0xdf, 0x16, 0x17, 0xe6, 0xf1, 0x10, 0xf3, - 0x06, 0x04, 0xdb, 0xfb, 0xe7, 0xf8, 0x02, 0x11, 0xff, 0x0d, 0x0a, 0xfa, - 0x27, 0x0a, 0xfc, 0xe8, 0x11, 0x17, 0xf0, 0x0d, 0x0d, 0xee, 0xdf, 0xdd, - 0xf1, 0x15, 0xd6, 0xf7, 0x00, 0xef, 0x2e, 0xe6, 0x24, 0xfd, 0xd5, 0x04, - 0xf0, 0x08, 0x08, 0xed, 0x22, 0x07, 0xe1, 0x09, 0xd0, 0x0b, 0x18, 0xe6, - 0x3f, 0x0a, 0xe5, 0xe2, 0xf9, 0x08, 0x02, 0xd6, 0x13, 0x15, 0xbd, 0x00, - 0x0e, 0xf8, 0xe2, 0xca, 0xec, 0x0e, 0xe6, 0xef, 0x15, 0x11, 0xcb, 0xdf, - 0xf9, 0x03, 0x22, 0x10, 0xfb, 0xf9, 0xe5, 0x08, 0xe1, 0x11, 0x10, 0xfc, - 0xfa, 0x00, 0xf8, 0x30, 0xe5, 0x08, 0x14, 0xe8, 0x12, 0xe2, 0x04, 0x19, - 0x0b, 0xfa, 0x33, 0xf3, 0xec, 0xfe, 0xf8, 0x25, 0xf8, 0x21, 0x28, 0xef, - 0x00, 0xde, 0xff, 0x2b, 0x03, 0xfc, 0x10, 0x0c, 0xcf, 0xfd, 0x19, 0x0a, - 0x0c, 0xf2, 0xf7, 0x0c, 0xfd, 0x02, 0x1c, 0xdf, 0x26, 0x0d, 0xf0, 0x0b, - 0xce, 0x15, 0xfb, 0xec, 0x27, 0xf6, 0xf9, 0xe5, 0xe2, 0xfb, 0xfd, 0xd8, - 0x28, 0xec, 0xe9, 0xf2, 0xca, 0x09, 0x02, 0x06, 0x0c, 0xfa, 0x05, 0x01, - 0xd5, 0x0a, 0x02, 0xfb, 0x04, 0x17, 0xdd, 0xfe, 0xeb, 0xf1, 0x09, 0x10, - 0x12, 0xff, 0x00, 0xe0, 0x26, 0xf7, 0xed, 0xf4, 0x00, 0xf2, 0xfa, 0x07, - 0x02, 0xf5, 0x06, 0xe8, 0x03, 0xfd, 0xdc, 0xf2, 0xc2, 0xff, 0x0b, 0xd6, - 0x25, 0x04, 0xe9, 0xf0, 0xd9, 0x08, 0x09, 0xc5, 0x23, 0x12, 0xf6, 0x13, - 0x11, 0xf3, 0x18, 0xf0, 0x34, 0xfe, 0xfe, 0xed, 0xea, 0x02, 0x17, 0xdc, - 0x1b, 0x1b, 0xea, 0xfe, 0xea, 0xfe, 0xf2, 0xc4, 0xfd, 0x04, 0xe9, 0x0d, - 0x0d, 0x09, 0xca, 0xd4, 0xe1, 0x04, 0x1e, 0xff, 0x0f, 0xef, 0xd6, 0x0f, - 0xd5, 0xf8, 0x26, 0xd6, 0x33, 0xe8, 0xf5, 0x3b, 0xf1, 0xe8, 0x39, 0xe8, - 0x08, 0xe5, 0x01, 
0x02, 0x04, 0xf6, 0x19, 0x0a, 0xd0, 0xeb, 0x0b, 0x15, - 0xf7, 0x0e, 0x23, 0xf6, 0xf4, 0xd8, 0xf4, 0x17, 0x23, 0x25, 0x14, 0x01, - 0xd7, 0xfd, 0xf9, 0x1f, 0x1b, 0x11, 0x0a, 0x18, 0xf5, 0xf5, 0x0f, 0xe0, - 0x2e, 0x01, 0xe5, 0xdb, 0xe2, 0xf2, 0x14, 0xfa, 0x2a, 0x00, 0xe2, 0xea, - 0xfd, 0x0e, 0xfc, 0xc1, 0x35, 0x08, 0xf6, 0xf9, 0xec, 0x00, 0x06, 0x00, - 0x0b, 0xf6, 0x01, 0xfe, 0xea, 0x0b, 0x08, 0x05, 0xe4, 0xea, 0xd7, 0xfd, - 0xee, 0xf3, 0x0c, 0x0c, 0x0d, 0x02, 0xfd, 0xee, 0x17, 0x10, 0x13, 0xfd, - 0x07, 0x03, 0xf8, 0x0c, 0xd4, 0xed, 0xfe, 0x07, 0xf4, 0xee, 0xf4, 0x03, - 0xc2, 0x18, 0x2c, 0xd1, 0x33, 0xd8, 0xdb, 0xfa, 0xed, 0x10, 0x1c, 0xe3, - 0x37, 0x0a, 0xea, 0xfe, 0xf6, 0xef, 0x20, 0xed, 0x32, 0xf7, 0xf5, 0xf3, - 0xca, 0xfd, 0x0a, 0xcf, 0x0d, 0x10, 0xde, 0x07, 0x18, 0x10, 0xf0, 0xd6, - 0x0c, 0x04, 0xeb, 0x1a, 0xf9, 0x08, 0xc4, 0xcb, 0xe4, 0x0b, 0x19, 0xfc, - 0x29, 0xf6, 0xec, 0x07, 0xf3, 0xed, 0x2b, 0xe9, 0xfa, 0x02, 0xec, 0x2b, - 0xf0, 0xf2, 0x2d, 0xe8, 0xed, 0x00, 0x12, 0x13, 0xed, 0x1a, 0x3d, 0xf0, - 0x05, 0x04, 0xfc, 0x13, 0x10, 0x01, 0x40, 0xf2, 0x06, 0x02, 0xf9, 0x22, - 0x24, 0xff, 0x18, 0x00, 0xeb, 0xe8, 0x14, 0xf9, 0x25, 0xe0, 0xff, 0x03, - 0xe5, 0xfd, 0x08, 0xea, 0x2e, 0x0b, 0x05, 0xe7, 0xde, 0xe4, 0xf5, 0xea, - 0x3a, 0xf4, 0xf4, 0xe7, 0xed, 0xec, 0xf8, 0xee, 0x30, 0x0a, 0xdb, 0x05, - 0xf7, 0x16, 0xff, 0xf7, 0xfa, 0x1f, 0xef, 0xe4, 0xce, 0xf8, 0x13, 0x04, - 0xf9, 0x01, 0xe1, 0x03, 0xf9, 0xf9, 0x08, 0x04, 0xfa, 0xe4, 0xe7, 0xf7, - 0x28, 0xfd, 0xfd, 0x00, 0xfc, 0xfb, 0xef, 0x0a, 0xec, 0x0c, 0x0a, 0xd2, - 0x05, 0xfb, 0xcd, 0xfb, 0x9d, 0xea, 0x1c, 0xe5, 0x25, 0xe8, 0xea, 0x0b, - 0xf0, 0xf3, 0x0d, 0xab, 0x49, 0x0e, 0xeb, 0x00, 0xe2, 0x03, 0x29, 0xe0, - 0x3d, 0x06, 0xf7, 0xf8, 0xcf, 0x0c, 0x1a, 0xd6, 0x1f, 0xef, 0xfd, 0xff, - 0xef, 0x0c, 0xdb, 0xe0, 0x20, 0x06, 0xdf, 0x1a, 0xe7, 0xfc, 0xb2, 0xd1, - 0xdf, 0x13, 0x07, 0x1f, 0x0c, 0xf7, 0xde, 0x0a, 0xdb, 0xdf, 0x1a, 0xf5, - 0x29, 0x0d, 0xeb, 0x2c, 0xcf, 0x0e, 0x26, 0xfe, 0xef, 0x04, 0xf5, 0x14, - 0x09, 0x13, 0x34, 
0xff, 0xfe, 0x0e, 0x06, 0x0e, 0x10, 0xf9, 0x2a, 0x0b, - 0xe6, 0xfe, 0xf1, 0x1a, 0x36, 0x29, 0x29, 0x05, 0x05, 0xd8, 0x14, 0x12, - 0x26, 0x0b, 0x18, 0xff, 0xd7, 0xdf, 0x0f, 0xed, 0x31, 0xf7, 0xfc, 0xec, - 0x0b, 0xef, 0x0c, 0xd2, 0x30, 0xf9, 0x04, 0xfe, 0xef, 0xe4, 0xfb, 0xd1, - 0x32, 0xe5, 0xee, 0xf0, 0x0c, 0xe6, 0x13, 0xed, 0x1e, 0x0b, 0xe4, 0xe0, - 0xfa, 0xf4, 0x14, 0xf4, 0x18, 0xf7, 0xd9, 0xf6, 0xed, 0xea, 0xfc, 0x06, - 0xfc, 0xf5, 0xed, 0xeb, 0x05, 0x03, 0x1b, 0x0b, 0xff, 0x0b, 0xef, 0x01, - 0xf1, 0x16, 0x05, 0x00, 0xee, 0x0a, 0xdb, 0x10, 0xb4, 0x14, 0x0f, 0xe1, - 0x1c, 0xfd, 0xf0, 0xf8, 0xc3, 0x11, 0x17, 0xba, 0x47, 0x15, 0xe6, 0x01, - 0xea, 0xf1, 0x0c, 0x08, 0x4a, 0x15, 0xf0, 0xf7, 0xea, 0x00, 0xf5, 0xd4, - 0xf1, 0xff, 0xe0, 0x0c, 0xf4, 0x17, 0xd8, 0xea, 0x03, 0xff, 0xd5, 0x18, - 0xfb, 0x07, 0xc7, 0xc9, 0xdd, 0xf3, 0x15, 0x0d, 0x22, 0xea, 0xdb, 0x0a, - 0xd6, 0x09, 0x1d, 0xe5, 0x2d, 0x04, 0xfc, 0x35, 0xc6, 0x0e, 0x33, 0xf1, - 0xd7, 0xea, 0x01, 0x1b, 0x0e, 0x01, 0x2a, 0xff, 0xef, 0xf1, 0xf7, 0x0f, - 0xff, 0x00, 0x3b, 0xe8, 0x0a, 0xff, 0xf4, 0x0d, 0x1f, 0x04, 0x17, 0xf7, - 0xdf, 0xec, 0x12, 0x26, 0x36, 0x07, 0x0c, 0x06, 0xe7, 0xd6, 0x13, 0xe3, - 0x30, 0x09, 0x00, 0xf5, 0xe0, 0xf3, 0x11, 0xe2, 0x38, 0x0d, 0xf6, 0x05, - 0xec, 0x05, 0x00, 0xe5, 0x24, 0xef, 0xfe, 0xf8, 0x00, 0xd8, 0x18, 0xf1, - 0x26, 0x0b, 0xf2, 0xfc, 0xe0, 0xe4, 0x06, 0x0b, 0x1a, 0x05, 0xc6, 0xf6, - 0xe8, 0xde, 0xfe, 0x0c, 0x03, 0x09, 0xfe, 0xe2, 0x18, 0x1b, 0xfb, 0xf7, - 0x06, 0xf1, 0xfe, 0xf6, 0xef, 0x1b, 0x07, 0x0d, 0x01, 0x0a, 0xed, 0xf0, - 0xad, 0x1a, 0x17, 0xd6, 0x37, 0xfd, 0xd8, 0xec, 0xca, 0xf1, 0x15, 0xc4, - 0x33, 0xf1, 0xed, 0xf0, 0xe9, 0x15, 0x0d, 0xf2, 0x36, 0xde, 0xfd, 0x0e, - 0xfb, 0x10, 0x0f, 0xf6, 0xf9, 0x0c, 0xea, 0xf0, 0xe5, 0x0b, 0xee, 0xc1, - 0x10, 0xf4, 0xe8, 0x1f, 0xee, 0x00, 0xd0, 0xe4, 0xe7, 0x13, 0x07, 0x27, - 0x12, 0xea, 0xea, 0x0f, 0xea, 0xf4, 0x14, 0xee, 0xfe, 0x09, 0xfb, 0x31, - 0xdb, 0x1b, 0x1c, 0xe7, 0xef, 0xf5, 0xf7, 0x1a, 0x06, 0x01, 0x2c, 0xed, - 0xfb, 0x04, 0xfa, 
0x07, 0x19, 0xec, 0x2b, 0x0d, 0xfc, 0xd8, 0xfc, 0x0f, - 0x1f, 0xfc, 0x2d, 0xf3, 0xc9, 0xda, 0x0a, 0xfe, 0x29, 0x00, 0xfa, 0x09, - 0xe8, 0xf6, 0x21, 0xf3, 0x4a, 0x1a, 0xf8, 0x00, 0xe7, 0xf0, 0x21, 0x01, - 0x22, 0xf3, 0x00, 0xe9, 0x06, 0xe3, 0x15, 0xd7, 0x3d, 0x0c, 0x07, 0xf1, - 0xf3, 0xec, 0x17, 0xdf, 0x29, 0x1b, 0xfd, 0xfe, 0xeb, 0xed, 0x17, 0xf6, - 0x23, 0x0a, 0xea, 0xee, 0xf9, 0xf3, 0x0f, 0x0c, 0xf8, 0xf5, 0xed, 0xe8, - 0x1c, 0x14, 0x07, 0x17, 0x0b, 0x0d, 0xed, 0xf7, 0xed, 0x10, 0x07, 0xd5, - 0xf2, 0x09, 0xd6, 0xf7, 0xb5, 0xf6, 0x19, 0xc9, 0x25, 0x15, 0xe8, 0xf5, - 0xc4, 0xf9, 0x2a, 0xb0, 0x39, 0x0e, 0x02, 0x11, 0xf0, 0xf7, 0x1d, 0xeb, - 0x39, 0x10, 0x02, 0x15, 0xe0, 0x08, 0x01, 0xee, 0x1c, 0x1e, 0x08, 0x04, - 0xf2, 0x02, 0xe8, 0xda, 0xfa, 0xfb, 0xe0, 0xfe, 0x05, 0x02, 0xd3, 0xca, - 0xf4, 0xec, 0x10, 0x16, 0x05, 0x0d, 0xd7, 0x09, 0xdc, 0xf6, 0x1e, 0xf8, - 0x10, 0xed, 0xf7, 0x27, 0xf5, 0x08, 0x28, 0xee, 0xec, 0xe0, 0xf8, 0x17, - 0xfb, 0x23, 0x2e, 0xf1, 0xfa, 0xf5, 0xfc, 0x1a, 0x10, 0xf7, 0x32, 0xfb, - 0xfb, 0xe8, 0xf1, 0x03, 0x24, 0xeb, 0x25, 0xf9, 0xca, 0xf1, 0xfe, 0x01, - 0x2e, 0x07, 0x18, 0x03, 0xe5, 0xea, 0x10, 0xfa, 0x3b, 0x07, 0x0f, 0x11, - 0x04, 0xf7, 0x1d, 0xf1, 0x24, 0xd9, 0x08, 0xef, 0x02, 0xdd, 0x07, 0xc8, - 0x2c, 0x0d, 0x06, 0xec, 0x17, 0xda, 0x21, 0xdf, 0x34, 0xd9, 0xfb, 0xf2, - 0xf4, 0xec, 0x0e, 0x0a, 0x0f, 0x0f, 0xdb, 0xf0, 0xfb, 0xe6, 0x0f, 0x00, - 0x04, 0xf9, 0x01, 0x05, 0x05, 0xfe, 0x08, 0xf3, 0x0e, 0xf2, 0xfb, 0x01, - 0xfd, 0x18, 0x1d, 0xf6, 0xee, 0x06, 0xcf, 0xfc, 0xae, 0x27, 0x21, 0xd2, - 0x33, 0x03, 0xe0, 0xe0, 0xc9, 0xfb, 0x3a, 0xbd, 0x4d, 0x04, 0xe8, 0xf5, - 0xe6, 0xeb, 0x19, 0xf2, 0x4b, 0x1d, 0xfc, 0xf7, 0xd9, 0xff, 0xfe, 0xea, - 0x0f, 0x04, 0x0e, 0x00, 0xed, 0x19, 0xe9, 0xe9, 0xff, 0x11, 0xef, 0x14, - 0x01, 0x17, 0xbc, 0xb5, 0xef, 0x0c, 0x22, 0x27, 0x0f, 0x01, 0xd4, 0x03, - 0xce, 0x01, 0x25, 0xff, 0xf9, 0xf0, 0x0a, 0x1c, 0xe5, 0x0f, 0x1c, 0xee, - 0xf4, 0xf1, 0xf4, 0x0c, 0x00, 0x08, 0x1c, 0xf4, 0xd5, 0xf1, 0xfc, 0x1f, - 0x11, 0x00, 0x18, 
0x03, 0xf7, 0xe4, 0xff, 0x07, 0x09, 0x1a, 0x18, 0xff, - 0xea, 0xec, 0xfd, 0x13, 0x2b, 0xf8, 0x0c, 0xfa, 0xdf, 0xf6, 0x11, 0xda, - 0x2a, 0xdc, 0xfc, 0xff, 0xff, 0xec, 0x12, 0xe1, 0x37, 0xfd, 0xeb, 0xfe, - 0xea, 0xd1, 0x12, 0xfa, 0x28, 0x1a, 0x0d, 0xf0, 0xf7, 0xe0, 0x0c, 0xeb, - 0x35, 0x14, 0xeb, 0x00, 0xeb, 0xe7, 0x1b, 0xfc, 0x09, 0x00, 0xf2, 0x04, - 0xf9, 0xe5, 0x1a, 0x0e, 0x08, 0x12, 0xf8, 0xfe, 0x09, 0x0f, 0x0d, 0xea, - 0x03, 0xe1, 0xfe, 0xf2, 0xec, 0x0d, 0x02, 0xdb, 0x04, 0x1d, 0xd4, 0x01, - 0xca, 0x13, 0x29, 0xca, 0x28, 0x04, 0xe2, 0xf1, 0xdb, 0x0b, 0x2c, 0xcd, - 0x44, 0x00, 0xe7, 0xf4, 0xd0, 0x12, 0x15, 0xff, 0x42, 0x11, 0x05, 0xfd, - 0xd9, 0x11, 0x1c, 0xf4, 0x15, 0xec, 0xf2, 0x24, 0xd6, 0x1d, 0xec, 0xda, - 0xf5, 0xec, 0xe5, 0x22, 0xf2, 0x0b, 0xbd, 0xd0, 0xeb, 0x05, 0x07, 0x1b, - 0x01, 0xed, 0xf5, 0x02, 0xcf, 0x08, 0x15, 0xfd, 0x1c, 0xe5, 0x04, 0x19, - 0xc7, 0x25, 0x22, 0xf3, 0xde, 0xfb, 0xfb, 0x20, 0xf6, 0xeb, 0x25, 0xfe, - 0xf5, 0x08, 0xf5, 0x17, 0x0e, 0x04, 0x1c, 0xf9, 0xee, 0xec, 0xe1, 0x06, - 0x12, 0xff, 0x2a, 0x13, 0xed, 0xfe, 0x05, 0x18, 0x25, 0x20, 0x09, 0x13, - 0xea, 0xd7, 0x05, 0x06, 0x33, 0x25, 0xff, 0x0a, 0xf0, 0xea, 0x17, 0xe1, - 0x30, 0xfa, 0x0d, 0x0a, 0x04, 0x00, 0x0e, 0xe9, 0x16, 0x20, 0x0d, 0x02, - 0xe8, 0xed, 0x07, 0xe8, 0x3c, 0xf1, 0xd9, 0xfa, 0xe1, 0xed, 0x18, 0xfc, - 0xf0, 0x09, 0xe3, 0x05, 0xfe, 0xd1, 0x0b, 0x0e, 0xf5, 0x25, 0xfd, 0xfb, - 0x30, 0x1e, 0x08, 0xfc, 0x0c, 0x21, 0xea, 0xfc, 0xe5, 0x1e, 0x16, 0xf5, - 0xf4, 0xfc, 0xf0, 0xea, 0xc4, 0x21, 0x27, 0xe9, 0x2b, 0xdb, 0xdb, 0xec, - 0xe5, 0xfe, 0x37, 0xe2, 0x46, 0x25, 0xfa, 0xec, 0xe4, 0xf3, 0x19, 0xf2, - 0x4c, 0x06, 0x00, 0xfb, 0xeb, 0x10, 0x10, 0xf7, 0x2a, 0xf8, 0xe9, 0x18, - 0xee, 0x21, 0xe8, 0xd5, 0xf4, 0x0a, 0xed, 0x24, 0xfe, 0xf9, 0xb2, 0xbc, - 0xf3, 0x1d, 0x00, 0x2f, 0x07, 0x08, 0xe1, 0xf1, 0xed, 0x27, 0x27, 0xfe, - 0x22, 0xfd, 0x02, 0x20, 0xd8, 0x05, 0x25, 0xec, 0xf1, 0xff, 0x0a, 0x0f, - 0xe6, 0xfe, 0x46, 0xfd, 0xe1, 0xca, 0xf7, 0x22, 0x03, 0x08, 0x21, 0xf5, - 0x0f, 0xf7, 0xfb, 
0x0c, 0xfb, 0x14, 0x2d, 0x03, 0xe5, 0xe4, 0x09, 0x0b, - 0x1a, 0xe6, 0x01, 0x28, 0xe9, 0xd6, 0x0b, 0xf7, 0x2c, 0xfb, 0x11, 0xee, - 0x0b, 0xed, 0x17, 0xf0, 0x3c, 0xf5, 0x08, 0xfa, 0xf8, 0xcd, 0x17, 0xfa, - 0x39, 0xea, 0x11, 0xf5, 0xed, 0xee, 0x0a, 0xec, 0x41, 0xd6, 0xe7, 0xf9, - 0xfa, 0xc8, 0x15, 0xf7, 0x08, 0x0e, 0xe3, 0x08, 0xe8, 0xec, 0xfd, 0xfe, - 0xf1, 0x00, 0xe9, 0xf4, 0x09, 0x26, 0x02, 0x16, 0xf0, 0x01, 0xef, 0x01, - 0xff, 0x03, 0x22, 0xdb, 0xfc, 0xf5, 0xde, 0xe5, 0xc4, 0x01, 0x28, 0xd4, - 0x38, 0x08, 0xd0, 0xec, 0xd5, 0x04, 0x2f, 0xce, 0x4e, 0xeb, 0xf9, 0xe7, - 0xdf, 0xf0, 0x1b, 0xf5, 0x42, 0xf1, 0xf6, 0x09, 0xd5, 0x0a, 0x0d, 0x08, - 0x04, 0x05, 0xe2, 0x0e, 0xd7, 0x19, 0xdb, 0xda, 0xe1, 0x25, 0xde, 0x15, - 0x0e, 0x14, 0xbd, 0xb0, 0xe3, 0xe5, 0x24, 0x1e, 0xf8, 0x0d, 0xd8, 0xf7, - 0xf2, 0xff, 0x18, 0xf5, 0x07, 0xf0, 0x02, 0x25, 0xd5, 0x1e, 0x2e, 0xdf, - 0xe7, 0x05, 0xef, 0x11, 0xe8, 0xe7, 0x47, 0xf4, 0xe1, 0xde, 0x09, 0x36, - 0x1a, 0x11, 0x11, 0xf5, 0x12, 0xe5, 0xe7, 0x18, 0x01, 0x17, 0x2a, 0x03, - 0x05, 0xea, 0x09, 0x0b, 0x12, 0x04, 0x17, 0xf0, 0xee, 0xd7, 0x11, 0xed, - 0x3c, 0x17, 0x16, 0xff, 0x02, 0xdc, 0x21, 0xf3, 0x2e, 0xe5, 0x13, 0xef, - 0xec, 0xe2, 0x10, 0xd0, 0x2e, 0xee, 0xff, 0x01, 0xe0, 0xe5, 0x0b, 0xda, - 0x1f, 0xf8, 0xf6, 0xfb, 0x07, 0xdb, 0x05, 0xf6, 0x0c, 0xf3, 0xf0, 0x10, - 0xf9, 0xf5, 0xf2, 0x0d, 0x10, 0xf7, 0xf6, 0xff, 0x2b, 0x0d, 0x06, 0x1e, - 0xf3, 0x0c, 0xe9, 0x01, 0xf2, 0x23, 0xfe, 0xe9, 0xdd, 0x12, 0xdd, 0xf7, - 0xbb, 0x22, 0x1b, 0xd4, 0x38, 0x29, 0xd4, 0xcf, 0xf5, 0xf9, 0x27, 0xdd, - 0x47, 0x00, 0xf2, 0xe5, 0x09, 0xfc, 0x0e, 0xf9, 0x34, 0x0a, 0x02, 0xfd, - 0xec, 0x25, 0x1d, 0x03, 0x15, 0x09, 0xf1, 0x1b, 0xd0, 0x17, 0xda, 0xda, - 0xe7, 0x07, 0xe3, 0x15, 0xf1, 0x02, 0xb9, 0xce, 0xe6, 0x0c, 0x10, 0x31, - 0xfe, 0xf7, 0xd9, 0xfa, 0xed, 0xed, 0x33, 0xf4, 0x19, 0xe7, 0xfe, 0x3f, - 0xe5, 0x06, 0x2e, 0xe6, 0xf2, 0xdc, 0xf5, 0x18, 0xe6, 0x01, 0x2f, 0xee, - 0xe7, 0xe4, 0xfe, 0x2c, 0x03, 0xf7, 0x20, 0x05, 0x07, 0xe2, 0x06, 0x1e, - 0x05, 0xed, 0x2f, 
0x03, 0xea, 0xf8, 0x0e, 0x0c, 0x1f, 0xff, 0x20, 0xf4, - 0xe8, 0xe1, 0x1c, 0xec, 0x22, 0x1e, 0x05, 0xfd, 0xf5, 0xca, 0x30, 0xe9, - 0x30, 0xe4, 0x14, 0xff, 0xf2, 0xdc, 0x17, 0xf8, 0x26, 0xe1, 0x0b, 0x01, - 0x11, 0xc2, 0x02, 0xf1, 0x36, 0x10, 0x02, 0x05, 0xed, 0xf1, 0x15, 0xfa, - 0x17, 0xf8, 0xf7, 0xf1, 0xe8, 0xd3, 0xfd, 0x08, 0xfb, 0x27, 0xf5, 0xf5, - 0x13, 0x06, 0x0b, 0xf0, 0x01, 0xf9, 0xd7, 0x0e, 0xec, 0x12, 0xfe, 0xfd, - 0xee, 0x25, 0xd8, 0xf1, 0xb2, 0x09, 0x1c, 0xbf, 0x34, 0xea, 0xc8, 0xea, - 0xdb, 0x0e, 0x24, 0xde, 0x47, 0xfe, 0xdc, 0xe0, 0xf3, 0x06, 0x20, 0xfe, - 0x2b, 0xf6, 0x18, 0x14, 0xcd, 0x19, 0x16, 0xfe, 0x1a, 0x15, 0xf8, 0x11, - 0xf4, 0x22, 0xd7, 0xcc, 0xdd, 0x15, 0xdc, 0x14, 0xf9, 0x02, 0xbb, 0xca, - 0xe3, 0xf3, 0x0d, 0x1e, 0x2a, 0x0c, 0xe4, 0x05, 0xe0, 0x18, 0x2a, 0x07, - 0x20, 0xed, 0xf6, 0x17, 0xcf, 0xf4, 0x2a, 0xd6, 0xfb, 0xce, 0x03, 0x37, - 0xe2, 0xfd, 0x1d, 0xfb, 0xe5, 0xe0, 0x05, 0x29, 0xef, 0x16, 0x23, 0xf7, - 0x01, 0xf4, 0x0c, 0x14, 0xff, 0xee, 0x31, 0xf9, 0x12, 0xf9, 0x14, 0xf6, - 0x0c, 0xf6, 0x0b, 0x0f, 0xd8, 0xdc, 0xfe, 0x0f, 0x37, 0xfa, 0x01, 0x09, - 0x04, 0xd1, 0x0b, 0x0c, 0x29, 0xf3, 0x0a, 0xf9, 0xed, 0xc2, 0x18, 0xf4, - 0x25, 0x18, 0x0f, 0x08, 0xf7, 0xed, 0x1f, 0xf7, 0x4f, 0x0e, 0xf0, 0xe4, - 0x00, 0xeb, 0xfa, 0x1a, 0x0c, 0x03, 0xe9, 0xfc, 0xf0, 0xcc, 0x06, 0x05, - 0xf2, 0x12, 0x04, 0xe2, 0x16, 0x0a, 0x0a, 0xf3, 0x0b, 0xf3, 0xdc, 0xfd, - 0x10, 0xfc, 0x0e, 0xe2, 0xe0, 0xfe, 0xf0, 0xff, 0xb1, 0x06, 0x1b, 0xe4, - 0x30, 0x13, 0xc6, 0xc3, 0xfa, 0x0c, 0x1e, 0xd9, 0x57, 0x11, 0xe1, 0xd6, - 0xfa, 0xee, 0x1d, 0xf7, 0x37, 0xea, 0xf0, 0x05, 0xef, 0x24, 0x1e, 0xf1, - 0x10, 0xe8, 0xeb, 0x19, 0xd1, 0x18, 0xf5, 0xc8, 0xf8, 0xec, 0xf5, 0x1f, - 0xf2, 0xff, 0xb3, 0xd2, 0xe6, 0x0e, 0x06, 0x2e, 0x07, 0x17, 0xe0, 0xf5, - 0x02, 0xf9, 0x20, 0x07, 0x16, 0x08, 0xe8, 0x1d, 0xd3, 0x08, 0x34, 0xda, - 0xf2, 0xce, 0xfb, 0x1f, 0xe1, 0x00, 0x2d, 0xdb, 0xdf, 0xcc, 0x05, 0xfb, - 0xf7, 0x00, 0x33, 0xf9, 0x0b, 0x01, 0x13, 0x28, 0xf8, 0x07, 0x24, 0xf8, - 0x0f, 0x03, 0x0d, 
0xe9, 0x06, 0xfe, 0x18, 0xf9, 0xed, 0xf5, 0x0c, 0xe0, - 0x2c, 0x0e, 0xf9, 0x06, 0xfb, 0xce, 0x27, 0xe8, 0x29, 0x19, 0xf9, 0x01, - 0x0e, 0xc8, 0x25, 0xed, 0x30, 0xeb, 0x01, 0xfe, 0x10, 0xdc, 0x1e, 0x00, - 0x1e, 0x10, 0xf9, 0x00, 0xfc, 0xc8, 0x0e, 0x04, 0x13, 0x04, 0xf0, 0x02, - 0xfe, 0xd8, 0x0f, 0x1b, 0xf7, 0xe1, 0xf8, 0xde, 0x12, 0xe2, 0xef, 0x0a, - 0x02, 0xe0, 0xdd, 0xf1, 0x0e, 0x2a, 0x25, 0x15, 0xeb, 0x02, 0xf4, 0xf0, - 0xbf, 0xfc, 0x27, 0xdc, 0x42, 0x0f, 0xe9, 0xbf, 0xe8, 0x20, 0x33, 0xc9, - 0x3f, 0x10, 0xec, 0xf3, 0x03, 0x02, 0x2c, 0x04, 0x38, 0x06, 0x0a, 0xf9, - 0xe5, 0x1c, 0x3f, 0x0f, 0x0c, 0x25, 0xe2, 0x06, 0xe6, 0x03, 0xf4, 0xd7, - 0xfe, 0xf6, 0xe7, 0x2f, 0xfa, 0x03, 0xb6, 0xcb, 0xf1, 0x11, 0x0a, 0x2c, - 0xfc, 0x1e, 0xe0, 0xff, 0xc2, 0xdd, 0x1d, 0xf3, 0x10, 0xfa, 0x07, 0x1e, - 0xf6, 0x20, 0x07, 0xe6, 0xf1, 0x0a, 0xe8, 0x27, 0xf1, 0xf5, 0x24, 0xed, - 0xfd, 0xee, 0x13, 0x15, 0xe9, 0xe2, 0x22, 0xe5, 0xf9, 0xdd, 0x1d, 0x32, - 0x04, 0xfa, 0x25, 0x00, 0xee, 0xfd, 0x0b, 0x0e, 0x23, 0xfa, 0x0f, 0x01, - 0xf8, 0xf0, 0x15, 0xe4, 0x21, 0xf7, 0x10, 0xf9, 0xe7, 0xc3, 0x19, 0xe1, - 0x34, 0xff, 0xed, 0xf4, 0xef, 0xd7, 0x21, 0x01, 0x31, 0xee, 0xf7, 0xf2, - 0xf3, 0xe5, 0x0a, 0xee, 0x2e, 0x1e, 0xf2, 0x0c, 0x07, 0xc2, 0x08, 0x0a, - 0x14, 0x14, 0x00, 0xfc, 0xf9, 0xd6, 0xfb, 0xf8, 0xe5, 0xf1, 0xfa, 0xe0, - 0x15, 0x21, 0xef, 0x06, 0xf9, 0x00, 0xf5, 0xf4, 0x0b, 0x0b, 0x18, 0x02, - 0xf5, 0x04, 0xdb, 0xfd, 0xcc, 0x32, 0x1d, 0xc9, 0x3b, 0x12, 0xd9, 0xaf, - 0xcf, 0x0f, 0x26, 0xde, 0x35, 0xe4, 0xdb, 0xd3, 0x22, 0x11, 0x2e, 0xfb, - 0x36, 0xfa, 0xfd, 0x02, 0xeb, 0x0f, 0x37, 0x0b, 0x14, 0x1d, 0xdd, 0x18, - 0xe0, 0x10, 0xe0, 0xdf, 0x14, 0xf9, 0xf0, 0x19, 0xf7, 0xfb, 0xc4, 0xe5, - 0xe7, 0x11, 0x01, 0x31, 0x1a, 0xf7, 0xd8, 0xf1, 0xe9, 0xf3, 0x21, 0xf9, - 0xfe, 0xe4, 0xe9, 0x02, 0xd0, 0x06, 0x14, 0xd7, 0xfc, 0xec, 0x06, 0x10, - 0xfc, 0xf0, 0x1c, 0xe7, 0xec, 0xe3, 0x03, 0x21, 0xe4, 0x04, 0x12, 0xf0, - 0xf3, 0xed, 0x16, 0x36, 0x02, 0xfd, 0x13, 0x11, 0xdf, 0xeb, 0x19, 0x07, - 0x10, 0x0c, 0xf9, 
0x08, 0xf8, 0xf4, 0x1d, 0xfd, 0x1d, 0x16, 0xf4, 0x0a, - 0x08, 0xec, 0x0c, 0x09, 0x3d, 0xe0, 0x0b, 0xee, 0x10, 0xd1, 0x1e, 0x15, - 0x43, 0xeb, 0xfa, 0xf3, 0x05, 0xc7, 0xf2, 0xd9, 0x25, 0x20, 0xee, 0xe9, - 0xfd, 0xce, 0x16, 0x0c, 0x27, 0x06, 0x0a, 0x06, 0xf9, 0xd6, 0x0b, 0x05, - 0xe8, 0x02, 0xe8, 0xd2, 0x10, 0x01, 0xf2, 0x15, 0x09, 0x04, 0xd3, 0xe2, - 0xfe, 0xf0, 0x32, 0x1b, 0xd9, 0xf5, 0xea, 0xcc, 0xcb, 0x10, 0x1c, 0xf1, - 0x3b, 0x02, 0xd4, 0xbf, 0xca, 0xfe, 0x12, 0xdb, 0x3b, 0xf8, 0xd5, 0xe7, - 0x13, 0x10, 0x1a, 0xf4, 0x38, 0x09, 0x08, 0xee, 0xf4, 0xf4, 0x3c, 0xf7, - 0x15, 0x04, 0xe4, 0xfa, 0xf4, 0x04, 0xee, 0xf4, 0x07, 0xf8, 0xe9, 0x3b, - 0xe2, 0x1f, 0xd5, 0xed, 0xe6, 0xfd, 0x18, 0x49, 0x21, 0x06, 0xd8, 0xde, - 0xfa, 0xf0, 0x1b, 0xfe, 0xde, 0x08, 0xf7, 0x14, 0xc7, 0x0f, 0x1d, 0xcf, - 0x00, 0xea, 0xff, 0x1b, 0xd5, 0x08, 0x0d, 0xd9, 0xf1, 0xf4, 0x16, 0x23, - 0xd8, 0x0c, 0x29, 0xdc, 0xf1, 0xf2, 0x21, 0x49, 0xfc, 0xe2, 0x08, 0x01, - 0xf0, 0xf8, 0x17, 0xf9, 0x0f, 0xf5, 0xfa, 0x1a, 0xef, 0xec, 0x09, 0xeb, - 0x1a, 0x0c, 0x17, 0x09, 0x11, 0xe9, 0x1a, 0xf7, 0x29, 0xf9, 0xfd, 0x07, - 0x01, 0xdd, 0x0a, 0xec, 0x22, 0x15, 0x03, 0xfd, 0xe2, 0xd2, 0x15, 0xec, - 0x4d, 0xd7, 0xfc, 0xf6, 0x0b, 0xcc, 0x0e, 0x04, 0x03, 0xf7, 0xfb, 0xfb, - 0x0d, 0xeb, 0x19, 0x07, 0xf4, 0xf4, 0xe5, 0xde, 0x22, 0x07, 0xea, 0xf7, - 0xeb, 0x23, 0xc8, 0xee, 0x03, 0x04, 0x0f, 0x19, 0xc3, 0xf8, 0x06, 0xd0, - 0xf7, 0xfe, 0x0e, 0xe7, 0x0a, 0x02, 0xb0, 0xb8, 0x00, 0xfb, 0x18, 0x0f, - 0x22, 0xf7, 0xe9, 0xdc, 0x09, 0x15, 0x23, 0x0d, 0x22, 0x13, 0xe2, 0xed, - 0xeb, 0x18, 0x20, 0x0b, 0x12, 0xfc, 0x02, 0xf1, 0xdb, 0x0e, 0xe1, 0x04, - 0xdb, 0x0f, 0xf3, 0x1a, 0x06, 0xef, 0xdb, 0xdc, 0xdd, 0xfb, 0x00, 0x2a, - 0x20, 0xfd, 0xc1, 0xe3, 0xef, 0x01, 0x14, 0xf2, 0x14, 0x00, 0x0f, 0x28, - 0xd9, 0xff, 0xf4, 0xdc, 0x09, 0xfa, 0x1c, 0x08, 0xd1, 0x03, 0x0a, 0xf4, - 0xe4, 0xdb, 0x20, 0x30, 0xea, 0x06, 0x11, 0xe2, 0x26, 0xf7, 0x16, 0x22, - 0xf9, 0x07, 0x02, 0xf5, 0xf6, 0xfb, 0x1d, 0x0c, 0x16, 0x0a, 0x07, 0xf9, - 0x11, 0xde, 0x20, 
0x08, 0x19, 0x04, 0x0a, 0x0b, 0x0c, 0xf7, 0xf4, 0xfc, - 0x41, 0xf1, 0xf8, 0x16, 0x09, 0xdc, 0x0e, 0x1a, 0x2b, 0x1f, 0xe7, 0xfe, - 0x01, 0xe0, 0xfd, 0xe2, 0x34, 0xec, 0xf3, 0xf5, 0x03, 0xec, 0x0b, 0xfb, - 0x04, 0xf6, 0xdd, 0xfd, 0x06, 0x14, 0x0d, 0xfa, 0xfc, 0xf1, 0x0a, 0xca, - 0x01, 0xec, 0x0e, 0x0e, 0xec, 0xd7, 0xee, 0xd4, 0xf2, 0xfe, 0x16, 0xfa, - 0xbd, 0x0d, 0xef, 0xcb, 0xc4, 0xee, 0xed, 0x13, 0x10, 0x19, 0xf8, 0xb1, - 0xf1, 0xe3, 0x00, 0xf3, 0x0c, 0xf6, 0xde, 0xc6, 0x15, 0x27, 0x14, 0x29, - 0x15, 0xf6, 0xf4, 0xf5, 0xe7, 0x00, 0x0b, 0x2f, 0x0c, 0xef, 0x03, 0x0f, - 0xfd, 0x08, 0xf3, 0xf9, 0xf9, 0x05, 0x0d, 0x34, 0x15, 0x1b, 0xc8, 0xd1, - 0xf2, 0x1b, 0x0a, 0x22, 0x12, 0x11, 0xe9, 0xf4, 0xe1, 0x2a, 0x20, 0x03, - 0xf2, 0xf8, 0x14, 0x0b, 0xd0, 0xf4, 0x0e, 0xbf, 0xc6, 0xd8, 0x04, 0x05, - 0xf8, 0xf4, 0x04, 0xc9, 0xea, 0xfd, 0xf7, 0xfa, 0xe3, 0x1b, 0x11, 0xde, - 0x0c, 0x11, 0x25, 0x29, 0xe5, 0x02, 0xef, 0xef, 0x02, 0xfa, 0x1a, 0x21, - 0x19, 0x09, 0x08, 0x05, 0x04, 0xe5, 0xfa, 0xed, 0x2d, 0x26, 0xfa, 0x17, - 0xf6, 0xe8, 0x12, 0x12, 0x31, 0xfc, 0x0d, 0x00, 0xf7, 0xeb, 0x19, 0xf1, - 0x2a, 0x06, 0x14, 0xec, 0x08, 0xd3, 0x21, 0x07, 0x32, 0xe3, 0x02, 0x0b, - 0xfb, 0xd8, 0x27, 0x07, 0x05, 0xe6, 0xf5, 0xf5, 0x0a, 0xf7, 0x2c, 0x2a, - 0xd8, 0x1b, 0xda, 0xf7, 0xea, 0xf6, 0xf9, 0x0e, 0xf8, 0x0c, 0x05, 0xc7, - 0xd6, 0x06, 0x12, 0xe3, 0xe1, 0xe1, 0xd8, 0xdb, 0xc6, 0xf8, 0xe6, 0xfa, - 0x0c, 0x07, 0xf8, 0xe7, 0xe1, 0x0f, 0x00, 0xf3, 0x03, 0xf0, 0xde, 0xcc, - 0xf5, 0xfc, 0xef, 0x1e, 0x16, 0x13, 0xfb, 0xf4, 0x03, 0xe9, 0xfc, 0xfa, - 0x15, 0xe8, 0x15, 0x09, 0xf1, 0x0d, 0xdb, 0x0a, 0xe8, 0x09, 0xf5, 0x1a, - 0x04, 0xf8, 0xd8, 0xd4, 0x04, 0xee, 0x25, 0x29, 0x09, 0xfe, 0xf3, 0xf5, - 0xd4, 0x0a, 0x15, 0x19, 0xf5, 0x12, 0xfe, 0x04, 0xe7, 0x01, 0xeb, 0xde, - 0xbe, 0xfe, 0x09, 0x12, 0xdf, 0x13, 0xe0, 0xef, 0xc7, 0xff, 0x03, 0x08, - 0xfe, 0xf2, 0x19, 0xe0, 0xe4, 0x0c, 0x22, 0x1e, 0x05, 0xf7, 0x16, 0xf2, - 0xf9, 0x06, 0x17, 0xf6, 0x0c, 0x1e, 0x23, 0x08, 0xfe, 0xdc, 0xfd, 0x17, - 0x11, 0xdf, 0xf5, 
0x0f, 0x01, 0x03, 0x08, 0xee, 0x1b, 0x02, 0x0b, 0x1b, - 0x0c, 0x16, 0x1a, 0x00, 0x0f, 0x26, 0x14, 0xf8, 0xf4, 0xf3, 0x19, 0x16, - 0x22, 0x0a, 0xd0, 0xf9, 0xf1, 0x05, 0x2b, 0x1e, 0x1e, 0xef, 0xf5, 0x06, - 0x05, 0xe7, 0x3f, 0x2a, 0x06, 0xf0, 0x15, 0x14, 0x13, 0x20, 0x1b, 0xde, - 0x10, 0x05, 0x33, 0xf8, 0x08, 0x04, 0x17, 0x0d, 0x0f, 0xf6, 0x01, 0xed, - 0x28, 0x25, 0x1c, 0x13, 0xfb, 0xea, 0xfb, 0xf3, 0x1c, 0xf9, 0x1f, 0xf0, - 0xfb, 0x17, 0xf8, 0xff, 0x10, 0xf7, 0x0b, 0x24, 0x04, 0x00, 0x0d, 0x0c, - 0xf7, 0x0a, 0x16, 0x13, 0xf8, 0x05, 0x0a, 0xf1, 0xf5, 0xee, 0xf8, 0x14, - 0x0e, 0xed, 0xfe, 0x1b, 0xfe, 0x17, 0x13, 0x10, 0x12, 0x21, 0x1c, 0xfa, - 0xe5, 0x0b, 0x08, 0x0c, 0x10, 0x1b, 0x03, 0xef, 0x0d, 0x05, 0x0a, 0xf0, - 0x04, 0x11, 0x15, 0x00, 0xfd, 0xef, 0x02, 0x18, 0xf4, 0x09, 0xfa, 0xf6, - 0x02, 0xf7, 0xfd, 0x13, 0xef, 0x13, 0xf7, 0xf9, 0x17, 0x0f, 0xfa, 0xf8, - 0x15, 0xff, 0x04, 0xef, 0xf0, 0x15, 0xfa, 0xfe, 0xf0, 0xf4, 0xed, 0x06, - 0x1c, 0x02, 0xfb, 0xf7, 0x05, 0xfb, 0x0c, 0xef, 0xf4, 0xf0, 0xf6, 0xec, - 0x17, 0xf3, 0xf5, 0xef, 0x02, 0xfd, 0xe5, 0x21, 0x0c, 0xf1, 0x1e, 0x08, - 0xf1, 0x0b, 0xf7, 0x09, 0x1d, 0xf2, 0xf9, 0xf2, 0xfb, 0x0e, 0xed, 0xf8, - 0xfa, 0xdd, 0xf0, 0xfd, 0xdb, 0x1a, 0xf4, 0xef, 0x0c, 0x06, 0x0f, 0xdf, - 0xe2, 0x06, 0x06, 0xee, 0xfa, 0x0d, 0x17, 0xfc, 0xf9, 0x15, 0x1a, 0xe4, - 0xfb, 0x0c, 0x1a, 0xfc, 0x1b, 0x04, 0x07, 0x20, 0xff, 0x09, 0x0f, 0xf2, - 0x26, 0x19, 0x1f, 0x0d, 0x02, 0x16, 0x03, 0x03, 0xfd, 0x05, 0x01, 0x1b, - 0x0a, 0x11, 0xfa, 0x21, 0x13, 0xfb, 0x0c, 0x05, 0xf3, 0xdd, 0xe4, 0xdc, - 0x22, 0x1b, 0x15, 0x14, 0x0e, 0xe8, 0x00, 0xf7, 0xf8, 0xf4, 0x0b, 0x0b, - 0xfd, 0x21, 0xe3, 0x0f, 0xe1, 0x22, 0x01, 0x21, 0x0b, 0x1f, 0x09, 0x10, - 0xe2, 0x18, 0x11, 0x0e, 0xed, 0x01, 0x14, 0x12, 0xfd, 0x11, 0xf6, 0xe9, - 0x20, 0xe1, 0xf5, 0x1b, 0x27, 0x22, 0xfa, 0xf7, 0xfe, 0x13, 0xf6, 0xdc, - 0x06, 0x0d, 0xf4, 0x05, 0x20, 0x0d, 0x0b, 0xe4, 0x15, 0x28, 0x0c, 0x00, - 0xf5, 0x07, 0x0c, 0x0a, 0x06, 0x0e, 0xf3, 0xfb, 0xfe, 0x04, 0x08, 0xf4, - 0xef, 0x03, 0xe4, 
0xeb, 0x06, 0xee, 0xed, 0xdb, 0xeb, 0x1d, 0xf4, 0xfa, - 0x0c, 0xfc, 0xfe, 0x11, 0xf7, 0xf8, 0xf5, 0xef, 0xe7, 0xfc, 0x1b, 0xdc, - 0x17, 0xfd, 0xfe, 0x00, 0xea, 0xf4, 0xf1, 0xf7, 0x0f, 0x21, 0x04, 0xfd, - 0x0d, 0x0c, 0x0a, 0x14, 0xfd, 0x19, 0x09, 0x01, 0xfd, 0xe2, 0x0c, 0x0c, - 0xe0, 0x25, 0xfb, 0xff, 0x0d, 0x18, 0xf6, 0x0b, 0x19, 0x12, 0x10, 0x09, - 0x0b, 0x06, 0x12, 0x1c, 0x10, 0x03, 0x13, 0x0a, 0x05, 0x0f, 0x09, 0x01, - 0x21, 0xe4, 0x01, 0x26, 0xf9, 0xf4, 0x05, 0x19, 0x00, 0xff, 0x0b, 0xff, - 0x16, 0x09, 0xe7, 0xee, 0xed, 0xf5, 0x0f, 0x2f, 0xee, 0x19, 0x03, 0x0a, - 0x10, 0xee, 0xf7, 0x2e, 0xf4, 0x08, 0xf7, 0xee, 0x07, 0x00, 0xfc, 0x0e, - 0xf0, 0x12, 0x08, 0x05, 0xed, 0x11, 0xfc, 0xfb, 0xf7, 0x25, 0xf1, 0x05, - 0x0c, 0xf9, 0xfa, 0x03, 0x0c, 0x16, 0x04, 0x25, 0xf8, 0xe7, 0xfc, 0x11, - 0x0d, 0x19, 0xd8, 0xfa, 0x0b, 0x06, 0xfd, 0xef, 0x13, 0xf6, 0xff, 0x0e, - 0xf9, 0x04, 0xf1, 0xdc, 0xfb, 0xe1, 0xf6, 0x0b, 0x15, 0x07, 0xf7, 0x02, - 0x0e, 0xf1, 0xfd, 0xe3, 0xeb, 0x07, 0xf1, 0xef, 0x03, 0xfe, 0xf8, 0x07, - 0x10, 0xf7, 0x00, 0xf9, 0xf2, 0x0e, 0xf9, 0xf2, 0x1d, 0xf5, 0xd8, 0xff, - 0xe6, 0x18, 0x2a, 0x1b, 0x03, 0x16, 0xfe, 0xf4, 0xf5, 0xfd, 0x04, 0x01, - 0xfe, 0xfe, 0x07, 0xfc, 0x0e, 0xfa, 0x15, 0xeb, 0x02, 0x15, 0xea, 0xfd, - 0x04, 0xe5, 0xfe, 0xed, 0xfe, 0x1a, 0x09, 0x2a, 0x1b, 0xdf, 0xfb, 0xf8, - 0xf1, 0x04, 0x1a, 0x34, 0x07, 0xf9, 0x0d, 0xf5, 0xef, 0xec, 0x10, 0x1a, - 0x0b, 0x0f, 0x13, 0xfe, 0x10, 0x22, 0x1e, 0x02, 0xe6, 0xf7, 0x11, 0xfa, - 0x11, 0xfc, 0x1b, 0x21, 0x12, 0xf4, 0x18, 0x16, 0x29, 0xe4, 0x0c, 0x2e, - 0x12, 0x07, 0x20, 0xf6, 0x1d, 0xf4, 0x12, 0x33, 0xf4, 0xee, 0xfe, 0x05, - 0x06, 0xfb, 0x13, 0x0c, 0x0e, 0xf0, 0x00, 0xf8, 0xee, 0xf3, 0x17, 0x00, - 0xf7, 0xfb, 0xfc, 0x0f, 0xf4, 0xd5, 0x0a, 0xed, 0xeb, 0xf5, 0xe9, 0xef, - 0xd8, 0xf0, 0xf8, 0xe2, 0x19, 0xf7, 0xf8, 0x0a, 0x0b, 0x09, 0xfa, 0xe7, - 0x0f, 0xfc, 0xe8, 0x02, 0x00, 0x1a, 0xfe, 0xfd, 0x1b, 0xe6, 0xef, 0x0f, - 0xe3, 0x10, 0xf1, 0xe2, 0x0b, 0x0e, 0x06, 0x29, 0x00, 0x01, 0xf3, 0x00, - 0x11, 0x04, 0xf2, 
0xf7, 0xea, 0xf8, 0xe0, 0x09, 0x0e, 0x13, 0xf4, 0x00, - 0x09, 0xfa, 0xf5, 0x0c, 0xff, 0x18, 0x08, 0x0d, 0xfa, 0xde, 0xfa, 0x03, - 0xf2, 0xf3, 0x1b, 0xeb, 0x06, 0xea, 0xfb, 0xff, 0x0d, 0xf5, 0x10, 0x17, - 0xf8, 0xe8, 0xf1, 0xf1, 0xf5, 0x00, 0x03, 0x0a, 0x09, 0x0a, 0xf3, 0xfb, - 0x33, 0x26, 0xe7, 0x17, 0xe3, 0xfa, 0x1f, 0x24, 0xfc, 0x07, 0x02, 0xe2, - 0xeb, 0x08, 0x2c, 0xf8, 0x02, 0x1f, 0x04, 0xeb, 0x0b, 0x04, 0x17, 0xf7, - 0xff, 0x1c, 0xed, 0x00, 0x3f, 0xd5, 0x17, 0x1d, 0xfe, 0x03, 0xf1, 0x1c, - 0x17, 0xec, 0x0e, 0x54, 0xee, 0xf5, 0x25, 0xfa, 0x08, 0xee, 0x13, 0x32, - 0x0e, 0xd8, 0x09, 0x0f, 0xee, 0xe5, 0x06, 0x10, 0xf4, 0xfb, 0xe4, 0xfb, - 0x09, 0xde, 0x13, 0xff, 0x02, 0xf9, 0xec, 0x0a, 0x00, 0xe9, 0xfd, 0xdc, - 0x06, 0x04, 0xdb, 0x06, 0x01, 0xf8, 0x09, 0xe2, 0x0c, 0x14, 0xda, 0xfe, - 0x20, 0xe3, 0x09, 0xda, 0x14, 0x12, 0xe1, 0x05, 0xff, 0xf3, 0x00, 0x08, - 0xfb, 0xf1, 0xfd, 0xf3, 0x04, 0xfa, 0x08, 0xff, 0x01, 0x1d, 0x0b, 0xfd, - 0x0a, 0xf4, 0xfb, 0xfc, 0xf9, 0x19, 0xed, 0xfc, 0xf2, 0x06, 0xe7, 0x02, - 0xf6, 0x0c, 0xfc, 0xfb, 0x01, 0x0c, 0xeb, 0x1b, 0xff, 0xff, 0x08, 0x1d, - 0xf7, 0xe8, 0xfc, 0xf4, 0x0c, 0xfa, 0xf1, 0xee, 0xed, 0xdd, 0xfc, 0x06, - 0x05, 0xdc, 0x1a, 0xfc, 0xf9, 0x07, 0xdf, 0x1b, 0x14, 0x0c, 0xfc, 0x01, - 0x16, 0xe1, 0xed, 0x09, 0x34, 0xee, 0xe4, 0x1c, 0x1b, 0xfc, 0x3b, 0x03, - 0x15, 0xf2, 0xeb, 0x14, 0x00, 0xdd, 0x24, 0x04, 0xf1, 0xed, 0xfd, 0xe6, - 0x32, 0xf9, 0x24, 0x04, 0x0e, 0x22, 0x03, 0x14, 0x2f, 0xf5, 0x1a, 0x37, - 0xf4, 0x18, 0x03, 0x0f, 0x4b, 0xe6, 0x0d, 0x5c, 0xf7, 0x1f, 0x1c, 0xe6, - 0x23, 0x0c, 0x15, 0x4e, 0xe0, 0x05, 0x1c, 0xec, 0xff, 0x04, 0x13, 0x15, - 0xee, 0x07, 0xec, 0x0c, 0xdd, 0xf8, 0x0e, 0x03, 0x0c, 0x1f, 0xe8, 0x0e, - 0xf5, 0xec, 0xfc, 0xe2, 0xe8, 0xfb, 0xf6, 0x00, 0xe5, 0xea, 0xf3, 0xd3, - 0xf5, 0xfd, 0xd2, 0xfd, 0x1b, 0xed, 0x09, 0xd1, 0x23, 0xfa, 0xd4, 0xf7, - 0xe9, 0xf0, 0x0a, 0xd6, 0x14, 0x03, 0xe6, 0x10, 0xf4, 0x18, 0xfe, 0xe1, - 0x0b, 0x25, 0xf5, 0xfc, 0xe9, 0xf2, 0xe9, 0xf4, 0x0d, 0xf5, 0x00, 0xf9, - 0x17, 0x02, 0xfd, 
0x03, 0x04, 0xf8, 0xf5, 0x14, 0xe3, 0xd3, 0xeb, 0xe7, - 0x09, 0xf3, 0x14, 0x17, 0xee, 0xe6, 0xf6, 0xff, 0x11, 0x26, 0xf4, 0xf7, - 0x02, 0xfa, 0x05, 0x08, 0x16, 0xff, 0x0d, 0xf7, 0xf1, 0xf7, 0xe6, 0xfb, - 0x04, 0x04, 0x07, 0x02, 0x04, 0x09, 0xf5, 0xfc, 0x5f, 0xd6, 0xe7, 0x2a, - 0x23, 0xf4, 0x1b, 0x06, 0x01, 0xea, 0xe7, 0x05, 0x25, 0xe3, 0x25, 0x07, - 0xea, 0xfb, 0xfb, 0x09, 0x25, 0xde, 0x37, 0x04, 0x07, 0xe5, 0xff, 0x14, - 0x2f, 0x0a, 0x30, 0x23, 0x04, 0xf0, 0x23, 0xfe, 0x1c, 0xd2, 0x2b, 0x55, - 0x01, 0xe5, 0x26, 0xfe, 0x14, 0xed, 0x24, 0x46, 0xe6, 0xee, 0x0f, 0xfd, - 0xed, 0xef, 0x0e, 0x1e, 0x05, 0x0a, 0x12, 0xff, 0xe4, 0xf5, 0x0c, 0xed, - 0xfd, 0xea, 0x0d, 0x13, 0x1a, 0xe5, 0xfc, 0xc2, 0xef, 0x0a, 0xe2, 0x0f, - 0xfe, 0xff, 0x0c, 0xf0, 0xff, 0xdf, 0xea, 0x00, 0xf6, 0xe1, 0x04, 0xd8, - 0x26, 0x20, 0xdc, 0xf4, 0x19, 0x06, 0xe8, 0xd2, 0x10, 0x04, 0xf1, 0x02, - 0x0c, 0x06, 0xf0, 0xf0, 0x04, 0x1f, 0xf4, 0xf5, 0xed, 0xf1, 0xfa, 0xf1, - 0x04, 0x02, 0xf8, 0xfb, 0x04, 0xf1, 0xe5, 0xe4, 0x0a, 0xf0, 0xfe, 0xef, - 0x1c, 0xe3, 0xeb, 0xf3, 0x00, 0x17, 0x01, 0x13, 0x19, 0xda, 0xf8, 0x06, - 0xde, 0x11, 0xea, 0xf7, 0xf4, 0xef, 0x03, 0x04, 0x0b, 0xe8, 0x08, 0x0e, - 0xe2, 0xee, 0xde, 0x06, 0x0e, 0x29, 0xfb, 0xfa, 0x00, 0x02, 0xec, 0x1b, - 0x52, 0xff, 0xde, 0x3a, 0x2f, 0x13, 0x30, 0xe9, 0xff, 0xf6, 0xe7, 0x15, - 0x1d, 0xd9, 0x3c, 0x0f, 0xe6, 0x14, 0xee, 0x13, 0x1f, 0xe7, 0x33, 0x08, - 0xfc, 0x06, 0x0c, 0x08, 0x19, 0xd9, 0x2b, 0x1f, 0x07, 0x10, 0x24, 0x16, - 0x29, 0xfc, 0x31, 0x4d, 0xf0, 0xd9, 0x3f, 0xf2, 0x20, 0xe2, 0x25, 0x49, - 0xe5, 0xec, 0x0a, 0xf5, 0xf2, 0xd9, 0x22, 0x1f, 0xed, 0x22, 0x02, 0x0a, - 0x16, 0x08, 0xf7, 0xfb, 0x0e, 0xfb, 0xfb, 0x1d, 0xf3, 0x1c, 0xf6, 0xe1, - 0xcf, 0x19, 0xf4, 0x0f, 0xee, 0xf9, 0x04, 0xd1, 0xf9, 0xe2, 0xda, 0xf1, - 0x24, 0xf5, 0x07, 0xdf, 0x1d, 0xf9, 0xdb, 0x18, 0x0b, 0xea, 0x08, 0xca, - 0xf2, 0xfa, 0xec, 0x04, 0x0e, 0x17, 0xed, 0xf1, 0x06, 0x15, 0xfc, 0xfd, - 0x08, 0xfa, 0xe3, 0xe4, 0x0a, 0xfc, 0xee, 0x08, 0xf5, 0x09, 0xef, 0xee, - 0x06, 0xef, 0xe1, 
0x19, 0x07, 0xe8, 0xe6, 0xdf, 0xea, 0x0d, 0xf1, 0x16, - 0xee, 0xed, 0xf8, 0x09, 0xfa, 0xfb, 0x0c, 0xf8, 0xeb, 0xda, 0x00, 0xfc, - 0x04, 0xfe, 0xf5, 0xff, 0xf6, 0xe1, 0x0c, 0x0a, 0x13, 0x0d, 0xf6, 0xf5, - 0x15, 0x07, 0xca, 0xec, 0x50, 0x0e, 0xd0, 0x26, 0x4c, 0xf8, 0x23, 0xeb, - 0xff, 0x08, 0xe3, 0x11, 0x2c, 0xf9, 0x2a, 0xf1, 0xe9, 0x0b, 0xe9, 0x0f, - 0x15, 0xec, 0x33, 0x11, 0x0c, 0x0d, 0x01, 0x01, 0x32, 0xe3, 0x41, 0x27, - 0x11, 0x02, 0x2e, 0x07, 0x09, 0xe3, 0x22, 0x4d, 0xf1, 0x05, 0x27, 0x03, - 0x25, 0xf5, 0x2c, 0x3b, 0xf4, 0x00, 0x16, 0x0b, 0xec, 0xfe, 0x17, 0x0d, - 0xff, 0xe7, 0xfe, 0x24, 0x06, 0xee, 0xf0, 0xe9, 0xfa, 0x1c, 0xf2, 0x19, - 0x08, 0xfa, 0xff, 0xd2, 0x01, 0x02, 0xea, 0x05, 0xf2, 0xf4, 0x0b, 0xd2, - 0xf9, 0x0d, 0xcd, 0x0d, 0x12, 0xf2, 0x0e, 0xe1, 0x1f, 0x00, 0xe7, 0x14, - 0x04, 0xff, 0x09, 0xdb, 0xfc, 0xd9, 0x06, 0xf9, 0xeb, 0x01, 0xef, 0xfa, - 0xfb, 0xf5, 0xfc, 0xfb, 0x14, 0xe2, 0xf9, 0xf5, 0x02, 0xfd, 0xfc, 0x01, - 0xf7, 0xf3, 0x00, 0xec, 0xe7, 0xf2, 0x00, 0xf1, 0x11, 0xec, 0xf0, 0xe9, - 0x11, 0x0a, 0x07, 0x04, 0x01, 0xee, 0xfb, 0xf2, 0x14, 0x01, 0x12, 0xf0, - 0xf2, 0xf1, 0xf0, 0xfb, 0x08, 0x03, 0xf8, 0x01, 0xe8, 0xf9, 0x17, 0x26, - 0x0f, 0xea, 0xf7, 0xf8, 0x1e, 0xfe, 0xf2, 0xf8, 0x3f, 0x00, 0xd4, 0x1c, - 0x53, 0xfe, 0x1e, 0x0f, 0xef, 0xdd, 0xed, 0x10, 0x19, 0xe7, 0x34, 0x0e, - 0xde, 0xdf, 0xfa, 0x0e, 0x29, 0xe3, 0x16, 0x09, 0x06, 0x12, 0xeb, 0xf9, - 0x32, 0xe0, 0x1a, 0x1d, 0xf3, 0xed, 0x10, 0x07, 0x31, 0xf2, 0x12, 0x52, - 0xeb, 0xf7, 0x1e, 0xf7, 0x1a, 0xdc, 0x3e, 0x33, 0xe3, 0xfb, 0x1f, 0x0b, - 0x08, 0xfe, 0x13, 0x1a, 0xf4, 0xf8, 0xfe, 0x08, 0xfc, 0xe9, 0xfe, 0xeb, - 0xe6, 0xf6, 0x02, 0x18, 0x02, 0xe8, 0xfb, 0xf3, 0x01, 0x08, 0xd7, 0x13, - 0x04, 0xe6, 0x02, 0xe6, 0xd7, 0x01, 0xd4, 0xf0, 0x0e, 0x05, 0x18, 0xe5, - 0x08, 0xe5, 0xd2, 0x16, 0x12, 0xfe, 0x0e, 0xd3, 0xfc, 0x1f, 0xe9, 0xf8, - 0x11, 0x06, 0xf3, 0xd5, 0xf8, 0xff, 0xf0, 0x04, 0x0a, 0xd9, 0xf8, 0xfd, - 0xf5, 0x12, 0xff, 0x06, 0x1b, 0xe6, 0xfe, 0xfe, 0xde, 0xee, 0xf6, 0x18, - 0xf1, 0xf8, 0x06, 
0xf3, 0x02, 0xea, 0x04, 0x14, 0xfc, 0xee, 0xe6, 0x09, - 0xf9, 0xee, 0xe3, 0xe7, 0xfc, 0xd9, 0xef, 0xfc, 0x0a, 0x0c, 0x03, 0xf6, - 0xe2, 0x11, 0x0f, 0x19, 0x18, 0x10, 0xef, 0xe5, 0x22, 0xf5, 0xe5, 0xe9, - 0x4b, 0xf7, 0xdb, 0x0c, 0x4f, 0xde, 0x22, 0x16, 0x09, 0x16, 0xd1, 0xf8, - 0x19, 0xe0, 0x24, 0xfe, 0xb8, 0xfb, 0xe5, 0x12, 0x1c, 0xe3, 0x22, 0x09, - 0x05, 0x29, 0xf7, 0x10, 0x31, 0xe1, 0x33, 0x3f, 0xfd, 0xed, 0x04, 0x03, - 0x2e, 0xed, 0x30, 0x36, 0xee, 0x16, 0x2f, 0xf5, 0x1b, 0xdc, 0x3a, 0x56, - 0xe5, 0xef, 0x26, 0xff, 0x03, 0xd7, 0x31, 0x16, 0xef, 0xf1, 0x08, 0x13, - 0x01, 0x02, 0x03, 0xf1, 0xf2, 0x08, 0xff, 0x05, 0x12, 0xf2, 0xee, 0xda, - 0xed, 0xec, 0xea, 0xf7, 0x0c, 0xf1, 0x09, 0xe6, 0xe6, 0x00, 0xcc, 0x10, - 0x0d, 0x0d, 0x20, 0xf4, 0x18, 0x23, 0xec, 0xf9, 0x00, 0xe4, 0x07, 0xd4, - 0xfb, 0x16, 0xd2, 0x01, 0xe6, 0x01, 0x06, 0xf0, 0xfe, 0x03, 0xf3, 0x09, - 0x01, 0x0d, 0x05, 0xf7, 0xd4, 0x02, 0xfb, 0xfb, 0x08, 0xf0, 0x1f, 0xf3, - 0xfe, 0xeb, 0x02, 0x0e, 0x1b, 0x0f, 0x04, 0xf5, 0xf0, 0x1f, 0x14, 0xf7, - 0x06, 0xdc, 0xf9, 0xe9, 0x01, 0xff, 0x08, 0xf2, 0x06, 0xff, 0xff, 0xf3, - 0x05, 0x1a, 0xfc, 0xfa, 0xeb, 0xfb, 0xfa, 0x12, 0x20, 0xf6, 0xe0, 0xe8, - 0x1c, 0xfa, 0xd6, 0x0d, 0x2c, 0x04, 0xe1, 0x09, 0x3b, 0xd3, 0x2a, 0xee, - 0xf7, 0xed, 0xf1, 0xf7, 0x0d, 0xf0, 0x32, 0x0f, 0xc9, 0x0e, 0x00, 0x10, - 0x24, 0xfb, 0x31, 0xf0, 0xf4, 0xdd, 0xf5, 0x04, 0x25, 0xc7, 0x27, 0x25, - 0x16, 0x11, 0x2e, 0x09, 0x30, 0xd1, 0x2c, 0x34, 0xe6, 0xf0, 0x21, 0xf5, - 0x21, 0xc8, 0x40, 0x39, 0xde, 0xf0, 0x12, 0xf3, 0x10, 0xe8, 0x1f, 0x18, - 0xfa, 0xea, 0x07, 0x11, 0xdf, 0xed, 0xfa, 0xf0, 0x07, 0xef, 0xf3, 0x05, - 0x10, 0xe5, 0xf3, 0xe9, 0xe9, 0xe8, 0xd6, 0x01, 0xf9, 0x05, 0x0b, 0xee, - 0xf9, 0x12, 0xe3, 0x05, 0xfd, 0xe6, 0x16, 0xe2, 0x1b, 0x12, 0xc5, 0x00, - 0xfd, 0x02, 0x04, 0xd2, 0xff, 0xec, 0xf6, 0xfd, 0x00, 0xe4, 0xf7, 0xf3, - 0xeb, 0xfa, 0xf8, 0x0d, 0x03, 0xfa, 0xfe, 0xe4, 0xdb, 0xe3, 0x06, 0xff, - 0xf4, 0xf2, 0x1b, 0xf1, 0xf7, 0x02, 0x01, 0x04, 0x13, 0xe5, 0x0c, 0x05, - 0xf7, 0x0a, 0x03, 
0x03, 0x0b, 0x03, 0xee, 0xf7, 0x21, 0x20, 0xff, 0xf3, - 0x09, 0xe5, 0xff, 0xec, 0x17, 0x00, 0x06, 0x14, 0xeb, 0xf2, 0x18, 0x16, - 0x1f, 0xec, 0xee, 0xe1, 0x1e, 0x03, 0xfa, 0xfe, 0x28, 0x03, 0xc9, 0x0c, - 0x3f, 0xd8, 0x30, 0x16, 0x03, 0xf8, 0xe9, 0xfb, 0x28, 0xe1, 0x36, 0x0a, - 0xdf, 0xe5, 0xeb, 0x08, 0x1c, 0xcd, 0x29, 0xf2, 0xfc, 0x0a, 0xed, 0x01, - 0x29, 0xf1, 0x20, 0x13, 0x04, 0xec, 0x17, 0x0a, 0x35, 0xc3, 0x1a, 0x46, - 0xe0, 0xd7, 0x3c, 0x09, 0x28, 0xd1, 0x22, 0x20, 0xd5, 0xfa, 0x28, 0xfa, - 0xff, 0xea, 0x1d, 0x23, 0xe0, 0x07, 0x07, 0x0f, 0xf1, 0xf1, 0x08, 0xf0, - 0xf8, 0xff, 0x05, 0x1b, 0x05, 0xfa, 0xf0, 0xfb, 0xe3, 0xe4, 0xcc, 0x1a, - 0xf9, 0x09, 0x06, 0xee, 0xf4, 0x03, 0xd0, 0x14, 0xf4, 0xff, 0x1d, 0xe8, - 0x11, 0xf4, 0xd1, 0xf4, 0x04, 0x0b, 0xfb, 0xdc, 0x0a, 0x0c, 0xeb, 0xed, - 0x06, 0xf3, 0x04, 0xdd, 0xdf, 0xf9, 0xea, 0xfc, 0xf5, 0xf2, 0xfb, 0xea, - 0xe3, 0x03, 0xee, 0x0e, 0xff, 0xdb, 0x1e, 0x04, 0xf7, 0x1a, 0x04, 0x0c, - 0x0d, 0xda, 0x04, 0xe9, 0xff, 0x04, 0x00, 0x0c, 0xf9, 0xe4, 0xfb, 0xf6, - 0x14, 0xde, 0x1b, 0x00, 0x0b, 0xfe, 0x06, 0xf8, 0x0f, 0xdc, 0x01, 0xef, - 0xef, 0x0d, 0xf8, 0xf1, 0x0f, 0xf9, 0xf9, 0xdf, 0x0d, 0xe4, 0xd9, 0xf9, - 0x2b, 0xee, 0xe8, 0x09, 0x40, 0xf9, 0x2f, 0x0a, 0xfa, 0xe8, 0xe9, 0x01, - 0x0e, 0xe7, 0x23, 0x0a, 0xd0, 0x19, 0xd3, 0x0e, 0x04, 0xda, 0x2b, 0x0f, - 0xe7, 0xe6, 0xf3, 0xfb, 0x2c, 0xd3, 0x36, 0x19, 0x0e, 0xfe, 0x03, 0x1a, - 0x2e, 0xd0, 0x23, 0x32, 0xf1, 0xe1, 0x2a, 0x09, 0x1b, 0xf6, 0x29, 0x3e, - 0xce, 0x15, 0x0a, 0xe8, 0xec, 0xdf, 0x44, 0x28, 0xd9, 0xfd, 0xfa, 0x09, - 0xff, 0xe7, 0x08, 0xec, 0xf4, 0xef, 0x01, 0x19, 0x11, 0xf3, 0xeb, 0xeb, - 0xed, 0x1a, 0xdd, 0x15, 0x0f, 0x07, 0xfe, 0xeb, 0xff, 0xd6, 0xd5, 0x04, - 0xf5, 0x07, 0x10, 0xe6, 0x0c, 0xe4, 0xda, 0x0c, 0x08, 0xee, 0x06, 0xd8, - 0xf8, 0xf1, 0xe0, 0x01, 0x08, 0xfe, 0xf9, 0xf3, 0xdf, 0x03, 0xe6, 0xf4, - 0x0a, 0xff, 0xf2, 0xe0, 0xd9, 0xeb, 0x01, 0x10, 0x02, 0xfc, 0x0d, 0x14, - 0xea, 0xf8, 0x03, 0x18, 0xf3, 0x09, 0xfc, 0x0c, 0x0b, 0x1f, 0xf5, 0x05, - 0xf7, 0xf9, 0x00, 
0xfd, 0x04, 0xfc, 0x16, 0x07, 0x00, 0xdf, 0xf9, 0xfa, - 0x0c, 0xfb, 0xf4, 0xf7, 0xf0, 0xeb, 0x07, 0x17, 0x20, 0xfb, 0xf0, 0xec, - 0x04, 0x00, 0xf8, 0xf2, 0x2d, 0xf9, 0xd9, 0x0b, 0x55, 0xec, 0x33, 0x26, - 0xf8, 0x0a, 0xf2, 0x0b, 0x25, 0xdf, 0x29, 0x05, 0xd1, 0x14, 0xe2, 0xf2, - 0x12, 0xdd, 0x28, 0xfc, 0xec, 0x08, 0xfd, 0x02, 0x3a, 0xe6, 0x29, 0x25, - 0x0d, 0x10, 0x09, 0x0a, 0x32, 0xf5, 0x17, 0x2d, 0xea, 0xfb, 0x35, 0xfc, - 0x28, 0xd0, 0x29, 0x2f, 0xcb, 0x06, 0x0f, 0x04, 0xf2, 0xf3, 0x34, 0x1c, - 0xf4, 0x08, 0x05, 0xfc, 0xfd, 0xed, 0x0f, 0xf8, 0xe9, 0xf0, 0x09, 0x16, - 0xfe, 0x02, 0xff, 0xd4, 0xea, 0x0a, 0xeb, 0x0c, 0xf8, 0xf4, 0x09, 0xf4, - 0xf2, 0x07, 0xd9, 0x0b, 0xfd, 0xe4, 0x1a, 0xef, 0x14, 0x08, 0xd8, 0xfc, - 0xf5, 0xe1, 0x03, 0xcf, 0xf1, 0x11, 0xdb, 0x15, 0x07, 0x10, 0xf8, 0xfc, - 0xe2, 0xf1, 0xf5, 0xde, 0xff, 0xe7, 0x01, 0xea, 0xee, 0xe9, 0x02, 0x0a, - 0x18, 0xec, 0xfe, 0xf9, 0x09, 0xf3, 0x0e, 0x02, 0xf1, 0xfc, 0xf9, 0x16, - 0x05, 0x07, 0x09, 0x0d, 0x0e, 0xf7, 0x04, 0xed, 0x04, 0xdb, 0x04, 0x04, - 0xf6, 0xdc, 0xee, 0xec, 0xf5, 0xfe, 0xf4, 0x02, 0xe4, 0x0b, 0xe0, 0x17, - 0x0a, 0xe0, 0xf7, 0xdc, 0x11, 0xd6, 0xfe, 0xfa, 0x35, 0xde, 0xe6, 0x06, - 0x44, 0xf9, 0x35, 0x0a, 0xfb, 0xff, 0xec, 0xfb, 0x16, 0xd9, 0x23, 0x0f, - 0xd4, 0xef, 0xdf, 0x06, 0x0b, 0xd9, 0x25, 0xff, 0xf8, 0xeb, 0xf4, 0x0a, - 0x20, 0xe5, 0x22, 0x1c, 0xeb, 0xf4, 0x0d, 0x0c, 0x19, 0xe1, 0x1e, 0x31, - 0xe9, 0xfb, 0x20, 0xf0, 0x23, 0xfe, 0x35, 0x28, 0xb4, 0x06, 0x28, 0xe7, - 0xfb, 0xe9, 0x2a, 0x1a, 0xef, 0x15, 0x0c, 0xed, 0xf1, 0x04, 0x0e, 0x0a, - 0xff, 0x16, 0x01, 0x04, 0x17, 0xea, 0xec, 0xdc, 0xf4, 0xf7, 0x04, 0x16, - 0x1f, 0x0a, 0x11, 0xef, 0x12, 0xdf, 0xd9, 0x0c, 0xf5, 0x10, 0x02, 0xf3, - 0x10, 0x03, 0xd3, 0xf5, 0x0b, 0x02, 0x00, 0xcb, 0xf6, 0x23, 0xf6, 0xf1, - 0x1f, 0xf9, 0xfc, 0xf0, 0xf6, 0xfe, 0xfa, 0xf8, 0xf9, 0xf4, 0xfb, 0x0a, - 0xd6, 0x29, 0x09, 0x02, 0x00, 0xfc, 0xfc, 0xee, 0xf5, 0x05, 0xfb, 0x1e, - 0xf1, 0xf1, 0xf3, 0x02, 0xec, 0x1c, 0x0c, 0x0e, 0x0b, 0x04, 0xf6, 0xe7, - 0x14, 0x08, 0x27, 
0x01, 0xfe, 0xe5, 0xe7, 0x01, 0x1b, 0xf0, 0xf6, 0xff, - 0xf4, 0xe7, 0xee, 0x18, 0x0d, 0x08, 0xf8, 0xd6, 0x07, 0xf4, 0x08, 0xff, - 0x1d, 0x13, 0xe7, 0x0b, 0x42, 0xef, 0x28, 0x00, 0xf9, 0xf0, 0xf3, 0x00, - 0x15, 0xfd, 0x1a, 0x22, 0xc1, 0xf5, 0xe0, 0xf8, 0x09, 0xe6, 0x0e, 0x05, - 0xf9, 0xf6, 0x01, 0x01, 0x13, 0xdc, 0x1f, 0x0d, 0xfb, 0x04, 0x08, 0x0b, - 0x15, 0xdb, 0x28, 0x34, 0xed, 0x0b, 0x3a, 0xed, 0x16, 0xe3, 0x39, 0x32, - 0xc4, 0x0b, 0x20, 0xe7, 0xf7, 0x02, 0x35, 0x24, 0xfc, 0xe8, 0x1c, 0xf8, - 0xf1, 0xfa, 0x0c, 0x1d, 0xf2, 0x05, 0xff, 0x12, 0x0f, 0x01, 0xec, 0xea, - 0xf0, 0x03, 0xe7, 0x15, 0xfd, 0x05, 0x08, 0xe0, 0x1b, 0xf8, 0xe1, 0x1e, - 0xed, 0xdc, 0x11, 0xeb, 0xfd, 0x1a, 0xeb, 0x09, 0xf9, 0xf3, 0x00, 0xe8, - 0xe6, 0x08, 0xf7, 0xde, 0x1e, 0x00, 0x00, 0x00, 0xe4, 0x09, 0xf2, 0xf8, - 0xe7, 0xf2, 0x0d, 0xfa, 0xe2, 0x0f, 0x04, 0x08, 0xf2, 0x13, 0xf8, 0xf9, - 0xf1, 0xff, 0x03, 0x11, 0x12, 0xe9, 0xf4, 0x13, 0x07, 0x0c, 0x13, 0x2b, - 0xf7, 0xdd, 0xf9, 0xe9, 0xfa, 0xdb, 0x1d, 0xf6, 0xf6, 0xf9, 0xe4, 0xf6, - 0x0d, 0xeb, 0x0d, 0x08, 0xe7, 0xe7, 0xf2, 0x03, 0x1d, 0xd9, 0xd8, 0xe4, - 0xf7, 0xea, 0xdc, 0xdc, 0x26, 0x02, 0xee, 0xfa, 0x38, 0xfc, 0x1a, 0xef, - 0xda, 0xf1, 0xdf, 0x0b, 0x1a, 0xe0, 0x16, 0x16, 0xdc, 0x04, 0xfa, 0xf7, - 0xee, 0x02, 0x25, 0x02, 0xf5, 0xfb, 0x08, 0xf6, 0x11, 0xf5, 0x12, 0x08, - 0xf4, 0xe3, 0x1b, 0xf5, 0x3a, 0xdc, 0x20, 0x2e, 0xe0, 0xf5, 0x30, 0xe4, - 0x09, 0xf8, 0x3c, 0x45, 0xd3, 0x08, 0x23, 0xd8, 0x09, 0xe4, 0x35, 0x30, - 0xe4, 0xfe, 0x07, 0xf6, 0x05, 0x01, 0x05, 0xff, 0xf6, 0x0d, 0x02, 0xfd, - 0x03, 0x05, 0x0d, 0x00, 0xf5, 0xd6, 0xcf, 0x19, 0x06, 0xee, 0x0d, 0xf2, - 0x01, 0x18, 0xef, 0x12, 0x04, 0x02, 0x21, 0xd9, 0x02, 0x0d, 0xeb, 0xe9, - 0x13, 0x08, 0x15, 0xf0, 0xee, 0x03, 0xec, 0x06, 0x17, 0xed, 0x00, 0x1a, - 0xee, 0xf2, 0xfc, 0x09, 0xec, 0xf8, 0xf8, 0x18, 0xf4, 0x13, 0x04, 0xf6, - 0x02, 0xf0, 0xfc, 0xfe, 0xe3, 0x01, 0x0a, 0x1c, 0x1b, 0xec, 0x0e, 0x01, - 0xfb, 0x08, 0x11, 0xf5, 0x00, 0x14, 0xe6, 0x12, 0x07, 0xf4, 0x15, 0x07, - 0xfc, 0xfb, 0xf5, 
0xf1, 0x01, 0x21, 0x01, 0xe9, 0xe8, 0xef, 0xdb, 0xdf, - 0x1f, 0x0a, 0xdd, 0xd1, 0x16, 0x04, 0xfd, 0xe1, 0x24, 0xf0, 0xec, 0xf4, - 0x38, 0xe1, 0x16, 0xfd, 0xe0, 0xec, 0xe7, 0x0c, 0x2a, 0x04, 0x0c, 0x17, - 0xdc, 0xe8, 0xf2, 0x03, 0xec, 0xfd, 0x19, 0xfe, 0xf3, 0xf0, 0xf3, 0xfb, - 0x18, 0xdf, 0x1c, 0x00, 0x09, 0xf4, 0x18, 0x0b, 0x1f, 0xf6, 0x34, 0x22, - 0xf4, 0x22, 0x45, 0xeb, 0x23, 0xcf, 0x32, 0x34, 0xf2, 0xf9, 0x29, 0xd4, - 0xf7, 0x0b, 0x38, 0x2a, 0x09, 0xe6, 0x05, 0x01, 0x0b, 0xfe, 0x17, 0xfb, - 0x00, 0xeb, 0x08, 0xfd, 0x0c, 0x02, 0x1d, 0xea, 0xfa, 0x0b, 0xeb, 0x09, - 0xfe, 0xfe, 0x10, 0xe0, 0xf6, 0x06, 0xf0, 0x15, 0xf3, 0x09, 0x11, 0xe4, - 0xf9, 0x07, 0xe1, 0xed, 0x17, 0x05, 0x0c, 0xe1, 0xdb, 0xf2, 0xf8, 0xea, - 0x22, 0xe9, 0x02, 0x00, 0xfd, 0xe7, 0xf2, 0xf8, 0xf9, 0xfc, 0xfa, 0xe8, - 0xe8, 0xeb, 0xe9, 0x0d, 0x04, 0xf8, 0xf8, 0xf7, 0xf8, 0x0d, 0x03, 0x0c, - 0x13, 0xf2, 0x0f, 0xf9, 0xe6, 0xfd, 0x0f, 0x19, 0x08, 0xf7, 0xfa, 0x01, - 0xf3, 0x12, 0x1e, 0x05, 0x0a, 0x09, 0xfd, 0x0b, 0x07, 0x08, 0x02, 0xfc, - 0xd6, 0xe8, 0x14, 0x01, 0x13, 0x19, 0xef, 0xda, 0x0e, 0x0a, 0x07, 0xef, - 0x34, 0xe0, 0x05, 0x1e, 0x4e, 0xe9, 0x19, 0xff, 0xe1, 0x04, 0xfb, 0x0e, - 0x11, 0x05, 0x1f, 0x15, 0xd4, 0xec, 0xf9, 0xe7, 0xf9, 0xfc, 0x25, 0xff, - 0x06, 0xf2, 0x01, 0xf6, 0x2a, 0x17, 0x24, 0x11, 0xf3, 0x1a, 0x1f, 0xfb, - 0x32, 0xeb, 0x33, 0x2f, 0x00, 0x08, 0x2c, 0xf0, 0x26, 0xf4, 0x25, 0x36, - 0xd9, 0xf1, 0x1a, 0xd5, 0xec, 0xf9, 0x32, 0x27, 0xfc, 0xf4, 0xf0, 0xe3, - 0xfa, 0x0c, 0x16, 0x17, 0xfa, 0xf9, 0xe5, 0x1f, 0x1f, 0xfa, 0xff, 0xfd, - 0x0d, 0x02, 0xe9, 0x0e, 0xf0, 0x12, 0x09, 0xda, 0x02, 0xea, 0xe5, 0x0a, - 0xff, 0x03, 0x13, 0xf0, 0x0a, 0xf9, 0xe9, 0xff, 0x10, 0xfc, 0x1a, 0xf3, - 0xf7, 0x0f, 0xf4, 0xfa, 0xf4, 0x05, 0x10, 0x0a, 0xdd, 0x09, 0xf7, 0xf0, - 0xe5, 0x07, 0x07, 0xfa, 0x02, 0xd7, 0xf8, 0xf7, 0x01, 0xfb, 0x0e, 0xf8, - 0x07, 0x0f, 0xfe, 0x03, 0x12, 0x05, 0x09, 0x13, 0xf8, 0xdc, 0xfd, 0x27, - 0x0f, 0xec, 0xf7, 0x07, 0x00, 0xfc, 0x12, 0xf8, 0xfb, 0xea, 0xe4, 0xe9, - 0xe9, 0xe0, 0xff, 
0xdc, 0xd6, 0xeb, 0xf2, 0xf7, 0x0d, 0x1b, 0xe9, 0xc4, - 0x06, 0x00, 0xfd, 0x04, 0x46, 0xf9, 0xe9, 0x13, 0x2d, 0x0c, 0x1f, 0xf8, - 0xd3, 0x0c, 0x14, 0x11, 0x05, 0xe5, 0x27, 0x08, 0xc5, 0xef, 0xdf, 0xdd, - 0x04, 0xf8, 0x11, 0x10, 0xf0, 0xe7, 0xfb, 0x03, 0x3c, 0xe7, 0x14, 0x0c, - 0xf4, 0xf6, 0x1b, 0x0a, 0x23, 0xf2, 0x2d, 0x1a, 0x08, 0xff, 0x32, 0xe7, - 0x1a, 0x05, 0x2b, 0x34, 0xf1, 0x0a, 0x00, 0xe8, 0x02, 0xdf, 0x2c, 0x2a, - 0x03, 0xe6, 0xfc, 0xef, 0xfc, 0xe4, 0x03, 0x01, 0x03, 0xee, 0xe9, 0x15, - 0x05, 0x03, 0x13, 0x11, 0x0e, 0xee, 0xf5, 0x22, 0x1b, 0x0e, 0xfd, 0xf3, - 0x0a, 0x02, 0xdd, 0x20, 0xeb, 0x06, 0xf8, 0xe2, 0x06, 0x0e, 0xde, 0x0d, - 0xf9, 0x16, 0x1c, 0x0c, 0xe0, 0xf0, 0xec, 0x0c, 0x0f, 0xf2, 0x27, 0x1d, - 0xde, 0xe6, 0xf0, 0xf9, 0xf0, 0x02, 0x0a, 0x07, 0x06, 0xf9, 0x0f, 0xfa, - 0xf0, 0xee, 0xf1, 0xf7, 0xff, 0x02, 0x0b, 0x0d, 0x1b, 0xee, 0xf6, 0x05, - 0xff, 0x1c, 0x17, 0x04, 0x05, 0x17, 0x00, 0xff, 0x0d, 0xf3, 0x23, 0x10, - 0xfd, 0x05, 0xfb, 0xea, 0x03, 0x10, 0x07, 0xd7, 0xf7, 0xff, 0xf3, 0xf1, - 0x17, 0xed, 0xd3, 0xcb, 0x14, 0x1c, 0xf5, 0x03, 0x47, 0xf6, 0xf7, 0xf2, - 0x3e, 0xf2, 0x22, 0xf4, 0xed, 0xfc, 0xee, 0x0b, 0xf4, 0xf1, 0x25, 0x10, - 0xd0, 0xf6, 0x00, 0xef, 0x10, 0xfc, 0x15, 0xe5, 0xdb, 0xf3, 0xea, 0x10, - 0x22, 0xf2, 0x2b, 0x11, 0xf9, 0x0a, 0xfc, 0xf5, 0x53, 0x16, 0x25, 0x43, - 0xe0, 0x0e, 0x13, 0xfc, 0x2d, 0xe2, 0x55, 0x65, 0xf4, 0x08, 0x01, 0xdf, - 0x0a, 0x00, 0x49, 0x1c, 0xfe, 0xdf, 0xef, 0xf2, 0xf9, 0xf6, 0xfd, 0xff, - 0xf3, 0x02, 0xf6, 0x14, 0x0b, 0xe8, 0x09, 0xfc, 0xfc, 0xe2, 0xe5, 0x11, - 0x03, 0x09, 0xfb, 0x06, 0x10, 0x1a, 0xf3, 0x0d, 0xfa, 0x0a, 0xd5, 0xf5, - 0x1a, 0x11, 0xf2, 0xfc, 0x1f, 0xfe, 0x0e, 0xe4, 0xef, 0xd7, 0xee, 0x06, - 0x1e, 0x04, 0x12, 0x28, 0xf7, 0x0e, 0x06, 0xf8, 0xee, 0xf0, 0x1a, 0x01, - 0xf7, 0xfd, 0x03, 0x11, 0x19, 0x10, 0x04, 0xfb, 0xd7, 0xfa, 0x16, 0x06, - 0x07, 0x23, 0xfa, 0x14, 0x11, 0xf1, 0x12, 0x10, 0x04, 0xe1, 0xee, 0xf7, - 0x21, 0x0e, 0x0a, 0x0a, 0xf8, 0x07, 0x0a, 0xee, 0x03, 0x1f, 0xfa, 0xc4, - 0xec, 0x12, 0x01, 
0x1e, 0xfd, 0xf1, 0xe8, 0xcc, 0xf4, 0x17, 0xff, 0xdd, - 0x45, 0x10, 0xee, 0xfa, 0x3d, 0xe7, 0x27, 0xdd, 0xd7, 0xf9, 0xf4, 0xf6, - 0x06, 0xf8, 0x1e, 0x13, 0xe7, 0xe2, 0xf1, 0xe3, 0xf3, 0xf7, 0x18, 0x12, - 0xe4, 0x0a, 0xdb, 0xff, 0xff, 0xfe, 0x20, 0x09, 0x00, 0xf7, 0x23, 0xf6, - 0x2d, 0x14, 0x26, 0x28, 0xe5, 0xff, 0x0f, 0xe3, 0x1d, 0xe8, 0x56, 0x43, - 0xe7, 0xfb, 0xf9, 0xe6, 0xe9, 0xe2, 0x19, 0x19, 0x08, 0xfa, 0xf3, 0xe5, - 0x23, 0x07, 0x0f, 0xf8, 0xf8, 0xf3, 0xfc, 0x11, 0x2a, 0x05, 0xf4, 0xf1, - 0xfa, 0xfb, 0xf1, 0x1e, 0x13, 0x0f, 0xf9, 0xf5, 0xfa, 0x09, 0xf9, 0x03, - 0xf0, 0xf0, 0xe7, 0xec, 0xf1, 0x0c, 0xe6, 0xee, 0xf6, 0x20, 0x0f, 0xe9, - 0x00, 0xf4, 0xfe, 0xf0, 0x13, 0x0a, 0x17, 0x13, 0xee, 0x13, 0xfb, 0xff, - 0xf8, 0xfd, 0xf4, 0xe2, 0xe8, 0x06, 0xfc, 0x14, 0x03, 0x17, 0x00, 0x03, - 0xe6, 0xfd, 0xf2, 0x12, 0x12, 0x20, 0xeb, 0x10, 0x02, 0xf7, 0x13, 0x0d, - 0x11, 0xfd, 0xde, 0xf5, 0x07, 0xf3, 0x04, 0xff, 0x06, 0x05, 0xfb, 0xea, - 0xf0, 0x0a, 0x00, 0xb5, 0xe8, 0x1a, 0x03, 0xfe, 0x0d, 0x1a, 0xe7, 0xc0, - 0xd6, 0xdc, 0xf6, 0xf8, 0x39, 0xf5, 0xd5, 0xf8, 0x22, 0xfa, 0x22, 0x05, - 0xd0, 0xf4, 0x2d, 0xfc, 0x00, 0x0a, 0x1b, 0xfc, 0xe6, 0x09, 0x14, 0xfa, - 0x00, 0x1d, 0x1a, 0xfd, 0xf3, 0x18, 0xfc, 0xeb, 0x15, 0xf5, 0x0e, 0x0a, - 0xf3, 0xf1, 0x1b, 0x05, 0x14, 0x03, 0x2d, 0x27, 0xfb, 0x18, 0x22, 0xef, - 0xf6, 0x06, 0x28, 0x2b, 0xde, 0xec, 0xef, 0xe8, 0xd3, 0xfe, 0x17, 0x12, - 0x01, 0x13, 0x05, 0xf7, 0x00, 0xde, 0xf3, 0xe5, 0x03, 0xfb, 0x07, 0x0b, - 0xfd, 0xdc, 0xdf, 0x03, 0x0c, 0x00, 0xfa, 0x06, 0x0e, 0x02, 0x05, 0xfa, - 0xfd, 0xed, 0x09, 0x0c, 0xfd, 0xfb, 0x0c, 0xf0, 0xe4, 0x04, 0xd6, 0xf3, - 0x09, 0x0a, 0xf9, 0xf8, 0xe2, 0xef, 0xdf, 0xf0, 0xf8, 0x03, 0x0f, 0x20, - 0xf4, 0xe3, 0xf8, 0x02, 0xe2, 0xe5, 0x25, 0x0f, 0xeb, 0xf8, 0xe9, 0xfd, - 0x04, 0x0c, 0x0c, 0xfe, 0x01, 0x08, 0xfc, 0xfc, 0x1b, 0x01, 0xe5, 0x13, - 0xf9, 0xe8, 0x07, 0x20, 0xfe, 0x06, 0xec, 0xfe, 0x09, 0xef, 0x14, 0x04, - 0x0b, 0xf5, 0xe7, 0xff, 0x0a, 0x02, 0x09, 0xe9, 0xc4, 0x16, 0x0d, 0xe7, - 0x15, 0x14, 0xf1, 
0xd0, 0xec, 0xe7, 0xf0, 0xf0, 0x33, 0x05, 0xda, 0xf2, - 0x0b, 0x08, 0x38, 0x01, 0x07, 0xfd, 0xd8, 0x06, 0xd9, 0xf0, 0x16, 0x1f, - 0xff, 0xf7, 0xe0, 0xd8, 0xf3, 0xf7, 0x12, 0x08, 0x0e, 0x05, 0xf6, 0x03, - 0xef, 0x1b, 0x12, 0xf4, 0xe8, 0x0f, 0x02, 0xfd, 0xf2, 0x16, 0x26, 0x22, - 0xe0, 0x07, 0xf7, 0xe6, 0xeb, 0x16, 0x22, 0x1a, 0x0b, 0x01, 0xf5, 0xea, - 0xd2, 0x22, 0x0f, 0x13, 0x15, 0x08, 0xf0, 0xfb, 0xed, 0x11, 0xf3, 0xe9, - 0xff, 0xde, 0x0a, 0x18, 0x0f, 0x02, 0xfb, 0xf9, 0xfb, 0xe8, 0x12, 0x18, - 0x01, 0xf4, 0xf6, 0xf8, 0xf0, 0x1f, 0x24, 0x15, 0xf5, 0x00, 0x1c, 0xf9, - 0x01, 0x0a, 0x11, 0xd5, 0x01, 0x12, 0x02, 0xec, 0xfd, 0x07, 0xf2, 0xea, - 0xf9, 0xff, 0xf7, 0xfb, 0x15, 0xec, 0xe5, 0x01, 0xeb, 0x05, 0xf9, 0x10, - 0xfe, 0x28, 0xe5, 0x0a, 0xeb, 0x1b, 0x0e, 0xf9, 0xde, 0x02, 0x15, 0x0a, - 0xff, 0xfe, 0x11, 0x24, 0x03, 0xf8, 0x00, 0x08, 0xfd, 0x0e, 0xeb, 0xf3, - 0xf6, 0xf7, 0x14, 0x0e, 0xfc, 0xf5, 0xde, 0xf5, 0x9e, 0xfe, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xab, 0x01, 0x00, 0x00, - 0xfa, 0xfd, 0xff, 0xff, 0xa2, 0xff, 0xff, 0xff, 0xba, 0x00, 0x00, 0x00, - 0x24, 0xfc, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x54, 0x4f, 0x43, 0x4f, - 0x20, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x24, 0xfb, 0xff, 0xff, - 0x68, 0x01, 0x00, 0x00, 0x5c, 0x01, 0x00, 0x00, 0x50, 0x01, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xf4, 0x00, 0x00, 0x00, - 0x90, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x03, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x1a, 0xff, 0xff, 0xff, 0x00, 0x00, 0x80, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, - 0x07, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x01, 0x00, 0x00, 
0x00, 0x18, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xc4, 0xfc, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x07, 0x00, 0x10, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x38, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x1a, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, 0x14, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x02, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x31, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x34, 0x04, 0x00, 0x00, 0xcc, 0x03, 0x00, 0x00, - 0x4c, 0x03, 0x00, 0x00, 0xdc, 0x02, 0x00, 0x00, 0x60, 0x02, 0x00, 0x00, - 0x20, 0x02, 0x00, 0x00, 0xb0, 0x01, 0x00, 0x00, 0x44, 0x01, 0x00, 0x00, - 0x70, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0xfc, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0x44, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xf4, 0xfb, 0xff, 0xff, - 0x14, 0x00, 0x00, 
0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x80, 0x3b, 0x0e, 0x00, 0x00, 0x00, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x6d, 0x61, 0x78, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x1a, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0xb4, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x94, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x11, 0x1e, 0x23, 0x3a, 0x9e, 0xa1, 0x15, 0x39, - 0x23, 0x69, 0x45, 0x3a, 0x09, 0xe4, 0xe4, 0x39, 0x65, 0xd7, 0x13, 0x3a, - 0xe0, 0xb2, 0xfd, 0x39, 0x1b, 0xc1, 0x53, 0x3a, 0xc2, 0x50, 0x2d, 0x3a, - 0x12, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x77, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x73, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x3a, 0xfd, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0x54, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x2c, 0xfd, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xb5, 0xfa, 0xfa, 0x39, 0x1f, 0x00, 0x00, 0x00, 0x66, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 0x66, 0x63, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, - 0x2f, 0x72, 0x65, 0x61, 0x64, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, - 0x6f, 0x73, 0x65, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xa0, 0x0f, 0x00, 0x00, 0xa2, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x58, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x74, 0xfe, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xf2, 0xdd, 0xbb, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x32, 0xa3, 0x25, 0x41, 0x01, 0x00, 0x00, 0x00, - 0xf6, 0xa0, 0x50, 0xc1, 0x05, 0x00, 0x00, 0x00, 0x61, 0x64, 0x64, 0x5f, - 0x31, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x0e, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, - 0x2c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x0f, 0x00, 0x00, 0x00, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x5f, - 0x32, 0x2f, 0x73, 0x68, 0x61, 0x70, 0x65, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x4a, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x5c, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x1c, 0xff, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50, 0x50, 0xd0, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x80, 0xcf, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x52, 0x65, 0x73, 0x68, - 0x61, 0x70, 0x65, 
0x5f, 0x32, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xc2, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x58, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x94, 0xff, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x50, 0x50, 0xd0, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x80, 0xcf, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x5f, - 0x31, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xa8, 0x07, 0x00, 0x00, 0x2e, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x60, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x3a, 0x6a, 0xac, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0xd0, 0xbd, 0xab, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x52, 0x65, 0x6c, 0x75, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xaa, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x02, 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x9c, 0xff, 0xff, 0xff, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x96, 0x08, 0x29, 0x38, 0x0b, 0x00, 0x00, 0x00, - 0x4d, 0x61, 0x74, 
0x4d, 0x75, 0x6c, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x18, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xa0, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x9a, 0xbb, 0x84, 0x38, 0x83, 0x84, 0x73, 0x37, 0x5b, 0xa3, 0xa0, 0x38, - 0x16, 0x41, 0x3a, 0x38, 0xc7, 0x9a, 0x70, 0x38, 0xed, 0x70, 0x4e, 0x38, - 0x54, 0x4f, 0xac, 0x38, 0xfd, 0x07, 0x8d, 0x38, 0x0b, 0x00, 0x00, 0x00, - 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x4c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x16, 0x0a, 0x00, 0x0e, 0x00, 0x07, 0x00, - 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x07, 0x00, - 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - 0x03, 0x00, 0x00, 0x00}; -const int g_model_len = 18712; diff --git a/examples/micro_speech/micro_features/model.h b/examples/micro_speech/micro_features/model.h 
deleted file mode 100644 index deec2d6..0000000 --- a/examples/micro_speech/micro_features/model.h +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This is a standard TensorFlow Lite FlatBuffer model file that has been -// converted into a C data array, so it can be easily compiled into a binary -// for devices that don't have a file system. It was created using the command: -// xxd -i model.tflite > model.cc - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ - -extern const unsigned char g_model[]; -extern const int g_model_len; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MODEL_H_ diff --git a/examples/micro_speech/micro_features/no_feature_data_slice.cpp b/examples/micro_speech/micro_features/no_feature_data_slice.cpp deleted file mode 100644 index 8170f60..0000000 --- a/examples/micro_speech/micro_features/no_feature_data_slice.cpp +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. - -#include "micro_features/no_feature_data_slice.h" - -const int8_t g_no_feature_data_slice[g_no_feature_data_slice_size] = { - 89, 68, 96, 83, 111, 96, 115, 87, 99, 76, 105, 84, 105, 86, - 113, 91, 108, 87, 110, 78, 80, 46, 22, 74, 88, 72, 103, 86, - 80, 68, 48, 24, 68, 48, 55, 36, 108, 90, 90, 63, -}; diff --git a/examples/micro_speech/micro_features/no_feature_data_slice.h b/examples/micro_speech/micro_features/no_feature_data_slice.h deleted file mode 100644 index 01e6605..0000000 --- a/examples/micro_speech/micro_features/no_feature_data_slice.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was extracted from the larger feature data held in -// no_features_data.cc and consists of the 29th spectrogram slice of 43 values. 
-// This is the expected result of running the sample data in -// no_30ms_sample_data.cc through the preprocessing pipeline. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_ - -#include - -constexpr int g_no_feature_data_slice_size = 40; -extern const int8_t g_no_feature_data_slice[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_ diff --git a/examples/micro_speech/micro_features/no_micro_features_data.cpp b/examples/micro_speech/micro_features/no_micro_features_data.cpp deleted file mode 100644 index 2edeeb5..0000000 --- a/examples/micro_speech/micro_features/no_micro_features_data.cpp +++ /dev/null @@ -1,188 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "micro_features/no_micro_features_data.h" - -// Golden test values for the expected spectrogram from a "no" sample file -// speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav. 
- -const int g_no_micro_f9643d42_nohash_4_width = 40; -const int g_no_micro_f9643d42_nohash_4_height = 49; -const signed char g_no_micro_f9643d42_nohash_4_data[] = { - 103, 78, 64, 76, 75, 54, 53, 67, 77, 60, 56, 70, - 76, 71, 68, 58, 74, 32, 23, -2, -18, 11, 13, 15, - 9, 20, 5, -7, -18, -2, -10, -18, -10, -12, 9, 7, - -33, -12, -4, -18, 57, 17, 55, 62, 70, 45, 61, 37, - 67, 52, 48, 47, 55, 46, 57, 47, 73, 17, 27, 20, - 19, 8, 15, -6, -1, 10, -12, -29, -6, -23, -18, -3, - -1, 5, 3, -4, -12, -8, -1, -14, 65, 48, 58, 43, - 48, 19, 39, 39, 57, 57, 58, 55, 67, 58, 49, 50, - 70, 27, 9, 16, 37, 4, 25, 4, 11, 9, 7, -33, - -7, -12, 3, -6, -29, -7, -7, -18, -12, -18, -2, -1, - 0, 31, 60, -8, 51, 59, 70, 40, 71, 57, 52, 38, - 66, 48, 17, 6, 59, 8, 15, 7, 18, 4, 18, -23, - -8, -4, -3, -12, -3, -26, 1, 10, 2, -29, -29, -37, - -7, -4, 6, -33, 67, 44, 59, -4, 64, 51, 68, 55, - 74, 9, 40, 15, 57, 33, 60, 18, 40, 25, 27, -20, - 25, -16, 6, 17, -10, -12, -23, -43, -23, -23, -29, -37, - -4, -16, -16, -60, -20, -23, -10, -29, -12, 15, 12, -37, - 27, 15, 61, 44, 50, 8, 48, 22, 49, -18, 46, 33, - 42, 34, 46, -8, 4, -18, -43, -43, -10, 1, -10, -16, - -10, -77, -16, -33, 11, -26, -23, -37, 0, -8, -16, -29, - 42, 40, 68, 24, 47, 46, 53, -128, 30, 2, 42, 21, - 21, -4, 43, 2, 43, 5, 32, -26, 7, -37, -43, -23, - -2, -8, 2, -37, -50, -60, -1, -7, -33, -77, -6, -18, - -16, -50, -12, -33, 53, 8, 52, 18, 51, 35, 69, 26, - 44, 8, 27, -128, 21, -33, 17, -14, 38, -128, -14, -18, - 17, -20, -14, -37, 8, -60, -33, -33, -33, -43, -12, -29, - -12, -128, -33, -60, -26, -77, -26, -50, 57, 29, 11, 30, - 53, -10, 45, 15, 18, -10, 42, 2, 31, -29, 10, -4, - 42, -37, -50, -128, -4, -43, -20, -77, -14, -26, -33, -128, - -12, -43, -8, -33, -33, -60, -43, -77, -12, -60, -26, -50, - 40, -23, 36, 35, 50, -2, 37, 27, 26, -77, 49, -7, - 28, -43, 6, 11, 41, -37, 33, -26, -14, -12, -6, -33, - -16, -26, -20, -77, -14, -43, -8, -50, -14, -37, -26, -77, - -26, -77, -14, -29, 50, -60, 25, -26, 57, 38, 51, 1, - 50, 1, 53, 
-18, 30, -23, 11, -128, 18, -43, 20, -26, - -10, -26, -12, -128, -50, -60, -37, -77, -20, -43, -50, -128, - -77, -128, -77, -128, -33, -77, -20, -60, 53, -10, -37, -128, - 10, -128, 60, 18, -8, 13, 37, -37, 8, -128, 3, -77, - 32, -29, 14, 10, -12, -77, -37, -77, -37, -60, -23, -128, - -43, -50, -16, -77, -6, -33, 0, -60, -43, -128, -16, -60, - 20, -2, 51, 19, 43, 2, 63, 20, 60, -4, 42, -50, - 4, -128, 2, -3, 32, -33, -26, -128, -18, -128, -33, -43, - -7, -60, -50, -77, -29, -77, -23, -128, -16, -26, -23, -60, - -37, -77, -37, -128, -1, -33, 39, 48, 60, 5, 8, -128, - 44, 11, 4, 0, 13, -77, -2, -20, 33, -128, -33, -77, - -8, -128, -14, -128, -33, -18, -12, -77, -16, -128, -37, -128, - -12, -77, -60, -128, -23, -60, -23, -128, 36, -50, 46, -128, - 66, 39, 18, -14, -12, -77, -20, -6, 24, -128, 28, -26, - 21, -77, -6, -33, 1, -128, -43, -128, -1, -50, -37, -128, - -50, -128, -33, -128, -18, -128, -60, -8, -7, -60, -60, -128, - -6, -29, 20, -1, 73, 40, -43, -14, 33, -43, 33, -3, - 15, -29, 29, -43, 20, -60, -29, -128, -20, -26, 4, -77, - -16, -60, -33, -50, -29, -128, -60, -128, -77, -128, -37, -50, - 0, -77, -33, -128, 39, 8, 47, 10, 62, 16, 2, 1, - 10, 7, 4, -7, 6, -128, -77, -50, 19, -77, -77, -128, - -77, -128, -50, -128, -60, -60, -33, -50, -37, -128, -128, -128, - -60, -128, -37, -60, -18, -128, -33, -77, 37, 23, 29, -128, - -128, -128, -16, -128, -16, -33, 21, -20, -8, -60, -2, -60, - 11, -128, -50, -128, -50, -128, -29, -77, -16, -128, -26, -128, - -50, -77, -43, -128, -128, -128, -50, -128, -33, -128, -33, -50, - -23, -128, 24, -128, -128, -77, 4, -23, 32, -128, 1, -26, - -14, -128, 10, -77, -4, -128, 1, -50, -8, -77, -77, -77, - -23, -128, -50, -43, -33, -128, -43, -128, -128, -128, -43, -128, - -50, -128, -128, -128, 44, 15, 14, -128, 9, -128, 21, 0, - 29, -7, 18, -7, -7, -128, -33, -50, 14, -60, -60, -128, - -60, -128, -37, -128, -43, -128, -20, -128, -50, -128, -43, -77, - -26, -128, -60, -50, -60, -128, -77, -128, -3, -128, 14, -77, - -26, 11, 47, -77, -7, 
-77, 45, -43, -12, 14, 37, -60, - 22, -4, 5, -77, -14, -128, -10, -60, 22, -77, -12, -60, - -50, -128, -60, -128, -60, -128, -43, -128, -50, -128, -77, -50, - 27, -37, 33, -128, 4, -29, -4, -50, -20, -128, 6, -37, - -33, -128, -50, -128, 34, 15, -43, -128, -20, -50, -3, -37, - -37, -77, -77, -128, -43, -128, -128, -128, 4, -26, -26, 27, - 0, -128, -29, -60, 35, -26, 23, -128, -29, -77, 19, 14, - 28, -128, -16, -7, 31, -1, 17, 11, 60, 44, 8, 11, - 18, -128, -33, -60, -1, -128, -43, -128, -23, -128, -128, -128, - 59, 43, 35, 61, 37, -77, -77, -50, 116, 88, 98, 69, - 78, 53, 78, 40, 48, 7, 29, -18, -2, -14, 5, 12, - 65, 35, 31, -12, 33, -2, -6, -1, 44, -29, -14, -60, - -4, -43, -37, -128, 29, 18, 38, 51, 8, -128, -12, -37, - 115, 91, 113, 77, 89, 36, 60, 44, 49, 36, 27, 31, - 63, 30, 62, 14, 55, 49, 42, 0, 45, 17, -23, 1, - 30, -37, -50, -77, -8, -60, 9, -60, -12, -50, 13, 4, - 23, -6, 28, 13, 107, 78, 101, 73, 89, 46, 63, 17, - 34, -43, -6, 30, 67, 40, 77, 21, 53, 39, 38, 12, - -6, 5, 28, -2, 18, -43, 0, -128, -29, -77, 18, -128, - -2, -77, 39, 35, 38, 35, 50, 29, 100, 70, 94, 69, - 86, 50, 45, 38, 45, 12, 58, 64, 74, 36, 77, 45, - 78, 62, 8, -60, 38, 6, 21, 7, 8, -37, -1, -20, - 48, -37, 8, -10, 8, 13, 45, 39, 38, 22, 49, 25, - 94, 63, 87, 66, 84, -128, 29, 20, 55, 51, 80, 36, - 62, 30, 81, 72, 68, 37, 51, 27, 54, 22, 16, -29, - 4, 9, 57, 15, 35, -43, -77, -20, 4, 6, 37, -1, - 40, 31, 47, 14, 89, 68, 96, 83, 111, 96, 115, 87, - 99, 76, 105, 84, 105, 86, 113, 91, 108, 87, 110, 78, - 80, 46, 22, 74, 88, 72, 103, 86, 80, 68, 48, 24, - 68, 48, 55, 36, 108, 90, 90, 63, 83, 63, 87, 64, - 90, 92, 113, 88, 102, 79, 109, 83, 100, 89, 109, 60, - 56, 21, 75, 62, 81, 45, 63, 73, 93, 65, 94, 80, - 89, 81, 73, 3, 43, 60, 102, 70, 84, 67, 99, 74, - 78, 57, 79, 50, 93, 82, 98, 56, 77, 70, 91, 71, - 85, 82, 86, 13, 45, -18, 48, 40, 53, 28, 85, 60, - 65, 52, 86, 78, 76, 46, 73, 19, 35, 54, 75, 40, - 71, 60, 82, 37, 69, 42, 62, 40, 96, 70, 85, 77, - 70, 68, 103, 84, 94, 69, 81, -128, 
-128, -128, -43, -37, - 40, 2, 48, 45, 76, 37, 65, 16, 43, 18, 58, 20, - 27, 12, 71, 31, 53, 44, 88, 47, 50, 33, 39, 8, - 89, 57, 88, 69, 72, 63, 100, 68, 81, -77, -10, -128, - -128, -128, -128, -128, 13, -77, 8, 27, 60, 28, 41, -128, - -37, -128, 28, -43, -18, -128, 47, -37, 45, 27, 51, -29, - 15, 39, 52, 30, 49, -33, 65, 15, 76, 71, 90, 19, - 46, -128, -16, -128, -128, -128, -128, -128, -128, -128, -18, -128, - -20, -128, 32, -128, 21, -33, 45, -128, -128, -128, -12, -128, - -6, -14, 43, -128, -128, -128, -128, -128, 52, -18, 69, -43, - 78, 55, 42, -128, -29, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, 14, -128, -16, -128, -128, -128, 7, -128, - -128, -128, -128, -128, -128, -128, 12, -128, -128, -128, -128, -16, - 59, -50, 35, -128, 42, 0, 47, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -33, -128, -23, -128, - -128, -128, -23, -128, -128, -128, -128, -128, -128, -128, -33, -128, - -128, -128, -128, -128, -128, -128, -8, -128, 36, -50, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -37, -128, -128, -60, -10, -128, -128, -128, -128, -128, - -128, -128, 21, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -12, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -77, -128, -128, -128, -29, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -29, -128, -128, -128, -128, -128, -128, -128, -128, -128, -50, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, 
-128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, 
-128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -}; diff --git a/examples/micro_speech/micro_features/no_micro_features_data.h b/examples/micro_speech/micro_features/no_micro_features_data.h deleted file mode 100644 index 8c1b6d5..0000000 --- a/examples/micro_speech/micro_features/no_micro_features_data.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ - -extern const int g_no_micro_f9643d42_nohash_4_width; -extern const int g_no_micro_f9643d42_nohash_4_height; -extern const signed char g_no_micro_f9643d42_nohash_4_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_MICRO_FEATURES_DATA_H_ diff --git a/examples/micro_speech/micro_features/yes_feature_data_slice.cpp b/examples/micro_speech/micro_features/yes_feature_data_slice.cpp deleted file mode 100644 index e3f6e20..0000000 --- a/examples/micro_speech/micro_features/yes_feature_data_slice.cpp +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. - -#include "micro_features/yes_feature_data_slice.h" - -const int8_t g_yes_feature_data_slice[g_yes_feature_data_slice_size] = { - 86, 88, 108, 75, 108, 76, 98, 64, 75, 61, 71, 66, 85, -1, - -77, -128, 46, 61, 92, 69, 100, 93, 113, 80, 108, 93, 113, 91, - 110, 80, 85, 15, -33, -128, 12, -50, 34, 50, 70, 55, -}; diff --git a/examples/micro_speech/micro_features/yes_feature_data_slice.h b/examples/micro_speech/micro_features/yes_feature_data_slice.h deleted file mode 100644 index 18faadc..0000000 --- a/examples/micro_speech/micro_features/yes_feature_data_slice.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -// This data was extracted from the larger feature data held in -// no_micro_features_data.cc and consists of the 26th spectrogram slice of 40 -// values. This is the expected result of running the sample data in -// yes_30ms_sample_data.cc through the preprocessing pipeline. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_ - -#include - -constexpr int g_yes_feature_data_slice_size = 40; -extern const int8_t g_yes_feature_data_slice[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_ diff --git a/examples/micro_speech/micro_features/yes_micro_features_data.cpp b/examples/micro_speech/micro_features/yes_micro_features_data.cpp deleted file mode 100644 index 09a40cc..0000000 --- a/examples/micro_speech/micro_features/yes_micro_features_data.cpp +++ /dev/null @@ -1,188 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "micro_features/yes_micro_features_data.h" - -// Golden test values for the expected spectrogram from a "yes" sample file -// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav. 
- -const int g_yes_micro_f2e59fea_nohash_1_width = 40; -const int g_yes_micro_f2e59fea_nohash_1_height = 49; -const signed char g_yes_micro_f2e59fea_nohash_1_data[] = { - 116, 98, 118, 95, 106, 85, 101, 81, 67, -18, -33, -12, - -26, -128, 9, 34, 56, 45, 9, -12, 5, 30, 23, 28, - 0, -18, 0, -128, -60, -50, -50, -37, -60, -60, -50, -26, - -33, -50, -33, -50, 83, 61, 81, 55, 76, 61, 73, 64, - 38, -8, -37, -20, -18, -20, 48, 29, 52, 41, 55, 18, - 25, 37, 44, 37, 8, 15, -6, -60, -128, -50, -37, -37, - -18, -37, -26, -29, -37, -60, -50, -60, 95, 59, 52, -4, - 54, -18, 68, 43, 31, -18, -26, -33, -37, -29, 33, 7, - -3, 8, 26, 24, 36, 6, 36, 23, 14, 8, -29, -37, - -37, -37, -50, -50, -26, -8, -26, -37, -18, -37, -60, -77, - 50, 48, 83, 44, 56, -128, -33, -60, 1, -26, -60, -43, - -14, -23, -18, -43, -26, -33, 13, -77, -43, -77, -33, -37, - 16, -12, -37, -50, -50, -77, -20, -43, -60, -128, -60, -77, - -37, -77, -60, -128, 37, -10, 65, -7, 28, -128, 10, -77, - -37, -128, -77, -128, -77, -43, -128, -128, -77, -128, -128, -128, - -128, -128, -14, -128, -43, -50, -37, -77, -128, -128, -77, -43, - -29, -43, -20, -60, -37, -43, -50, -128, -77, -128, -18, -128, - -60, -128, -128, -128, -77, -128, -77, -128, -128, -128, -60, -37, - -20, -128, -60, -128, -128, -128, -60, -128, -77, -60, -128, -50, - -60, -128, -77, -128, -50, -60, -37, -60, -50, -77, -77, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -37, -128, - -128, -128, -128, -128, -77, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -77, -60, -128, -128, -50, -128, -50, -128, - -50, -128, -77, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -77, -128, -77, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -77, -128, -77, -128, -77, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -77, 
-128, -128, -128, - -128, -77, -50, -128, -128, -77, -77, -128, -128, -128, -50, -128, - 85, 43, 65, 53, 69, 60, 45, 3, 46, -12, 9, -23, - 32, -1, -128, -128, -128, -128, -1, 37, 38, 33, 43, 36, - 58, 70, 68, 39, 6, 10, 32, 6, 8, -23, -77, -128, - -29, -128, -77, -128, 101, 87, 102, 91, 110, 88, 101, 83, - 110, 95, 111, 83, 81, 84, 106, 90, 93, 82, 98, 91, - 108, 95, 118, 97, 118, 97, 116, 96, 113, 90, 110, 96, - 107, 85, 94, 66, 69, 36, 29, 0, 100, 60, 105, 68, - 92, 93, 113, 92, 107, 85, 107, 83, 104, 91, 105, 85, - 112, 88, 101, 80, 101, 79, 96, 80, 98, 80, 105, 83, - 98, 81, 103, 71, 100, 79, 83, 78, 91, 47, 50, 13, - 108, 81, 93, 78, 98, 76, 105, 76, 98, 40, 77, 72, - 81, 62, 93, 77, 96, 80, 98, 61, 97, 69, 88, 61, - 71, 56, 98, 68, 97, 72, 89, 51, 81, 61, 88, 75, - 86, 56, 48, 13, 71, 22, 84, 66, 76, -7, 48, 61, - 77, 62, 91, 65, 95, 74, 88, 59, 75, 58, 83, 55, - 87, 55, 76, 43, 76, -3, 56, 60, 79, 57, 71, 54, - 82, 33, 74, 71, 91, 45, 18, -7, 61, 56, 77, 41, - 73, 42, 82, 49, 59, 63, 82, 65, 66, 38, 83, 34, - 48, -8, 46, 20, 54, 33, 54, 6, 48, 16, 60, 37, - 58, 22, 58, 14, 65, 53, 75, -4, 42, 16, 16, -50, - 22, -128, 80, 54, 43, -50, 42, -128, -10, -77, 28, -29, - 68, 43, 73, 2, 25, -60, 47, 14, 45, 7, 66, 4, - 62, 37, 71, 7, 46, -10, 44, 22, 55, 53, 57, -29, - 26, -10, -3, -128, 38, -128, 46, -10, 16, -128, -10, -26, - 60, -7, 65, 38, 70, -60, 35, -8, 42, -29, 6, -128, - 34, -128, 36, -60, 44, -12, -2, -128, -7, -60, -60, -128, - -23, -128, 31, -33, 22, -77, -37, -43, -128, -128, 3, -128, - -23, -128, 17, -77, 43, -77, -7, -128, -20, -128, 17, -43, - 32, -128, -43, -128, -128, -77, 21, -128, -50, -128, -128, -128, - -128, -128, -128, -128, -37, -128, -16, -128, -50, -26, -6, -128, - -128, -128, -128, -128, -23, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -16, -128, 36, -7, 16, -128, -128, -128, -128, -128, - -77, -128, -37, -128, -50, -128, -128, -128, -128, -128, -18, -128, - 11, -128, -16, -77, -128, -128, -128, -128, -128, -128, -128, -128, - 
-128, -128, -128, -128, -26, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -20, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -50, -128, -77, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -77, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -1, -18, 5, -128, - 40, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, 4, -128, 63, 66, 75, -128, - 70, 60, 34, -128, -128, -128, -128, -128, -128, -128, -128, -128, - 87, 86, 95, 76, 91, 62, 72, -6, -50, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, 64, 83, 104, 70, - 98, 90, 111, 89, 109, 80, 71, -128, -128, -128, -128, -128, - -20, -6, 27, 33, 86, 88, 108, 75, 108, 76, 98, 64, - 75, 61, 71, 66, 85, -1, -77, -128, 46, 61, 92, 69, - 100, 93, 113, 80, 108, 93, 113, 91, 110, 80, 85, 15, - -33, -128, 12, -50, 34, 50, 70, 55, 84, 72, 108, 81, - 111, 88, 100, 80, 84, 73, 97, 86, 99, 65, 85, 43, - 96, 78, 107, 94, 118, 98, 115, 92, 118, 94, 111, 93, - 111, 86, 99, 52, 32, -16, 48, 31, 81, 74, 85, 64, - 78, 64, 98, 70, 110, 92, 96, 73, 100, 72, 94, 73, - 98, 76, 85, 67, 101, 83, 101, 83, 112, 89, 98, 85, - 105, 78, 98, 72, 102, 80, 95, 23, 19, -8, 52, 57, - 103, 91, 95, 65, 74, 8, 77, 49, 96, 76, 100, 87, - 105, 81, 94, 62, 94, 78, 81, 72, 99, 82, 101, 78, - 108, 65, 82, 70, 100, 
63, 79, 58, 80, 59, 87, 48, - 50, 57, 93, 67, 86, 80, 103, 56, 77, 31, 81, 57, - 62, 41, 96, 85, 91, 71, 101, 76, 89, 78, 95, 76, - 96, 79, 103, 81, 103, 48, 70, 57, 88, 66, 84, 11, - 85, 67, 104, 37, 38, 67, 90, 54, 81, 62, 90, 52, - 78, -60, 54, -8, 68, 40, 55, 8, 77, 52, 66, 31, - 55, 13, 60, 26, 69, 42, 63, -29, 57, -128, -3, -128, - 3, -128, -29, -60, 52, -43, 63, 56, 86, 75, 95, 75, - 85, 63, 82, 10, 50, -128, 31, -77, 0, -77, -23, -128, - 12, -77, 51, -3, 58, -14, 44, 0, 48, 4, 53, 47, - 28, -128, -128, -128, -37, -128, -3, -128, 49, 61, 100, 90, - 117, 88, 107, 94, 112, 64, 96, 83, -128, -128, 7, -128, - -77, -128, -23, -128, -23, -128, 16, -37, 65, -8, 48, 20, - 14, -77, 57, -18, -43, -128, -128, -128, -128, -128, -128, -128, - 24, 12, 74, 76, 105, 76, 99, 80, 108, 79, 103, 85, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - 42, -128, -8, -128, -50, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -60, -128, -128, 5, 73, 53, 93, 70, 101, 73, - 94, 57, 86, 66, -18, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -50, -128, 36, -128, -128, -128, -128, -128, -20, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, 23, 37, - 75, 54, 97, 70, 83, 52, 85, 65, 7, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -43, -128, 23, -128, -43, -128, - -33, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -26, -37, 65, 33, 76, 37, 73, 50, 77, 47, - -12, -128, -128, -128, -128, -128, -128, -128, -128, -128, -7, -14, - -4, -128, -14, -128, 18, -60, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -26, -60, 71, 42, 68, 53, - 81, 49, 73, 36, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -18, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, 15, -26, - 44, -18, 59, 39, 57, 20, 62, 26, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, 
-128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, 49, -128, 30, 8, 69, 27, 62, 38, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -43, -128, 28, -37, 48, -10, - 48, 11, 74, 37, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -77, -128, 11, -128, -7, -60, -77, -4, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -8, -128, -50, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, 
-128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -}; diff --git a/examples/micro_speech/micro_features/yes_micro_features_data.h b/examples/micro_speech/micro_features/yes_micro_features_data.h deleted file mode 100644 index cd1ad10..0000000 --- a/examples/micro_speech/micro_features/yes_micro_features_data.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ - -extern const int g_yes_micro_f2e59fea_nohash_1_width; -extern const int g_yes_micro_f2e59fea_nohash_1_height; -extern const signed char g_yes_micro_f2e59fea_nohash_1_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_MICRO_FEATURES_DATA_H_ diff --git a/examples/micro_speech/micro_speech_test.cpp b/examples/micro_speech/micro_speech_test.cpp deleted file mode 100644 index 48360fe..0000000 --- a/examples/micro_speech/micro_speech_test.cpp +++ /dev/null @@ -1,145 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "micro_features/model.h" -#include "micro_features/no_micro_features_data.h" -#include "micro_features/yes_micro_features_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" -#include "tensorflow/lite/micro/testing/micro_test.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestInvoke) { - // Set up logging. - tflite::MicroErrorReporter micro_error_reporter; - - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. - const tflite::Model* model = ::tflite::GetModel(g_model); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.\n", - model->version(), TFLITE_SCHEMA_VERSION); - } - - // Pull in only the operation implementations we need. - // This relies on a complete list of all the ops needed by this graph. - // An easier approach is to just use the AllOpsResolver, but this will - // incur some penalty in code space for op implementations that are not - // needed by this graph. - // - // tflite::AllOpsResolver resolver; - tflite::MicroMutableOpResolver<4> micro_op_resolver; - micro_op_resolver.AddDepthwiseConv2D(); - micro_op_resolver.AddFullyConnected(); - micro_op_resolver.AddReshape(); - micro_op_resolver.AddSoftmax(); - - // Create an area of memory to use for input, output, and intermediate arrays. - const int tensor_arena_size = 10 * 1024; - uint8_t tensor_arena[tensor_arena_size]; - - // Build an interpreter to run the model with. 
- tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, - µ_error_reporter); - interpreter.AllocateTensors(); - - // Get information about the memory area to use for the model's input. - TfLiteTensor* input = interpreter.input(0); - - // Make sure the input has the properties we expect. - TF_LITE_MICRO_EXPECT_NE(nullptr, input); - TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(1960, input->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type); - - // Copy a spectrogram created from a .wav audio file of someone saying "Yes", - // into the memory area used for the input. - const int8_t* yes_features_data = g_yes_micro_f2e59fea_nohash_1_data; - for (size_t i = 0; i < input->bytes; ++i) { - input->data.int8[i] = yes_features_data[i]; - } - - // Run the model on this input and make sure it succeeds. - TfLiteStatus invoke_status = interpreter.Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); - } - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); - - // Get the output from the model, and make sure it's the expected size and - // type. - TfLiteTensor* output = interpreter.output(0); - TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type); - - // There are four possible classes in the output, each with a score. - const int kSilenceIndex = 0; - const int kUnknownIndex = 1; - const int kYesIndex = 2; - const int kNoIndex = 3; - - // Make sure that the expected "Yes" score is higher than the other classes. 
- uint8_t silence_score = output->data.uint8[kSilenceIndex] + 128; - uint8_t unknown_score = output->data.uint8[kUnknownIndex] + 128; - uint8_t yes_score = output->data.int8[kYesIndex] + 128; - uint8_t no_score = output->data.int8[kNoIndex] + 128; - TF_LITE_MICRO_EXPECT_GT(yes_score, silence_score); - TF_LITE_MICRO_EXPECT_GT(yes_score, unknown_score); - TF_LITE_MICRO_EXPECT_GT(yes_score, no_score); - - // Now test with a different input, from a recording of "No". - const int8_t* no_features_data = g_no_micro_f9643d42_nohash_4_data; - for (size_t i = 0; i < input->bytes; ++i) { - input->data.int8[i] = no_features_data[i]; - } - - // Run the model on this "No" input. - invoke_status = interpreter.Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); - } - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); - - // Get the output from the model, and make sure it's the expected size and - // type. - output = interpreter.output(0); - TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size); - TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->dims->data[1]); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type); - - // Make sure that the expected "No" score is higher than the other classes. 
- silence_score = output->data.int8[kSilenceIndex] + 128; - unknown_score = output->data.int8[kUnknownIndex] + 128; - yes_score = output->data.int8[kYesIndex] + 128; - no_score = output->data.int8[kNoIndex] + 128; - TF_LITE_MICRO_EXPECT_GT(no_score, silence_score); - TF_LITE_MICRO_EXPECT_GT(no_score, unknown_score); - TF_LITE_MICRO_EXPECT_GT(no_score, yes_score); - - TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/no_1000ms_sample_data.cpp b/examples/micro_speech/no_1000ms_sample_data.cpp deleted file mode 100644 index 73824fc..0000000 --- a/examples/micro_speech/no_1000ms_sample_data.cpp +++ /dev/null @@ -1,1477 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
- -#include "no_1000ms_sample_data.h" - -const int g_no_1000ms_sample_data_size = 16000; -const int16_t g_no_1000ms_sample_data[16000] = { - 5, 1, -10, -16, -14, -10, -4, -5, -10, -15, -13, - -17, -22, -21, -23, -25, -22, -26, -28, -31, -28, -25, - -20, -24, -21, -13, -7, -1, -1, 3, 3, 4, -4, - -6, -8, -10, -13, -4, -2, 5, 8, 11, 26, 28, - 34, 32, 34, 30, 21, 18, 15, 13, 8, 5, 14, - 13, 7, 8, 4, -5, -7, -4, -9, -13, -17, -21, - -16, -14, -12, -12, -14, -11, -9, -2, 5, -1, 2, - 0, 2, 1, -3, -13, -14, -16, -11, -10, -9, -13, - -17, -19, -25, -21, -21, -20, -13, -5, -3, 0, 3, - 6, 5, 1, 0, -1, -7, -10, -11, -9, -6, -7, - -11, -10, -5, -14, -20, -23, -22, -22, -19, -15, -12, - -6, -5, 3, 13, 16, 17, 25, 26, 28, 34, 34, - 33, 34, 30, 21, 22, 18, 13, 20, 22, 24, 27, - 26, 23, 21, 18, 9, 5, -2, -7, -8, -10, -8, - -8, -4, 2, 2, -1, -7, -10, -8, -12, -13, -15, - -9, -5, -4, -3, -6, -11, -11, -18, -16, -13, -10, - -12, -6, 0, -2, 0, -3, -4, -8, -12, -19, -16, - -17, -19, -23, -30, -33, -36, -38, -39, -40, -36, -37, - -32, -27, -25, -31, -38, -41, -47, -52, -50, -42, -32, - -16, -7, -3, 0, -1, -1, -5, -16, -23, -29, -34, - -33, -27, -17, -11, 1, 4, 10, 18, 21, 24, 24, - 25, 30, 34, 30, 29, 26, 23, 20, 15, 14, 13, - 14, 16, 23, 28, 21, 23, 21, 13, 12, 12, 14, - 17, 21, 26, 27, 30, 30, 26, 20, 15, 15, 9, - 8, 9, 10, 7, 8, 7, 1, -2, -6, -10, -10, - -12, -15, -10, -7, -6, -5, 0, -3, -3, -12, -25, - -35, -49, -53, -49, -51, -48, -46, -48, -39, -33, -31, - -37, -42, -47, -49, -46, -47, -47, -46, -42, -39, -33, - -26, -23, -14, -8, -9, -7, -10, -11, -13, -13, -19, - -20, -16, -11, -9, 7, 16, 21, 29, 27, 29, 28, - 21, 14, 13, 17, 19, 20, 18, 13, 17, 16, 18, - 20, 17, 13, 16, 23, 26, 26, 25, 27, 31, 30, - 31, 34, 32, 35, 32, 36, 31, 26, 23, 27, 27, - 29, 27, 26, 32, 31, 28, 26, 23, 14, 6, 0, - -4, -7, -9, -10, -8, -3, 4, 12, 11, 15, 11, - 8, 2, -3, -3, -4, -6, -11, -14, -20, -28, -32, - -38, -46, -42, -44, -40, -34, -26, -29, -25, -23, -24, - -17, -21, -26, -23, -25, -19, -10, -11, 
-10, -10, -12, - -9, -3, 0, -3, -7, -10, -13, -10, -14, -13, -17, - -22, -22, -30, -28, -29, -26, -18, -6, -1, -3, -4, - -6, -10, -13, -10, -14, -16, -11, -15, -9, -3, -6, - -1, 2, 3, 4, 6, 6, 3, 4, 12, 14, 17, - 21, 19, 20, 16, 17, 15, 21, 21, 22, 20, 17, - 16, 16, 20, 17, 15, 9, 5, 11, 18, 24, 28, - 26, 23, 23, 26, 22, 18, 21, 23, 26, 27, 25, - 27, 29, 26, 20, 10, 7, 11, 8, 16, 25, 33, - 37, 38, 39, 35, 30, 20, 13, 9, 6, 5, 13, - 13, 14, 15, 12, 8, 3, 3, 3, 2, 9, 11, - 10, 5, 5, 0, -7, -11, -12, -15, -17, -12, -13, - -18, -19, -21, -24, -22, -27, -34, -36, -36, -32, -20, - -16, -15, -5, -5, -9, -10, -9, -17, -19, -20, -14, - -13, -10, -4, -7, -7, -14, -19, -28, -31, -30, -31, - -23, -19, -20, -12, -11, -14, -16, -20, -18, -20, -21, - -24, -29, -30, -30, -34, -31, -25, -21, -18, -11, -4, - 2, 2, 3, 3, 2, 4, -1, -4, -8, -3, -1, - 7, 15, 18, 22, 20, 20, 16, 16, 14, 13, 21, - 25, 26, 35, 28, 28, 28, 25, 21, 19, 18, 21, - 24, 20, 25, 28, 19, 16, 15, 8, 3, -1, 3, - 5, 13, 18, 25, 31, 33, 39, 36, 36, 32, 36, - 37, 39, 42, 36, 32, 27, 30, 24, 18, 15, 10, - 7, 5, 6, -1, -4, -10, -17, -15, -19, -15, -7, - -4, 3, 0, 3, 4, -2, -7, -13, -21, -23, -28, - -27, -26, -25, -15, -10, -4, -6, -5, -9, -5, -3, - 1, 2, -1, 1, -4, -7, -8, -17, -17, -15, -14, - -9, -5, -7, -6, -9, -16, -15, -15, -16, -16, -11, - -15, -15, -6, -6, -5, -2, 0, -9, -10, -12, -13, - -10, -4, 0, 8, 5, 4, 2, 0, -5, -8, -16, - -15, -12, -3, 9, 17, 24, 26, 30, 28, 22, 17, - 14, 9, 8, 9, 8, 11, 12, 12, 15, 14, 18, - 20, 17, 19, 22, 21, 12, 5, 0, 3, -3, -4, - -6, -7, 1, 8, 8, 8, 10, 2, -3, -8, -15, - -20, -24, -22, -23, -13, -6, -7, -5, -10, -8, -15, - -19, -22, -20, -17, -18, -13, -10, -1, 6, 5, 3, - 1, -5, -11, -10, -14, -19, -15, -13, -8, -2, -3, - -4, -3, -4, -1, 1, 0, -3, -4, -8, -18, -21, - -25, -24, -16, -9, -2, 1, 5, 1, 3, -2, -7, - -10, -23, -30, -29, -23, -9, -3, 4, 11, 11, 6, - 2, 0, -12, -20, -28, -24, -22, -17, -22, -19, -14, - -21, -17, -17, -12, -8, -3, 2, 0, -6, -5, -8, - -12, -17, -27, -34, -31, 
-30, -27, -19, -14, -14, -14, - -14, -19, -22, -21, -19, -14, -1, 5, 9, 8, 6, - 5, -4, -2, -3, -3, -1, -2, -3, 2, 7, 8, - 7, 6, 6, 3, 2, 1, -2, 0, 6, 11, 18, - 18, 19, 17, 14, 9, 4, 3, 3, 0, -1, 3, - -1, -5, 0, -2, 0, 1, 7, 7, 8, 20, 29, - 33, 31, 24, 14, 5, -6, -11, -8, -11, -2, 6, - 10, 12, 16, 26, 26, 24, 18, 12, 10, 4, 7, - 6, -2, -12, -17, -17, -20, -23, -23, -18, -8, 1, - 3, 5, 6, 3, 0, -6, -12, -12, -15, -12, -7, - 3, 3, 8, 7, 7, 7, 1, -1, -1, 4, 11, - 17, 25, 32, 35, 42, 50, 52, 56, 50, 55, 53, - 52, 47, 40, 38, 30, 26, 27, 28, 29, 25, 23, - 23, 28, 30, 25, 26, 21, 19, 14, 9, 16, 22, - 25, 33, 39, 45, 49, 48, 55, 51, 43, 35, 20, - 14, 13, 23, 25, 24, 20, 22, 28, 22, 22, 17, - 16, 13, 10, 10, 10, 9, 9, 14, 11, 10, 10, - 4, 0, 0, -2, -3, -5, -7, -3, 1, -8, -8, - -9, -4, 4, 9, 11, 14, 11, 6, 8, 3, -6, - -10, -19, -22, -24, -27, -22, -16, -21, -25, -33, -33, - -32, -30, -21, -13, -6, -5, 2, 1, 4, 9, 7, - 5, 1, 1, 8, 6, 7, 6, 0, -6, -15, -18, - -23, -22, -23, -25, -22, -21, -19, -17, -13, -10, -10, - -16, -17, -15, -13, -8, -9, -14, -13, -17, -20, -26, - -28, -31, -29, -26, -23, -13, -10, -6, -1, 5, 7, - 2, -3, -7, -20, -18, -16, -21, -27, -33, -25, -27, - -22, -22, -21, -16, -11, -7, -2, 2, 11, 18, 11, - 9, 4, 1, -1, -6, -4, -5, -9, -12, -16, -25, - -29, -37, -37, -38, -37, -33, -23, -16, -14, -7, -1, - -4, -3, -4, -5, -11, -14, -8, -8, -8, -8, -9, - -4, -14, -21, -22, -21, -18, -15, -2, 3, -3, 0, - -2, 0, -4, -7, -1, -2, 3, 3, -3, -10, -13, - -10, -16, -19, -17, -17, -14, -7, 5, 5, 7, 8, - 12, 7, 0, -5, -13, -17, -18, -14, -7, -4, 3, - 11, 11, 12, 11, 8, 4, -5, -5, -11, -15, -17, - -23, -22, -18, -14, -14, -12, -6, -4, -1, 3, 1, - -4, -10, -22, -29, -30, -26, -15, -2, 6, 16, 21, - 28, 32, 25, 24, 20, 9, 5, 0, 3, 7, 10, - 11, 13, 17, 15, 16, 13, 11, 11, 8, 7, 1, - 1, -5, -2, -2, -1, 4, 8, 17, 22, 24, 24, - 26, 23, 20, 17, 16, 9, 4, 6, 5, 8, 2, - -1, -5, -4, -10, -14, -14, -17, -19, -18, -16, -14, - -6, -3, 1, 3, 0, -4, -6, -4, -1, -1, 2, - 5, 3, 8, 7, 7, 14, 
13, 20, 24, 29, 24, - 12, 7, -1, -6, -15, -22, -20, -27, -22, -14, -6, - 2, 7, 9, 9, 2, -3, -7, -8, -10, -9, -3, - -6, -11, -12, -8, -5, -4, -5, -3, 0, 3, 6, - 6, 7, 5, -7, -10, -14, -13, -14, -17, -11, -7, - -4, 1, 1, 4, -4, -8, -18, -23, -23, -25, -19, - -16, -15, -9, 3, 10, 19, 25, 30, 31, 26, 27, - 23, 19, 16, 8, 7, 2, 0, -1, -1, 1, 5, - 6, 6, 1, 3, -1, -7, -11, -17, -19, -19, -7, - 0, 3, 11, 12, 18, 20, 16, 9, -2, -7, -14, - -19, -22, -30, -33, -34, -36, -26, -14, -11, -9, -3, - 0, -2, 1, -2, -3, -5, -12, -15, -19, -14, -9, - -8, -2, -6, -13, -15, -19, -22, -25, -26, -21, -20, - -11, -1, 1, 5, 9, 13, 15, 12, 11, 3, 1, - -1, 0, 8, 13, 16, 16, 15, 16, 15, 12, 9, - 7, 8, 4, 6, 4, 3, 3, 7, 0, -4, -8, - -11, -18, -18, -15, -20, -23, -21, -22, -21, -27, -25, - -15, -7, -2, 8, 9, 8, 8, 3, 3, 7, 8, - 8, 8, 12, 11, 12, 4, -1, -7, -11, -15, -18, - -17, -17, -20, -19, -13, -11, -3, -3, -1, 1, -3, - 1, 1, 8, 10, 15, 24, 26, 29, 34, 36, 26, - 20, 12, -2, -6, -9, -7, -6, 1, 10, 13, 19, - 22, 22, 18, 21, 24, 28, 35, 37, 34, 33, 34, - 34, 30, 19, 15, 10, 19, 21, 23, 24, 21, 19, - 18, 21, 22, 22, 27, 30, 31, 32, 33, 32, 32, - 24, 18, 10, 8, 10, 10, 6, 2, -7, -14, -22, - -29, -27, -29, -32, -30, -28, -23, -22, -11, -11, -13, - -3, 2, -1, 1, 1, -3, -7, -5, -7, -11, -17, - -23, -25, -26, -27, -26, -23, -14, -5, -3, -1, -2, - -2, -1, 1, -2, -7, -4, 2, 4, 10, 13, 6, - 3, -2, -6, -7, -11, -17, -21, -15, -7, -2, 11, - 16, 22, 25, 25, 23, 24, 23, 21, 22, 25, 23, - 17, 17, 12, 8, -2, -4, 1, 0, 4, 9, 8, - 10, 9, 9, 15, 13, 10, 8, 1, 1, -3, 1, - 4, 11, 10, 9, 5, 5, 4, 1, -1, -4, 0, - 8, 7, 4, 3, 3, 0, -9, -16, -19, -20, -21, - -18, -16, -11, -10, -9, -13, -12, -19, -25, -21, -15, - -5, 8, 14, 21, 24, 18, 20, 17, 6, 1, -2, - -2, 1, 1, 4, 1, -3, 2, 0, -3, -3, -4, - 1, 0, -5, -11, -17, -21, -20, -20, -20, -14, -9, - -3, 3, 7, 5, 3, 1, -1, -3, -4, -1, 1, - -5, -1, -1, -7, -11, -14, -12, -14, -17, -18, -23, - -29, -24, -27, -19, -12, -13, -2, -3, 4, 4, 0, - -3, -5, -2, -1, -5, -6, -7, -7, -7, 
-9, -13, - -9, -4, 1, 1, 1, -4, -11, -8, -15, -19, -19, - -12, -5, 1, 7, 12, 8, 10, 10, 10, 11, 11, - 19, 12, 9, 9, 2, -4, -13, -22, -24, -25, -24, - -26, -19, -14, -10, -1, 5, 4, -1, -4, -5, -10, - -14, -11, -8, -10, -8, -9, -7, -8, -6, -1, -5, - -10, -18, -27, -29, -24, -19, -11, -7, 1, 10, 8, - 8, 5, 2, -5, -1, -1, 0, 2, 2, -2, -8, - -8, -14, -26, -25, -23, -18, -9, 2, 2, 7, 13, - 6, 7, 5, 4, 3, 2, 1, 7, 2, -1, 1, - -2, 2, 0, -2, -6, -3, 5, 7, 9, 6, 5, - 4, 2, 0, -1, -3, 3, 7, 6, 14, 18, 22, - 20, 22, 19, 13, 9, 2, -8, -11, -6, -2, -3, - -3, 0, 0, 0, 1, -1, -2, 1, 7, 11, 10, - 11, 17, 17, 11, 11, 4, 6, 6, 13, 19, 22, - 23, 27, 25, 24, 22, 14, 11, 13, 7, 0, -3, - -9, -11, -7, -7, -6, -4, 1, 7, 9, 15, 18, - 18, 10, 5, 3, -3, -6, -5, -8, -5, 4, 8, - 8, 11, 10, 9, 4, 4, 1, -3, -10, -11, -8, - -16, -20, -22, -19, -12, -7, -10, -10, -13, -14, -11, - -11, -13, -18, -21, -19, -17, -22, -18, -22, -22, -16, - -9, -3, 0, 3, 6, 3, 3, -3, -6, -9, -14, - -1, 14, 21, 30, 37, 33, 27, 26, 19, 15, 14, - 11, 20, 12, 9, 10, 19, 20, 19, 22, 20, 22, - 17, 13, 14, 10, 8, 12, 15, 13, 12, 12, 12, - 9, 10, 11, 11, 9, 6, 4, 5, -2, 1, 1, - -1, 5, 1, 8, 6, 3, -1, -4, -15, -24, -27, - -26, -23, -19, -9, -3, -4, -9, -9, -10, -16, -22, - -19, -18, -15, -2, 3, 5, 6, 7, 8, 11, 3, - 1, 2, 1, 1, 0, -4, -13, -18, -19, -19, -20, - -23, -15, -10, -5, -3, -1, -1, -1, 3, -1, 0, - -8, -11, -13, -14, -13, -8, -6, -3, 1, 1, 0, - 0, 5, 4, 5, 5, 5, 4, 0, -1, -4, -13, - -22, -21, -28, -26, -22, -28, -23, -23, -14, -11, -10, - -7, -8, -5, -4, 1, 9, 10, 15, 19, 21, 17, - 18, 19, 16, 13, 16, 21, 27, 29, 22, 22, 13, - 4, 1, 0, -5, -6, -2, 3, 5, 8, 6, 9, - 10, 2, -3, -9, -8, -4, -2, -7, -6, -4, -8, - -6, -8, -11, -8, -8, -6, 2, -2, -2, -1, 2, - 4, 8, 5, -1, -8, -10, -7, -6, -5, -6, -5, - 6, 13, 22, 28, 33, 31, 38, 35, 28, 27, 22, - 22, 23, 26, 23, 21, 28, 28, 23, 23, 22, 21, - 20, 14, 6, -1, -5, -8, -5, -1, 2, 5, 5, - 7, 8, 5, 4, 0, 3, 6, 10, 13, 13, 6, - 4, 4, 0, -2, -3, 0, 3, 5, 7, 9, 7, - 6, 10, 8, 3, 4, -1, 
-4, -2, 0, -2, -2, - -2, -3, 5, 8, 6, 4, -1, -7, -6, -7, -12, - -18, -11, -2, -1, -1, -1, -2, -7, -7, -3, -3, - -5, -6, -6, -6, -6, -6, -9, -12, -9, -5, 1, - 3, 5, 5, 8, 7, 3, -5, -3, -2, 2, 3, - 5, 5, -1, -2, -4, -8, -9, -9, -7, -12, -13, - -17, -19, -16, -19, -21, -21, -19, -11, -6, -3, 7, - 8, 6, 2, 0, 1, 1, -2, -5, 0, -2, 2, - 1, 2, 0, -2, -1, -10, -21, -25, -24, -21, -19, - -14, -8, -3, -5, 0, 0, -5, -6, -3, -6, -9, - -13, -19, -20, -21, -21, -24, -25, -27, -27, -29, -26, - -19, -14, -14, -13, -8, -5, -10, -10, -6, 1, 4, - 14, 22, 23, 24, 20, 20, 18, 14, 11, 9, 6, - 8, 12, 15, 18, 18, 12, 8, 9, 9, 9, 7, - 4, 9, 5, 6, 5, 3, 3, -1, -1, -6, -10, - -6, -8, -3, 0, -2, -3, -2, -6, -6, -7, -3, - -3, -3, -2, 1, -1, -10, -7, -13, -21, -23, -20, - -19, -18, -18, -19, -15, -16, -7, -6, -9, -13, -12, - -6, -1, 3, 6, 7, 5, 3, -3, -11, -18, -20, - -26, -29, -27, -27, -24, -30, -29, -28, -23, -18, -21, - -18, -15, -9, 1, 9, 17, 21, 23, 18, 14, 5, - -1, -2, -1, 0, 3, 6, 5, 4, 4, 0, -1, - 1, -4, -9, -13, -11, -20, -21, -19, -14, -9, -4, - 1, 6, 10, 16, 24, 30, 35, 31, 38, 37, 35, - 39, 36, 36, 32, 30, 33, 31, 24, 19, 12, 4, - -1, -7, -11, -7, -5, -3, 2, 6, 10, 16, 19, - 21, 21, 16, 10, 14, 12, 14, 13, 12, 12, 5, - 6, 2, 0, 1, 3, 4, 6, 9, 6, 2, -1, - -3, -10, -15, -13, -17, -19, -15, -16, -15, -13, -8, - -8, -7, -10, -5, -2, 1, 5, 5, 11, 10, 12, - 10, 9, 9, 15, 23, 33, 35, 33, 34, 34, 35, - 34, 24, 30, 26, 23, 21, 20, 15, 10, 3, 4, - 0, -7, -8, -9, -9, -8, -4, 0, 5, 5, 2, - 3, -2, 0, 0, -1, 0, -1, 1, 2, 6, 3, - 1, -9, -5, -6, -2, -8, -12, -9, -10, -7, -8, - -8, -6, -2, -2, -1, 0, -2, -1, -8, -18, -19, - -27, -37, -42, -40, -39, -33, -30, -23, -16, -16, -9, - -13, -11, -10, -10, -8, -3, -1, 2, 0, -1, 2, - 6, 4, 8, 10, 17, 21, 28, 31, 33, 28, 20, - 12, 8, -3, -5, -4, -3, 2, 6, 9, 8, 2, - 7, 4, -6, -9, -15, -13, -15, -17, -14, -11, -12, - -5, -6, -4, -6, -11, -11, -7, -4, -6, -8, -13, - -10, -7, -12, -11, -12, -13, -12, -9, -9, -10, -10, - -6, -8, -8, -7, -9, -9, -7, 2, 5, 5, 6, - 
3, 4, 6, 3, -1, -2, -2, -2, 1, 5, 3, - 4, 2, -2, -7, -9, -13, -11, -8, 2, 12, 23, - 31, 37, 41, 40, 37, 36, 31, 31, 27, 28, 24, - 13, 16, 14, 15, 9, 4, 4, 5, 4, 7, 12, - 16, 14, 11, 13, 6, -2, -4, -1, -3, 3, 6, - 6, 9, 7, 9, 7, 5, 0, 1, -1, -2, -4, - -1, 0, 0, -4, 0, -4, -9, -15, -16, -18, -15, - -10, -6, -8, -5, -2, -2, 0, 4, 7, 0, -2, - -3, 4, 3, 2, -1, -3, -8, -19, -19, -19, -16, - -8, -5, 0, 1, 2, 1, -1, -2, -10, -12, -10, - -4, 3, 4, 2, 7, 8, 4, 1, -5, -5, -4, - -1, 9, 10, 12, 15, 15, 14, 11, 20, 16, 19, - 18, 26, 29, 21, 23, 16, 16, 3, -3, -4, -10, - -12, -10, -6, -7, -12, -17, -14, -16, -19, -13, -10, - -13, -13, -2, 2, 3, 7, 13, 22, 21, 21, 21, - 24, 27, 23, 22, 20, 17, 17, 16, 13, 11, 5, - 1, 1, 5, 5, 3, 2, -1, 2, -5, -6, -3, - -11, -9, -6, -5, -10, -4, -1, 1, 2, -1, -4, - -4, -9, -9, -7, -3, 3, -2, 1, 1, 4, -4, - -8, -8, -17, -17, -13, -13, -18, -18, -25, -27, -21, - -22, -18, -7, -1, 5, 9, 11, 11, 11, 15, 11, - 4, 1, 6, 8, 17, 12, 10, 5, -2, -3, -14, - -17, -25, -26, -22, -20, -13, -12, -12, -13, -10, -4, - -6, -6, -4, -6, -4, 0, -3, -7, -7, -10, -17, - -14, -9, -3, 4, 4, 6, 1, 0, 0, -6, -3, - -4, -3, -6, -9, -9, -5, 0, 1, 2, -2, 3, - -1, -4, -5, -11, -14, -17, -14, -12, -14, -19, -21, - -25, -35, -40, -39, -31, -24, -13, -4, -1, 0, 0, - 2, -2, -5, -8, -8, -9, -6, -2, 0, -5, -6, - 2, 5, 4, 1, 6, 8, 9, 14, 13, 19, 15, - 19, 13, 14, 20, 16, 16, 14, 14, 17, 13, 12, - 11, 6, -1, -7, -9, -10, -11, -2, 8, 12, 12, - 12, 8, 4, 1, -3, -4, -4, -3, 1, 9, 14, - 16, 10, 12, 9, 6, 4, -1, 8, 6, 3, 6, - 1, -11, -10, -10, -13, -9, -6, -2, -2, 9, 13, - 17, 17, 19, 17, 16, 9, -2, -5, -5, -3, -9, - -8, -8, -12, -17, -16, -18, -15, -9, -7, 1, 10, - 17, 18, 23, 25, 23, 20, 15, 17, 18, 23, 33, - 40, 43, 45, 51, 53, 47, 36, 27, 10, 5, 1, - 4, 5, 4, 0, 0, 6, 7, 8, 9, 3, 2, - 1, 0, -1, 3, 5, 5, 13, 7, 4, 4, 3, - 11, 17, 21, 31, 31, 31, 31, 28, 26, 23, 19, - 16, 17, 16, 10, 10, 12, 9, 7, -1, -7, -12, - -15, -15, -15, -13, -13, -16, -19, -19, -23, -31, -34, - -38, -39, -31, -30, 
-21, -21, -18, -11, -16, -20, -25, - -22, -18, -14, -7, -8, -3, 2, 10, 13, 12, 10, - 6, 2, 0, 0, 0, -6, -4, -1, 0, 0, -1, - -2, 1, 3, 8, 9, 3, 6, 2, -4, -2, -3, - -7, -4, -3, 2, 6, 8, 10, 12, 15, 11, 15, - 12, 13, 14, 15, 18, 14, 8, 4, 4, 3, -4, - -5, -4, -2, -3, -2, 4, 9, 13, 18, 21, 20, - 18, 15, 11, 6, 7, 10, 8, 6, 3, -3, -7, - -14, -21, -29, -33, -32, -26, -17, -12, -11, -9, -3, - -10, -13, -18, -23, -21, -26, -26, -24, -28, -25, -29, - -30, -30, -27, -17, -7, 2, 10, 13, 16, 16, 17, - 18, 17, 19, 19, 20, 15, 14, 16, 14, 10, 5, - 0, -4, -18, -21, -25, -20, -16, -13, -8, -5, 2, - 6, 11, 12, 18, 16, 18, 15, 13, 17, 18, 22, - 21, 25, 26, 25, 26, 28, 31, 27, 20, 10, 3, - -6, -10, -16, -19, -18, -15, -13, -10, -2, 0, 2, - 4, 3, 5, -1, 0, 1, 2, 0, -2, -1, -6, - -5, -7, -12, -10, -9, -4, -1, 3, 4, 2, 4, - 4, 3, -3, -6, -11, -14, -15, -23, -25, -29, -30, - -28, -25, -22, -19, -21, -19, -11, -7, -7, -3, -3, - -6, -8, -13, -10, -10, -5, 1, 4, 9, 7, 6, - 6, 4, -5, -11, -8, -6, -3, 0, 3, 7, 11, - 7, 3, 5, 6, 10, 12, 14, 16, 8, 5, -1, - -1, 4, 0, 0, -3, -5, -5, -4, -2, -2, 1, - 4, 7, 5, 10, 9, 6, 9, 12, 19, 28, 32, - 32, 33, 31, 29, 20, 17, 16, 14, 15, 6, -2, - -5, -7, -10, -10, -11, -9, -6, -3, 8, 10, 10, - 10, 12, 12, 7, 7, 5, 3, 2, 2, -2, -5, - -4, -7, -2, -6, -5, -6, -11, -14, -13, -10, -11, - -15, -16, -11, -11, -11, -10, -16, -15, -15, -16, -10, - -11, -11, -5, -1, 2, 1, 2, 0, 1, 4, 8, - 5, -4, -2, -4, -12, -18, -24, -20, -25, -14, -3, - 4, 11, 13, 13, 7, 4, -4, -9, -13, -17, -10, - -6, -1, 0, 2, 2, -1, 1, -8, -18, -22, -19, - -19, -22, -20, -22, -20, -17, -12, -9, -4, 3, 9, - 9, 9, 7, 6, 13, 10, 11, 8, 4, -1, 5, - 7, 7, 8, 4, 2, 2, -2, -8, -11, -16, -18, - -12, -12, -9, -2, 3, 3, 5, 5, 6, 9, 11, - 20, 22, 26, 30, 28, 22, 15, 15, 10, 11, 9, - 6, 9, 9, 11, 10, 12, 10, 8, 8, 7, 9, - 4, 3, 9, 5, 1, 2, 0, -3, -3, 0, 3, - 0, -2, 1, 4, 6, 4, 0, 1, -4, -13, -13, - -11, -20, -21, -15, -17, -23, -22, -24, -29, -24, -29, - -32, -21, -13, -11, -9, -9, -8, -13, -11, -11, -11, - 
-11, -17, -17, -21, -23, -27, -32, -33, -32, -31, -35, - -31, -26, -24, -18, -10, -1, 5, 13, 17, 15, 13, - 8, 4, 6, 9, 10, 13, 11, 12, 13, 9, 5, - 6, 8, 12, 21, 25, 24, 23, 16, 8, 7, 0, - -3, -8, -9, -2, 1, 11, 18, 25, 30, 31, 27, - 21, 19, 19, 18, 18, 22, 24, 16, 14, 8, 2, - -4, -9, -7, -10, -6, -8, -8, -13, -14, -11, -13, - -8, -7, 6, 9, 10, 15, 17, 11, 11, 9, 2, - 2, -2, 2, -6, -6, -7, -14, -11, -12, -13, -17, - -22, -25, -30, -24, -16, -4, 5, 2, 7, 5, 2, - -1, 1, -4, -4, 4, 8, 8, 5, 6, 6, 2, - 1, -2, -9, -14, -17, -16, -15, -14, -12, -11, -6, - -6, -2, -3, -3, 6, 13, 18, 27, 27, 26, 24, - 22, 19, 18, 19, 12, 8, 7, -2, 0, -6, -8, - -6, -4, -6, -14, -16, -16, -15, -12, -2, 6, 12, - 16, 18, 14, 16, 13, 12, 17, 16, 17, 17, 12, - 13, 10, 14, 14, 10, 2, -1, -3, -5, -10, -15, - -13, -20, -21, -21, -21, -19, -20, -18, -8, -4, -1, - -1, 4, 2, -3, 0, -5, -5, -3, -1, 0, 6, - 5, 6, 7, 7, 3, 2, 1, -5, -3, 0, 3, - 5, 7, 4, 10, 15, 15, 11, 6, 8, 9, 14, - 19, 18, 14, 12, 16, 15, 11, 9, 9, 5, 4, - 0, -7, -12, -18, -22, -29, -32, -36, -37, -38, -39, - -32, -24, -20, -14, -10, -2, 0, 1, 9, 13, 21, - 26, 31, 35, 40, 38, 32, 33, 25, 14, 11, 7, - 1, -1, -6, -5, -11, -20, -22, -19, -16, -9, 2, - 9, 14, 14, 13, 13, 12, 10, 3, 2, 1, 0, - 6, 5, -1, -4, -13, -17, -21, -25, -29, -30, -23, - -14, -4, 4, 11, 11, 12, 13, 13, 5, 6, 6, - 7, 5, 5, 9, -2, 3, 0, -2, -3, -5, -1, - 3, 9, 16, 18, 17, 17, 11, 5, 1, -4, -13, - -12, -7, -7, 1, 6, 4, 2, 3, 1, 1, 0, - -1, -5, -5, -3, -5, -1, 8, 9, 7, 12, 7, - 6, 4, 3, -1, -1, -4, -14, -16, -18, -24, -34, - -44, -37, -37, -36, -28, -19, -15, -6, -2, -3, 2, - 5, 6, 3, 6, 6, 9, 7, 3, -4, -15, -25, - -34, -37, -41, -41, -38, -33, -27, -22, -14, -15, -18, - -18, -15, -8, -7, -2, 2, 0, 4, 12, 13, 10, - 17, 20, 16, 17, 23, 24, 22, 24, 22, 28, 26, - 24, 22, 26, 28, 27, 23, 17, 10, 4, 4, 1, - -1, 0, 4, 9, 15, 14, 15, 14, 14, 13, 8, - 0, -1, -11, -13, -4, -3, -5, -3, -1, -6, -5, - -7, -4, -2, 2, 7, 15, 20, 14, 13, 8, 2, - -6, -15, -23, -25, -20, -22, -20, -14, 
-10, -4, -2, - 1, -10, -15, -12, -8, -8, -7, -5, -10, -12, -20, - -28, -26, -24, -16, -8, -5, 3, 8, 9, 12, 12, - 12, 14, 13, 12, 10, 13, 23, 29, 28, 33, 36, - 32, 28, 23, 25, 26, 30, 34, 27, 22, 16, 12, - 3, -6, -13, -13, -15, -14, -9, -11, -13, -13, -16, - -15, -20, -22, -20, -32, -30, -29, -24, -18, -18, -18, - -13, -15, -15, -16, -17, -10, -11, -12, -15, -17, -17, - -19, -21, -22, -26, -28, -21, -18, -14, -5, 2, 6, - 7, 5, 3, -2, 0, -4, -2, -3, -6, -9, -12, - -11, -11, -19, -23, -20, -21, -16, -19, -23, -22, -24, - -21, -22, -17, -15, -8, -1, 4, 14, 18, 23, 24, - 25, 25, 18, 15, 7, 2, 14, 19, 22, 20, 23, - 22, 20, 19, 20, 17, 16, 21, 22, 21, 18, 9, - 3, -6, -14, -19, -30, -36, -40, -32, -22, -21, -16, - -7, -1, 3, 2, 3, 6, 9, 16, 20, 22, 26, - 27, 29, 32, 30, 23, 19, 20, 21, 18, 22, 24, - 15, 14, 9, 9, 7, 6, 9, 9, 16, 22, 20, - 18, 18, 9, -1, -10, -16, -19, -22, -22, -20, -16, - -11, -5, 0, 1, 4, 2, 0, 3, 5, 10, 8, - 12, 10, 11, 9, 8, 7, -3, -4, -10, -11, -5, - 2, 8, 12, 12, 13, 14, 15, 14, 12, 10, 14, - 13, 8, 0, -2, -3, -9, -6, -13, -21, -12, -12, - -8, -9, -14, -16, -19, -23, -22, -23, -30, -26, -17, - -14, -9, -2, 3, 11, 16, 17, 17, 11, 12, 13, - 12, 9, 8, 7, 10, 17, 14, 13, 9, 7, 6, - 5, 10, 10, 6, 10, 9, 1, -5, -10, -12, -17, - -16, -14, -13, -10, -6, -2, 0, -1, 2, 2, -1, - 2, 6, 12, 18, 23, 22, 23, 24, 20, 16, 10, - 6, 9, 16, 15, 15, 16, 14, 8, 4, 0, -3, - -7, -4, -5, -5, 0, -4, 1, 1, 1, -4, -10, - -17, -25, -25, -28, -28, -27, -25, -20, -20, -20, -22, - -14, -11, -4, 4, 6, 11, 10, 12, 9, 6, 2, - -6, -10, -12, -7, -1, -6, 0, 1, 2, 5, 1, - -1, 1, -3, -6, -4, -5, -4, -6, -5, -7, -10, - -10, -8, -11, -9, -2, 9, 15, 14, 20, 19, 19, - 16, 16, 11, 3, 2, 2, 5, 4, 5, 3, -1, - -1, -6, -11, -16, -18, -18, -12, -17, -18, -13, -15, - -5, -4, -3, -1, 2, 6, 7, 11, 14, 17, 17, - 18, 21, 18, 19, 18, 23, 27, 36, 32, 35, 30, - 24, 25, 18, 10, 3, -1, -4, -11, -16, -21, -33, - -37, -35, -36, -35, -30, -26, -26, -21, -10, -7, -3, - -4, -3, -3, -9, -12, -16, -25, -22, -11, -6, 
2, - 5, 7, 4, -2, -8, -16, -23, -30, -28, -23, -20, - -11, -11, -8, 5, 2, -3, -1, -11, -15, -10, -13, - -8, -8, -12, -9, -10, -15, -8, -4, -3, 7, 6, - 13, 20, 25, 24, 25, 27, 28, 25, 23, 22, 27, - 28, 27, 30, 28, 26, 20, 16, 13, 7, 2, 1, - 6, 3, -4, -6, -13, -18, -19, -21, -15, -3, -1, - 10, 16, 17, 20, 24, 28, 28, 26, 26, 28, 27, - 24, 23, 20, 20, 24, 20, 17, 14, 6, 0, 2, - 1, 0, -3, -7, -12, -18, -29, -28, -30, -32, -23, - -27, -25, -20, -17, -13, -11, -14, -17, -21, -22, -18, - -11, -12, -6, -8, -9, -5, -6, -10, -18, -19, -16, - -13, -9, -6, -7, -13, -10, -14, -22, -30, -37, -35, - -37, -35, -34, -36, -30, -23, -17, -16, -16, -11, -6, - -2, 3, 7, 7, 6, 7, 7, 13, 21, 20, 22, - 23, 22, 24, 17, 5, -1, -2, -8, -13, -14, -17, - -24, -28, -23, -22, -19, -12, -14, -10, -14, -21, -20, - -21, -22, -13, -6, -1, 6, 4, 10, 11, 8, 10, - 10, 17, 20, 27, 34, 32, 26, 26, 24, 17, 13, - 6, 9, 12, 15, 17, 12, 11, 9, 3, -3, -3, - -8, -9, -4, -2, -2, 2, 1, -1, -3, -7, -8, - -11, -15, -8, -5, 1, 9, 7, 10, 13, 17, 14, - 12, 8, 6, 3, 6, 9, 8, 5, 0, -2, 1, - 1, -3, -6, -12, -17, -17, -23, -28, -33, -31, -29, - -30, -35, -28, -25, -17, -5, 0, 6, 10, 14, 27, - 31, 26, 31, 30, 32, 41, 42, 42, 43, 34, 32, - 21, 12, 2, 1, -3, -1, 8, 13, 20, 19, 18, - 19, 13, 8, 5, 7, 6, 7, 6, 4, 3, -2, - 0, 2, -4, -1, -3, 2, 12, 22, 33, 32, 31, - 35, 35, 34, 32, 26, 27, 26, 21, 17, 10, 1, - -3, -14, -21, -19, -21, -19, -24, -24, -19, -16, -13, - -16, -13, -15, -17, -12, -9, -4, 7, 19, 27, 33, - 37, 34, 35, 30, 24, 23, 25, 21, 20, 18, 15, - 12, 13, 8, 2, -4, -12, -18, -17, -14, -10, -14, - -8, -14, -14, -12, -14, -19, -23, -31, -32, -28, -30, - -22, -20, -13, 1, 0, 6, 14, 15, 20, 22, 20, - 16, 9, 2, 1, 3, 6, 7, 9, 10, 14, 17, - 16, 14, 4, -7, -16, -31, -40, -41, -40, -38, -34, - -40, -37, -33, -28, -22, -17, -11, -10, -12, -5, -5, - -8, -4, 0, -1, 1, 1, 6, 11, 14, 22, 25, - 28, 31, 32, 32, 31, 31, 20, 13, 12, 5, 4, - 4, 2, 0, -3, -6, -8, -4, -4, -4, -1, 7, - 9, 10, 13, 13, 16, 10, 7, 3, 6, 8, 8, - 15, 20, 23, 
18, 15, 12, 4, 1, 0, -4, -4, - -1, 8, 11, 13, 21, 24, 19, 12, 2, -5, -11, - -15, -17, -17, -19, -23, -28, -34, -33, -37, -29, -27, - -24, -17, -13, -8, -6, -2, 5, 3, 4, -2, -5, - -4, 0, 2, 3, 1, -5, -5, -6, -11, -11, -15, - -15, -19, -17, -17, -21, -23, -21, -22, -24, -28, -27, - -25, -15, -8, -1, 2, 2, 3, 3, 2, -2, 0, - 1, -1, 2, 5, 7, 2, 0, 2, -6, -9, -8, - -6, -3, -3, 3, 0, 5, 0, 0, -5, -12, -13, - -20, -14, -14, -6, -5, -2, 0, 6, 11, 9, 9, - 11, 10, 13, 19, 26, 29, 36, 37, 40, 35, 27, - 20, 13, 6, 3, -1, -1, -1, -3, -6, -8, -14, - -16, -25, -28, -23, -21, -24, -22, -22, -22, -24, -28, - -35, -43, -42, -37, -29, -20, -5, 2, 10, 23, 28, - 30, 31, 30, 39, 43, 40, 41, 43, 43, 38, 29, - 18, 14, 12, 3, 6, 3, 3, 0, -1, -3, -5, - -5, -8, -8, -10, -6, -1, 1, 5, 1, 2, 6, - 0, -3, -7, -13, -10, -7, -8, -7, -3, -5, -4, - -4, -4, -5, -2, 2, 3, 6, 4, 3, -1, -2, - -5, -16, -22, -31, -39, -38, -42, -47, -42, -42, -35, - -27, -30, -28, -25, -26, -24, -20, -19, -19, -19, -19, - -14, -16, -13, -9, -10, -1, 8, 17, 21, 28, 26, - 28, 24, 14, 8, 2, 0, -4, -4, -13, -16, -16, - -13, -12, -7, -5, 0, -4, -1, 2, 4, 8, 8, - 10, 10, 10, 14, 16, 17, 23, 20, 27, 27, 27, - 21, 14, 11, 0, -4, -8, -8, -1, -1, 1, 6, - 8, 23, 22, 23, 23, 25, 26, 26, 22, 21, 20, - 22, 17, 12, 8, 3, -2, -2, -4, -5, -3, 1, - 7, 6, 8, 9, 12, 6, 1, -4, -8, -6, -3, - -4, -5, -3, -7, -6, -6, -11, -11, -19, -23, -26, - -28, -34, -41, -41, -44, -45, -47, -40, -39, -33, -29, - -21, -14, -16, -6, -7, -3, 1, 6, 8, 11, 14, - 14, 15, 15, 18, 18, 16, 17, 12, 15, 20, 21, - 19, 21, 23, 22, 21, 16, 12, 8, 7, 7, 10, - 13, 13, 16, 16, 16, 16, 15, 15, 12, 14, 14, - 15, 12, 11, 17, 19, 19, 14, 13, 15, 17, 18, - 20, 24, 27, 24, 19, 11, 10, 1, 0, 0, -1, - 3, 8, 16, 18, 17, 22, 22, 21, 19, 7, 0, - 1, -1, -2, -1, -6, -8, -12, -14, -20, -21, -24, - -19, -9, -4, -3, 2, 2, 3, 0, -10, -19, -23, - -29, -31, -35, -29, -33, -28, -25, -25, -19, -22, -23, - -24, -21, -17, -15, -17, -13, -15, -12, -15, -14, -14, - -12, -9, -5, 1, 9, 13, 13, 17, 17, 
15, 11, - 12, 8, 13, 20, 24, 30, 29, 33, 30, 26, 23, - 13, 9, 4, 3, 3, 5, 3, 2, 5, 3, 2, - 1, 3, 6, 10, 14, 19, 23, 21, 20, 21, 17, - 11, 5, -3, -7, -12, -15, -16, -13, -15, -13, -7, - -4, -5, -5, -1, 5, 11, 8, 7, -2, -2, -5, - -6, -1, -2, 0, 2, 8, 13, 15, 17, 15, 16, - 10, 13, 3, -1, -4, -4, -4, 0, 8, 13, 15, - 9, 11, 9, 12, 9, 10, 10, 5, 11, 16, 21, - 20, 15, 13, 5, 3, -3, 1, 1, 0, -4, -7, - -9, -7, -9, -10, -7, -6, -3, -2, -3, -3, -6, - -12, -16, -22, -21, -26, -28, -25, -24, -23, -23, -28, - -32, -29, -26, -26, -23, -29, -23, -16, -11, -7, -9, - -10, -12, -18, -20, -20, -26, -23, -16, -17, -10, -7, - 0, 3, -2, 0, -4, -7, -8, -6, -3, -7, -5, - -5, 1, 0, -3, -2, -3, 5, 7, 10, 19, 17, - 22, 21, 20, 16, 8, 9, 10, 12, 20, 28, 31, - 28, 28, 26, 21, 14, 8, 5, 4, 5, 8, 9, - 9, 13, 17, 16, 14, 20, 17, 13, 16, 17, 18, - 18, 15, 11, 5, -2, -8, -15, -17, -17, -24, -24, - -23, -18, -13, -13, -9, -7, -4, 0, 3, 6, 2, - 2, -4, -5, -5, -4, -4, -2, 2, 6, 10, 7, - 4, 2, -2, -3, -8, -10, -14, -27, -29, -37, -36, - -29, -27, -19, -7, -3, 0, -2, 2, 8, 13, 18, - 15, 10, 10, 6, 1, -5, -12, -17, -20, -23, -23, - -22, -19, -17, -10, -6, -3, 2, 0, 4, 11, 14, - 19, 16, 6, 7, 3, 3, 4, 1, 7, 8, 7, - 3, -2, 0, 0, 0, -1, -2, 0, 4, 3, 5, - 9, 9, 12, 7, 5, 0, 0, 1, 0, 2, -6, - -10, -9, -13, -15, -19, -15, -18, -16, -17, -9, -5, - -2, 2, 2, 3, 7, 2, -3, -8, -13, -8, 1, - 8, 12, 15, 17, 17, 11, 7, 0, -4, -8, -8, - -3, -1, -4, -6, -6, -13, -12, -12, -13, -12, -8, - -9, -5, -4, -2, 0, -1, -6, -7, -6, -10, -10, - -8, -6, 1, 5, 6, 15, 18, 16, 12, 12, 12, - 10, 13, 7, 0, -9, -10, -11, -6, -8, -8, -4, - 0, 6, 10, 11, 15, 15, 15, 12, 10, 6, 6, - 11, 12, 20, 25, 23, 25, 18, 12, 6, -1, -4, - -10, -12, -9, -13, -16, -15, -18, -18, -22, -22, -17, - -14, -12, -8, -3, 1, 4, 11, 13, 7, 0, -8, - -11, -11, -13, -14, -12, -11, -9, -6, -5, -2, 1, - 5, 6, 10, 18, 17, 15, 13, 11, 12, 13, 10, - 9, 13, 16, 16, 13, 11, 6, 5, 0, -5, -4, - -3, 2, 6, 5, 6, 11, 14, 20, 23, 28, 27, - 22, 24, 23, 22, 16, 17, 12, 7, -1, -9, 
-10, - -9, -9, -13, -11, -9, -2, -2, -7, -8, -6, -7, - -12, -12, -10, 0, 5, 11, 13, 11, 10, 7, 3, - 0, 0, 3, 10, 14, 16, 18, 19, 21, 14, 15, - 12, 7, 6, 7, 9, 7, 11, 6, 4, 4, -1, - -9, -12, -12, -14, -9, -9, -6, -5, -4, -6, -7, - -12, -15, -17, -27, -23, -20, -19, -19, -18, -24, -20, - -25, -28, -33, -31, -29, -27, -15, -12, -7, -3, 1, - -3, -3, -5, -8, -6, 0, 13, 17, 24, 25, 23, - 24, 18, 8, -3, -4, -4, -7, -3, 1, 4, 7, - 9, 10, 14, 14, 20, 28, 35, 38, 42, 43, 43, - 39, 30, 27, 19, 15, 8, 10, 12, 19, 25, 26, - 27, 23, 22, 15, 10, 6, 8, 4, 6, 6, 3, - 7, 7, 15, 11, 7, 6, 5, 9, 6, 0, -3, - -14, -21, -21, -30, -39, -42, -40, -37, -37, -36, -32, - -30, -24, -21, -22, -23, -24, -28, -31, -31, -29, -27, - -30, -31, -31, -31, -34, -33, -34, -26, -21, -15, -10, - -5, -3, -2, -3, -6, -5, -11, -14, -10, -5, 0, - 9, 10, 18, 21, 19, 21, 11, 7, 4, 6, 6, - 7, 3, -6, -9, -16, -23, -24, -23, -26, -18, -16, - -11, -8, 0, 6, 5, 6, 10, 8, 8, 16, 24, - 24, 23, 24, 24, 24, 18, 9, 4, -3, -11, -16, - -15, -18, -14, -12, -9, -3, -4, -1, 8, 11, 10, - 19, 21, 21, 23, 20, 22, 15, 9, 7, 5, 3, - 1, 12, 13, 10, 18, 23, 31, 37, 40, 36, 38, - 40, 40, 38, 27, 24, 21, 14, 12, 12, 7, 7, - 15, 18, 19, 18, 17, 18, 14, 12, 11, 7, 5, - 7, 9, 9, 15, 14, 15, 18, 16, 7, 0, -5, - -6, -6, -6, -1, 7, 9, 12, 6, 4, 4, 2, - -1, 2, 3, 3, 5, 4, -1, -13, -19, -29, -34, - -39, -43, -49, -54, -53, -55, -55, -56, -59, -58, -49, - -41, -32, -19, -10, -2, -4, -1, -6, -19, -27, -26, - -27, -27, -21, -22, -20, -26, -26, -20, -20, -20, -21, - -17, -18, -7, -6, -6, -5, -1, 7, 18, 10, 16, - 25, 24, 31, 30, 32, 30, 26, 24, 22, 23, 21, - 23, 21, 24, 19, 17, 13, 12, 15, 6, 2, -5, - -9, -13, -10, -5, 1, 10, 13, 17, 13, 8, 5, - 5, 6, 5, 13, 19, 16, 14, 12, 7, 15, 18, - 19, 16, 4, -1, 0, -1, -2, -9, -15, -19, -21, - -13, -13, -10, -7, -7, -7, -6, -11, -22, -18, -19, - -22, -22, -19, -18, -10, -7, -9, -7, -12, -16, -20, - -27, -35, -37, -37, -33, -24, -14, -4, 8, 14, 19, - 19, 16, 12, 6, 2, -5, -6, -11, -17, -16, -14, - -13, -12, 
-17, -21, -22, -24, -18, -14, -12, -1, 4, - 9, 17, 14, 9, 13, 14, 13, 14, 14, 12, 11, - 15, 11, 16, 21, 20, 20, 22, 31, 30, 26, 15, - 13, 6, 8, 5, 1, -5, -3, 2, 9, 14, 13, - 16, 17, 18, 13, 10, 8, 7, 9, 12, 21, 23, - 23, 21, 19, 16, 14, 5, -4, -12, -15, -16, -12, - -9, -12, -14, -17, -16, -15, -14, -15, -28, -27, -24, - -12, -8, -3, 3, 9, 15, 18, 25, 25, 31, 32, - 35, 36, 33, 36, 24, 13, 2, -11, -19, -18, -18, - -10, -6, -4, 0, -3, -3, -15, -18, -17, -9, -7, - 2, 5, 7, 6, 2, -2, -12, -16, -16, -9, -3, - 6, 8, 15, 17, 16, 18, 11, 5, -4, -8, -17, - -16, -22, -24, -25, -28, -23, -19, -11, -3, 5, 11, - 22, 26, 29, 24, 14, 12, 7, 6, -2, -1, 2, - 10, 23, 33, 36, 32, 31, 16, 3, -4, -3, -3, - 1, 8, 11, 13, 12, 8, 3, 5, 3, 1, -1, - 4, 2, 3, 8, 5, 5, 1, -2, -1, -3, -1, - 5, 8, 10, 17, 17, 15, 19, 27, 18, 21, 23, - 19, 20, 15, 1, -7, -18, -24, -24, -33, -28, -32, - -30, -30, -30, -30, -29, -30, -41, -43, -50, -51, -49, - -42, -32, -19, -10, 0, 4, -2, 5, 9, 8, 12, - 19, 17, 10, 9, 3, 1, -4, -8, -4, 0, 5, - 7, 10, 9, 12, 0, -6, -7, -13, -16, -10, -10, - -9, -1, -1, -2, -6, -11, -14, -17, -18, -10, -3, - -3, 0, 6, 1, 6, 4, 3, 3, 9, 16, 22, - 28, 27, 32, 18, 21, 25, 20, 21, 18, 18, 22, - 23, 15, 8, -3, -9, -10, -13, -8, 3, 7, 18, - 26, 23, 26, 30, 17, 11, 9, -1, 0, 2, 2, - 12, 15, 6, 1, 0, -5, 2, 1, -3, -1, -6, - -2, -4, -11, -18, -30, -38, -36, -33, -32, -27, -19, - -18, -14, -13, -16, -11, -12, -12, -4, 0, 7, 13, - 13, 10, 11, 6, 3, 3, 3, 4, 10, 4, -1, - -3, -11, -21, -27, -34, -33, -31, -33, -28, -22, -21, - -14, -8, -13, -10, -8, -12, -7, -11, -3, 3, 5, - 7, 7, -1, -12, -13, -17, -21, -8, -2, 4, 7, - 13, 18, 18, 16, 15, 13, 11, 15, 13, 12, 17, - 18, 15, 15, 11, -3, -1, 2, 11, 15, 10, 18, - 13, 10, 12, 9, 2, 2, 4, -1, 6, 9, 11, - 5, 7, 13, 8, 9, 10, 11, 9, 7, 11, 5, - 3, 1, -9, -19, -31, -40, -42, -33, -27, -24, -22, - -20, -25, -20, -12, -17, -23, -23, -25, -25, -20, -18, - -17, -19, -15, -22, -20, -19, -13, -8, -12, 0, 2, - -6, -1, -5, -15, -10, -12, -19, -8, -6, -3, 9, - 5, 
12, 22, 10, 9, 12, 5, 8, 28, 13, 20, - 25, 11, 16, 19, 10, 15, 14, 6, 23, 19, 18, - 32, 17, 12, 19, -1, -8, 11, -4, -8, 9, -4, - -6, 0, -10, -7, -3, -8, -11, -11, -23, -7, -4, - -4, 14, 6, 4, 9, 3, -4, 4, 2, 9, 26, - 19, 26, 33, 22, 22, 24, 13, 20, 18, 18, 28, - 28, 19, 24, 16, -1, 1, -12, -34, -28, -25, -27, - -13, 6, 8, 21, 25, 22, 19, 3, 4, 0, -5, - 6, 8, 1, 6, 8, -4, -3, -10, -23, -17, -9, - -10, 3, 6, -1, 3, -10, -22, -28, -49, -49, -36, - -29, -10, 8, -1, 4, 14, -3, -14, -5, -16, -10, - 8, 7, 21, 24, 17, 25, 15, -4, 13, -7, -23, - 0, -7, -14, 12, 1, -18, -10, -27, -43, -31, -34, - -19, -3, -10, 15, 20, -7, 10, 9, -20, 7, 28, - 14, 42, 54, 32, 34, 24, 5, 10, -11, -13, 11, - -6, -4, 31, 7, 0, 34, 3, -9, 5, -24, -33, - -14, -11, -1, 8, 0, 10, 7, -7, 11, 10, -6, - 17, 16, 0, 10, 3, -26, -23, -33, -39, -26, -29, - -18, -6, -9, -1, 5, -11, -6, 7, -6, 1, 13, - 8, 1, 3, -13, -23, -25, -33, -28, -21, -9, 2, - 4, 1, 8, 4, -13, -5, -12, -14, 3, 14, 18, - 26, 30, 21, 20, 15, 15, 10, 5, 13, 11, 20, - 25, 29, 18, 19, 9, -10, -15, -13, -12, 1, 16, - 20, 30, 39, 37, 21, 15, 3, -7, -9, -1, 2, - -6, -7, -10, -20, -19, -19, -31, -25, -12, -15, -13, - -17, -18, -14, -24, -24, -18, -28, -24, -3, 1, 17, - 46, 48, 43, 46, 34, 12, 6, -14, -19, -10, -14, - 3, 15, 3, 7, 7, -13, 4, 9, -2, 3, 22, - 19, 25, 41, 48, 46, 36, 42, 40, 24, 33, 50, - 29, 30, 57, 35, 13, 29, 17, -9, 5, 15, 7, - 13, 38, 47, 40, 56, 72, 42, 29, 40, 18, 14, - 36, 52, 50, 58, 55, 42, 22, 20, 13, -8, 8, - 32, 26, 41, 70, 48, 51, 65, 36, 27, 23, 4, - 5, 1, -3, 2, -8, -23, -6, -30, -46, -24, -40, - -45, -22, -32, -35, -24, -50, -41, -35, -56, -38, -29, - -55, -25, -7, -40, -26, -25, -63, -51, -40, -61, -47, - -38, -38, -5, 2, 3, 26, -1, -7, 8, -20, -17, - 10, -14, -6, 41, 24, 27, 52, 26, 13, 25, 5, - -6, 2, -7, -2, 10, 4, 29, 36, 30, 74, 93, - 91, 131, 150, 132, 167, 177, 158, 189, 188, 178, 200, - 199, 187, 212, 202, 188, 210, 188, 173, 187, 175, 183, - 215, 218, 236, 264, 253, 279, 296, 275, 290, 288, 261, - 261, 
261, 230, 216, 199, 157, 160, 147, 115, 108, 84, - 50, 32, 7, -30, -56, -96, -130, -146, -179, -199, -223, - -255, -280, -293, -326, -341, -352, -391, -410, -429, -464, -489, - -507, -538, -559, -577, -602, -634, -656, -679, -696, -702, -700, - -699, -700, -687, -666, -665, -656, -634, -626, -609, -572, -539, - -518, -484, -462, -444, -418, -390, -364, -336, -295, -245, -210, - -175, -127, -97, -63, -28, 10, 45, 83, 121, 167, 222, - 272, 324, 369, 396, 439, 485, 502, 536, 571, 585, 618, - 656, 676, 705, 729, 744, 767, 776, 786, 798, 796, 813, - 849, 855, 865, 883, 862, 843, 834, 794, 781, 778, 767, - 746, 744, 721, 702, 681, 638, 607, 562, 521, 490, 447, - 398, 361, 313, 255, 204, 123, 20, -59, -143, -217, -270, - -328, -400, -462, -529, -607, -666, -737, -797, -854, -906, -936, - -944, -955, -965, -976, -993, -1003, -1007, -1032, -1040, -1045, -1055, - -1039, -1016, -1003, -990, -995, -1026, -1046, -1070, -1079, -1058, -1060, - -1062, -1028, -1010, -1006, -991, -1000, -1004, -987, -981, -958, -921, - -890, -852, -798, -754, -713, -681, -682, -658, -617, -585, -524, - -452, -404, -332, -258, -224, -183, -144, -132, -94, -64, -31, - 37, 99, 147, 219, 280, 329, 389, 439, 483, 563, 632, - 702, 799, 884, 965, 1050, 1107, 1150, 1209, 1260, 1308, 1383, - 1446, 1514, 1582, 1632, 1679, 1727, 1770, 1804, 1837, 1872, 1916, - 1961, 1999, 2038, 2071, 2089, 2097, 2107, 2091, 2084, 2072, 2051, - 2021, 1998, 1940, 1868, 1814, 1734, 1641, 1559, 1480, 1395, 1305, - 1213, 1115, 1015, 901, 785, 667, 520, 381, 256, 110, -26, - -141, -284, -417, -528, -670, -805, -935, -1080, -1206, -1324, -1438, - -1527, -1622, -1725, -1798, -1879, -1956, -2006, -2063, -2128, -2166, -2201, - -2238, -2257, -2292, -2316, -2337, -2357, -2356, -2362, -2382, -2375, -2368, - -2367, -2358, -2337, -2329, -2318, -2296, -2273, -2240, -2195, -2140, -2095, - -2044, -1990, -1932, -1872, -1803, -1737, -1673, -1602, -1520, -1428, -1325, - -1219, -1112, -1006, -896, -780, -681, -591, -481, -388, -294, -189, - -85, 
30, 148, 252, 348, 466, 579, 692, 811, 918, 1041, - 1162, 1271, 1389, 1507, 1611, 1735, 1864, 1965, 2085, 2203, 2312, - 2436, 2536, 2614, 2697, 2760, 2812, 2886, 2956, 3010, 3066, 3088, - 3098, 3120, 3110, 3101, 3106, 3108, 3130, 3149, 3139, 3122, 3085, - 3016, 2951, 2874, 2770, 2671, 2559, 2435, 2315, 2198, 2059, 1915, - 1761, 1570, 1387, 1185, 984, 787, 601, 413, 224, 40, -158, - -348, -560, -760, -960, -1147, -1312, -1471, -1621, -1779, -1925, -2069, - -2206, -2333, -2463, -2570, -2664, -2743, -2811, -2860, -2886, -2934, -2976, - -3015, -3057, -3074, -3076, -3079, -3060, -3032, -2998, -2950, -2920, -2893, - -2863, -2837, -2806, -2761, -2715, -2662, -2607, -2554, -2486, -2402, -2325, - -2264, -2190, -2127, -2063, -1989, -1932, -1862, -1788, -1724, -1640, -1545, - -1455, -1346, -1234, -1112, -984, -859, -735, -610, -494, -384, -280, - -176, -68, 40, 140, 244, 363, 478, 596, 739, 876, 1001, - 1128, 1240, 1352, 1474, 1595, 1717, 1853, 1972, 2093, 2215, 2328, - 2432, 2533, 2641, 2744, 2855, 2949, 3055, 3157, 3242, 3329, 3415, - 3479, 3528, 3569, 3588, 3617, 3649, 3676, 3708, 3747, 3751, 3753, - 3744, 3693, 3640, 3576, 3470, 3369, 3248, 3098, 2976, 2838, 2690, - 2557, 2395, 2222, 2055, 1872, 1675, 1488, 1279, 1057, 851, 623, - 393, 180, -74, -315, -537, -771, -979, -1161, -1373, -1558, -1729, - -1932, -2110, -2294, -2478, -2636, -2785, -2917, -3007, -3094, -3183, -3247, - -3319, -3402, -3450, -3510, -3564, -3595, -3622, -3635, -3627, -3635, -3639, - -3620, -3620, -3610, -3596, -3581, -3535, -3495, -3455, -3410, -3361, -3323, - -3265, -3202, -3141, -3078, -3001, -2919, -2830, -2739, -2640, -2540, -2430, - -2320, -2192, -2057, -1909, -1761, -1603, -1422, -1244, -1059, -887, -726, - -570, -425, -256, -92, 69, 238, 411, 557, 728, 910, 1066, - 1229, 1403, 1561, 1727, 1895, 2050, 2208, 2352, 2492, 2638, 2765, - 2893, 3025, 3145, 3263, 3387, 3496, 3595, 3707, 3804, 3884, 3975, - 4046, 4105, 4167, 4204, 4220, 4237, 4243, 4247, 4260, 4255, 4251, - 4246, 4201, 4143, 4092, 3996, 
3885, 3772, 3604, 3435, 3283, 3086, - 2923, 2742, 2535, 2341, 2130, 1887, 1649, 1411, 1137, 915, 659, - 398, 163, -81, -351, -580, -814, -1069, -1262, -1476, -1689, -1850, - -2043, -2237, -2395, -2591, -2763, -2918, -3095, -3224, -3319, -3435, -3508, - -3582, -3698, -3772, -3858, -3950, -4008, -4047, -4088, -4093, -4085, -4098, - -4064, -4052, -4057, -4033, -4028, -4018, -3991, -3971, -3933, -3865, -3802, - -3727, -3633, -3562, -3477, -3392, -3300, -3210, -3115, -3018, -2924, -2819, - -2721, -2606, -2490, -2381, -2246, -2111, -1963, -1810, -1638, -1460, -1293, - -1132, -980, -828, -666, -496, -322, -125, 72, 264, 470, 676, - 879, 1087, 1280, 1457, 1633, 1799, 1970, 2152, 2327, 2501, 2678, - 2840, 3007, 3165, 3301, 3434, 3558, 3667, 3791, 3912, 4023, 4140, - 4257, 4359, 4475, 4554, 4614, 4656, 4682, 4697, 4726, 4749, 4775, - 4810, 4812, 4812, 4810, 4768, 4697, 4620, 4502, 4368, 4210, 4031, - 3860, 3663, 3472, 3291, 3076, 2849, 2642, 2392, 2140, 1890, 1610, - 1325, 1064, 782, 494, 231, -50, -329, -593, -861, -1112, -1345, - -1588, -1812, -2022, -2257, -2467, -2682, -2924, -3126, -3317, -3495, -3630, - -3737, -3855, -3941, -4031, -4128, -4200, -4281, -4348, -4388, -4427, -4449, - -4444, -4450, -4458, -4452, -4464, -4460, -4451, -4444, -4425, -4384, -4344, - -4289, -4234, -4160, -4076, -4000, -3917, -3837, -3753, -3669, -3558, -3460, - -3354, -3230, -3111, -2966, -2824, -2665, -2495, -2333, -2151, -1951, -1752, - -1554, -1367, -1222, -1053, -882, -716, -520, -331, -141, 62, 270, - 476, 707, 923, 1133, 1349, 1534, 1735, 1943, 2124, 2317, 2511, - 2668, 2839, 3002, 3140, 3317, 3481, 3615, 3771, 3920, 4050, 4196, - 4319, 4430, 4556, 4657, 4765, 4868, 4945, 4999, 5057, 5075, 5100, - 5123, 5133, 5134, 5127, 5104, 5084, 5058, 4968, 4896, 4750, 4575, - 4381, 4179, 3971, 3776, 3590, 3394, 3209, 2991, 2800, 2535, 2269, - 1972, 1654, 1319, 998, 697, 384, 105, -187, -476, -759, -1047, - -1316, -1579, -1841, -2085, -2317, -2550, -2745, -2938, -3145, -3326, -3523, - -3706, -3859, 
-3998, -4124, -4218, -4288, -4346, -4386, -4437, -4495, -4550, - -4619, -4680, -4732, -4779, -4813, -4820, -4842, -4825, -4791, -4773, -4742, - -4715, -4709, -4683, -4652, -4605, -4527, -4428, -4315, -4194, -4086, -3978, - -3872, -3779, -3685, -3569, -3458, -3313, -3121, -2921, -2693, -2454, -2230, - -1998, -1783, -1588, -1414, -1240, -1069, -886, -690, -473, -256, -36, - 170, 384, 594, 797, 1015, 1235, 1449, 1664, 1882, 2098, 2311, - 2504, 2681, 2843, 3019, 3171, 3337, 3534, 3709, 3885, 4072, 4235, - 4380, 4524, 4641, 4746, 4864, 4979, 5087, 5213, 5308, 5393, 5450, - 5468, 5475, 5472, 5452, 5462, 5467, 5453, 5451, 5425, 5342, 5255, - 5113, 4914, 4725, 4512, 4273, 4053, 3866, 3632, 3436, 3205, 2955, - 2705, 2420, 2095, 1794, 1503, 1195, 941, 639, 342, 56, -269, - -601, -894, -1208, -1499, -1736, -1994, -2239, -2426, -2652, -2891, -3099, - -3361, -3588, -3793, -4013, -4183, -4302, -4439, -4523, -4613, -4734, -4809, - -4891, -4999, -5056, -5090, -5131, -5092, -5061, -5044, -4987, -4954, -4955, - -4924, -4911, -4873, -4809, -4755, -4673, -4555, -4440, -4316, -4187, -4088, - -3986, -3881, -3802, -3717, -3605, -3495, -3359, -3207, -3063, -2889, -2698, - -2504, -2306, -2088, -1861, -1627, -1415, -1201, -1000, -799, -593, -410, - -220, -7, 203, 412, 634, 865, 1126, 1367, 1602, 1838, 2052, - 2257, 2474, 2659, 2863, 3076, 3255, 3429, 3617, 3773, 3939, 4102, - 4222, 4358, 4501, 4611, 4733, 4846, 4939, 5056, 5147, 5217, 5301, - 5357, 5388, 5428, 5417, 5400, 5430, 5422, 5406, 5442, 5446, 5431, - 5437, 5381, 5304, 5212, 5057, 4874, 4683, 4465, 4249, 4026, 3767, - 3545, 3304, 3021, 2741, 2450, 2113, 1807, 1490, 1151, 841, 544, - 212, -102, -439, -788, -1091, -1413, -1730, -2033, -2336, -2627, -2854, - -3118, -3350, -3560, -3781, -4008, -4194, -4376, -4524, -4640, -4757, -4865, - -4945, -5016, -5083, -5131, -5170, -5184, -5198, -5208, -5211, -5210, -5209, - -5192, -5174, -5154, -5108, -5052, -5002, -4932, -4854, -4780, -4704, -4604, - -4514, -4421, -4309, -4208, -4111, -4004, 
-3880, -3751, -3622, -3496, -3367, - -3210, -3047, -2867, -2654, -2430, -2177, -1897, -1651, -1417, -1182, -983, - -793, -593, -406, -211, 17, 232, 461, 716, 958, 1197, 1441, - 1674, 1899, 2130, 2355, 2573, 2788, 3004, 3220, 3419, 3612, 3809, - 3973, 4120, 4277, 4433, 4573, 4742, 4902, 5037, 5165, 5282, 5377, - 5460, 5539, 5596, 5654, 5716, 5741, 5759, 5770, 5776, 5762, 5751, - 5737, 5706, 5675, 5644, 5550, 5446, 5324, 5169, 4974, 4767, 4530, - 4289, 4067, 3823, 3621, 3391, 3145, 2878, 2575, 2228, 1890, 1525, - 1149, 807, 473, 145, -152, -454, -769, -1057, -1374, -1703, -2033, - -2372, -2701, -2977, -3258, -3495, -3694, -3897, -4089, -4270, -4483, -4668, - -4840, -5015, -5140, -5225, -5304, -5334, -5350, -5390, -5398, -5403, -5428, - -5438, -5449, -5472, -5463, -5441, -5401, -5333, -5252, -5151, -5051, -4974, - -4880, -4805, -4729, -4626, -4526, -4403, -4248, -4088, -3939, -3778, -3617, - -3464, -3308, -3173, -3027, -2852, -2669, -2461, -2233, -1979, -1713, -1455, - -1216, -996, -796, -610, -397, -198, 21, 272, 517, 775, 1037, - 1295, 1544, 1790, 2007, 2211, 2423, 2634, 2848, 3081, 3319, 3551, - 3792, 4000, 4171, 4303, 4418, 4518, 4596, 4679, 4807, 4913, 5044, - 5172, 5288, 5405, 5518, 5609, 5664, 5713, 5735, 5735, 5737, 5701, - 5691, 5656, 5633, 5611, 5552, 5475, 5394, 5293, 5177, 5064, 4924, - 4737, 4599, 4420, 4237, 4048, 3828, 3623, 3413, 3183, 2915, 2622, - 2308, 1980, 1657, 1261, 901, 549, 205, -85, -383, -688, -969, - -1246, -1530, -1850, -2206, -2561, -2915, -3224, -3482, -3713, -3921, -4107, - -4287, -4470, -4660, -4850, -5057, -5239, -5395, -5540, -5619, -5697, -5724, - -5697, -5675, -5633, -5590, -5579, -5530, -5486, -5442, -5426, -5391, -5348, - -5276, -5197, -5124, -5039, -4925, -4808, -4677, -4581, -4479, -4343, -4218, - -4087, -3970, -3858, -3729, -3570, -3384, -3206, -3020, -2839, -2636, -2453, - -2287, -2185, -2154, -1926, -1562, -1223, -758, -473, -64, 395, 599, - 880, 814, 938, 1172, 1498, 1928, 2127, 2422, 2608, 2841, 2937, - 2886, 2815, 2985, 
3324, 3757, 4152, 4481, 4652, 4917, 4965, 4766, - 4583, 4328, 4503, 4815, 5118, 5408, 5682, 5956, 6082, 6055, 5744, - 5426, 5341, 5427, 5606, 5882, 6065, 6226, 6428, 6477, 6385, 6009, - 5728, 5552, 5439, 5339, 5200, 5008, 4947, 4835, 4614, 4330, 3887, - 3521, 3111, 2460, 1983, 1297, 650, 279, -353, -720, -1044, -1518, - -1668, -2117, -2496, -2743, -3266, -3607, -3790, -4149, -4075, -4042, -4096, - -3981, -4138, -4226, -4214, -4503, -4455, -4577, -4642, -4346, -4351, -4270, - -4263, -4522, -4521, -4673, -4814, -4731, -4950, -5011, -5004, -5288, -5341, - -5566, -5833, -5783, -5929, -5847, -5765, -5828, -5644, -5613, -5615, -5428, - -5291, -5014, -4554, -4277, -3964, -3854, -3829, -3612, -3603, -3438, -3137, - -2831, -2164, -1438, -939, -330, -156, 46, 242, 73, 242, 220, - 239, 542, 565, 739, 872, 801, 857, 676, 543, 586, 567, - 828, 1142, 1490, 1985, 2508, 2982, 3438, 3699, 3939, 4069, 4178, - 4420, 4622, 4917, 5338, 5801, 6285, 6658, 6963, 7213, 7233, 7328, - 7176, 7038, 7031, 6860, 6957, 6767, 6599, 6523, 6212, 6147, 6063, - 5860, 6020, 6015, 6033, 6184, 5722, 5607, 5016, 4337, 4063, 3229, - 3080, 3006, 2804, 3035, 2541, 2136, 1879, 1012, 401, -575, -1584, - -1930, -2278, -2485, -2477, -2712, -2747, -2766, -3320, -3592, -4188, -4669, - -4672, -4939, -4789, -4426, -4203, -3674, -3563, -3656, -3759, -4067, -4257, - -4522, -4970, -5204, -5237, -5139, -4907, -4911, -4917, -4921, -5007, -5230, - -5654, -6122, -6464, -6733, -6948, -7067, -6972, -6800, -6520, -6132, -5830, - -5382, -5091, -4797, -4546, -4472, -4362, -4350, -4235, -3851, -3454, -3144, - -2735, -2341, -1845, -1262, -958, -549, -166, 66, 382, 366, 352, - 341, 85, -13, -176, -303, -235, -341, -309, -227, -249, -50, - 143, 384, 874, 1149, 1552, 2155, 2767, 3499, 3994, 4460, 4920, - 5288, 5569, 5704, 5881, 6094, 6461, 6653, 6803, 7115, 7311, 7521, - 7612, 7443, 7380, 7124, 6742, 6495, 5964, 5656, 5415, 5167, 5656, - 5813, 6027, 6401, 6351, 6787, 7019, 6581, 6512, 5965, 5308, 5140, - 4336, 4147, 3899, 3398, 
3360, 2830, 2624, 1968, 1026, 395, -699, - -1424, -2327, -3006, -3192, -3435, -3337, -3686, -3513, -3350, -3502, -3261, - -3878, -4005, -4063, -4187, -3767, -3598, -3384, -3300, -3094, -2857, -3023, - -3274, -3851, -4352, -4523, -4943, -5477, -5612, -5682, -5733, -5714, -5965, - -6110, -5950, -6158, -6548, -6897, -7165, -7281, -7352, -7258, -7185, -6659, - -5946, -5470, -4738, -4046, -3707, -3210, -3108, -3270, -3227, -3222, -3218, - -3017, -2943, -2668, -2296, -1593, -1061, -811, -403, -513, -361, -128, - -595, -633, -991, -1205, -1159, -1284, -1330, -1164, -999, -729, -538, - -336, 27, 350, 794, 1245, 1646, 2446, 3210, 4017, 4835, 5271, - 5739, 6028, 6140, 6212, 6161, 6066, 5984, 6081, 5995, 6152, 6301, - 6278, 6424, 6377, 6396, 6362, 6152, 5788, 5309, 5071, 4860, 4704, - 4804, 4919, 5258, 5869, 6121, 6365, 6694, 6692, 6694, 6532, 6187, - 5808, 5704, 5302, 4816, 4611, 4043, 3775, 3249, 2600, 1933, 982, - 336, -848, -1538, -2242, -3103, -3374, -3756, -3975, -4017, -4061, -3972, - -3749, -3609, -3853, -3850, -3714, -3760, -3736, -3914, -3923, -3830, -3541, - -3649, -3757, -3661, -3913, -4038, -4231, -4594, -4769, -5009, -5273, -5588, - -5676, -5937, -5997, -6060, -6164, -6414, -6623, -6765, -6857, -6771, -6921, - -6914, -6535, -6187, -5626, -5206, -4742, -4189, -3618, -3120, -2823, -2606, - -2550, -2703, -2736, -2626, -2498, -2406, -2133, -1852, -1348, -753, -318, - 162, 330, 524, 375, 9, -204, -866, -1249, -1532, -1669, -1455, - -1235, -723, -283, 262, 535, 862, 1340, 1712, 2316, 2625, 3171, - 4015, 4698, 5516, 6006, 6452, 6838, 6921, 7003, 6735, 6339, 6138, - 5768, 5575, 5593, 5568, 5728, 6041, 6233, 6260, 6175, 6048, 5728, - 5366, 4931, 4340, 4194, 4174, 4330, 4743, 5028, 5754, 6250, 6598, - 7120, 7114, 6962, 6675, 6157, 5373, 4797, 4081, 3237, 3153, 2588, - 2143, 1639, 1021, 681, -149, -816, -1987, -3003, -3493, -4138, -4420, - -4607, -4841, -4725, -4254, -4033, -3845, -3842, -4063, -4035, -4099, -4582, - -4718, -4779, -4689, -4437, -4327, -4352, -4119, -3881, 
-4061, -4345, -4768, - -5248, -5610, -5920, -6383, -6779, -6731, -6673, -6677, -6597, -6659, -6619, - -6417, -6516, -6862, -7017, -7069, -6944, -6715, -6376, -6000, -5162, -4333, - -3577, -2884, -2355, -1807, -1366, -1380, -1590, -1869, -1962, -1945, -2006, - -2141, -1960, -1516, -1025, -471, -135, 85, 348, 239, -8, -475, - -951, -1245, -1520, -1569, -1448, -1188, -517, 134, 827, 1585, 2114, - 2792, 3214, 3651, 4230, 4546, 4894, 5321, 5588, 6105, 6583, 6877, - 7014, 7087, 7068, 6876, 6695, 6280, 5684, 5385, 5205, 5064, 5033, - 5028, 5080, 5322, 5510, 5461, 5390, 5541, 5494, 5443, 5306, 5065, - 5193, 5338, 5513, 5818, 5911, 6345, 6506, 6514, 6543, 5981, 5703, - 5082, 4228, 3517, 2424, 1880, 1245, 562, -130, -864, -1156, -1561, - -1970, -2597, -3357, -3707, -4189, -4521, -4975, -5477, -5478, -5585, -5445, - -5353, -5327, -4971, -4580, -4431, -4469, -4432, -4422, -4275, -4227, -4507, - -4745, -4758, -4752, -4845, -4933, -5118, -5117, -5124, -5324, -5673, -5971, - -6152, -6366, -6702, -6970, -7159, -7136, -6929, -6917, -6703, -6520, -6302, - -5794, -5484, -5123, -4694, -4254, -3722, -3334, -2917, -2410, -1721, -1010, - -584, -312, 27, 321, 327, 214, -17, -363, -402, -550, -638, - -469, -315, -86, 142, 242, 387, 448, 458, 423, 321, 194, - 285, 417, 717, 1176, 1673, 2402, 3144, 3985, 4764, 5406, 6056, - 6507, 6783, 6891, 6868, 6850, 6717, 6532, 6359, 6248, 6303, 6279, - 6140, 6071, 5927, 5687, 5480, 5146, 4835, 4572, 4447, 4481, 4578, - 4840, 4936, 5246, 5659, 5732, 5856, 5658, 5403, 5282, 5004, 4949, - 4843, 4681, 4884, 4886, 4967, 5108, 4781, 4647, 4240, 3443, 2768, - 1830, 983, 309, -769, -1382, -1987, -2553, -2750, -3346, -3555, -4052, - -4400, -4599, -5196, -5437, -5945, -6340, -6343, -6554, -6611, -6381, -6184, - -5681, -5398, -5098, -4751, -4529, -4138, -4100, -4088, -4044, -4186, -4189, - -4263, -4453, -4465, -4598, -4651, -4726, -4919, -4926, -5142, -5286, -5490, - -5831, -6002, -6341, -6492, -6562, -6710, -6553, -6506, -6219, -5766, -5521, - -5008, -4556, 
-4002, -3293, -2769, -2069, -1467, -824, -34, 509, 1034, - 1385, 1560, 1650, 1664, 1419, 1016, 834, 511, 353, 381, 299, - 523, 833, 956, 1280, 1492, 1425, 1547, 1350, 1143, 1114, 931, - 1054, 1217, 1583, 2217, 2917, 4017, 4965, 5827, 6816, 7393, 7875, - 8197, 8175, 7924, 7578, 7040, 6566, 6242, 5746, 5530, 5334, 5222, - 5237, 5074, 5146, 5011, 4902, 4753, 4442, 4482, 4254, 4247, 4319, - 4187, 4516, 4690, 4935, 5193, 5229, 5350, 5332, 5486, 5386, 5143, - 4999, 4494, 4304, 3961, 3421, 2781, 2032, 1404, 614, -88, -956, - -1714, -2155, -2684, -3038, -3237, -3368, -3423, -3569, -3809, -4213, -4533, - -4973, -5514, -6011, -6663, -7084, -7258, -7158, -6947, -6639, -6111, -5548, - -4887, -4362, -4043, -3895, -3940, -4107, -4452, -4836, -5143, -5500, -5532, - -5510, -5485, -5096, -4739, -4375, -4065, -4063, -4094, -4252, -4576, -4904, - -5431, -5837, -6190, -6402, -6310, -6292, -5992, -5516, -5025, -4342, -3899, - -3386, -2697, -2077, -1493, -994, -392, 232, 931, 1608, 1988, 2360, - 2589, 2639, 2623, 2471, 2121, 1708, 1478, 1181, 1167, 1296, 1279, - 1648, 1859, 2107, 2368, 2359, 2390, 2122, 1904, 1629, 1418, 1502, - 1524, 1859, 2357, 3041, 3909, 4810, 5751, 6449, 7128, 7534, 7767, - 7908, 7699, 7460, 7032, 6647, 6301, 5876, 5556, 5190, 4948, 4762, - 4576, 4464, 4370, 4338, 4275, 4287, 4265, 4320, 4221, 4066, 3947, - 3514, 3379, 3003, 2635, 2534, 2078, 2040, 1950, 1958, 2152, 2085, - 2390, 2321, 2319, 2359, 1851, 1643, 877, 168, -527, -1245, -1704, - -2519, -2739, -3251, -3382, -3236, -3527, -3294, -3523, -3732, -3916, -4434, - -4888, -5615, -6161, -6729, -7283, -7543, -7920, -7865, -7660, -7430, -7034, - -6758, -6224, -5866, -5441, -5076, -4998, -4760, -4673, -4539, -4410, -4308, - -4131, -3992, -3791, -3611, -3448, -3213, -3070, -3046, -3048, -3168, -3244, - -3354, -3607, -3834, -4170, -4439, -4648, -4864, -4892, -4928, -4821, -4524, - -4211, -3576, -2819, -1968, -929, -19, 1029, 2064, 2949, 3716, 4159, - 4450, 4536, 4503, 4301, 3968, 3655, 3242, 2979, 2856, 2744, 2750, - 
2771, 2749, 2859, 2850, 2793, 2702, 2402, 2179, 1877, 1672, 1581, - 1543, 1769, 1967, 2485, 3089, 3783, 4662, 5406, 6246, 6950, 7542, - 8016, 8200, 8245, 8027, 7584, 6958, 6241, 5494, 4710, 3974, 3255, - 2653, 2274, 2038, 1986, 1964, 2141, 2321, 2513, 2772, 2756, 2743, - 2636, 2406, 2125, 1836, 1456, 1247, 1145, 995, 1077, 1140, 1290, - 1561, 1685, 1762, 1609, 1391, 1147, 544, 84, -754, -1546, -2107, - -2806, -3137, -3522, -3732, -3826, -3834, -3609, -3493, -3340, -3254, -3499, - -3621, -3981, -4455, -4859, -5513, -6080, -6626, -7061, -7372, -7556, -7573, - -7515, -7366, -7091, -6799, -6366, -5887, -5484, -5098, -4746, -4334, -3941, - -3558, -3269, -3053, -2844, -2663, -2497, -2314, -2227, -2185, -2141, -2139, - -2070, -2037, -2031, -2062, -2205, -2348, -2544, -2774, -2979, -3298, -3520, - -3647, -3622, -3395, -3054, -2513, -1829, -948, 64, 1090, 2169, 3127, - 3987, 4712, 5229, 5560, 5754, 5741, 5619, 5401, 5005, 4666, 4287, - 3967, 3734, 3476, 3322, 3203, 3147, 3144, 3116, 3080, 3011, 2871, - 2735, 2544, 2363, 2245, 2075, 2032, 2118, 2263, 2688, 3066, 3605, - 4244, 4746, 5384, 5819, 6151, 6319, 6194, 5938, 5495, 4929, 4305, - 3581, 2924, 2279, 1713, 1372, 1086, 1006, 983, 1006, 1146, 1249, - 1349, 1360, 1231, 1084, 794, 502, 264, -85, -238, -411, -504, - -394, -322, -51, 188, 420, 589, 624, 666, 573, 338, -86, - -564, -1056, -1560, -1925, -2434, -2806, -3017, -3341, -3320, -3375, -3480, - -3410, -3567, -3553, -3595, -3805, -3919, -4284, -4482, -4754, -5190, -5354, - -5806, -6050, -6136, -6387, -6343, -6330, -6206, -5851, -5468, -4960, -4549, - -4080, -3542, -3150, -2698, -2440, -2318, -2132, -2067, -2081, -2017, -2099, - -2151, -2060, -2067, -1916, -1823, -1718, -1523, -1386, -1221, -1189, -1141, - -1014, -1008, -966, -996, -1015, -916, -809, -648, -467, -128, 237, - 735, 1358, 1969, 2697, 3399, 4060, 4732, 5295, 5720, 6077, 6169, - 6139, 5928, 5614, 5292, 4766, 4247, 3705, 3262, 3030, 2827, 2702, - 2684, 2728, 2887, 3092, 3216, 3310, 3313, 3214, 3098, 2873, 2620, 
- 2343, 2031, 1799, 1589, 1491, 1537, 1645, 1913, 2210, 2548, 2922, - 3295, 3650, 3951, 4100, 4099, 3972, 3740, 3421, 2948, 2427, 1762, - 1136, 574, 44, -330, -642, -846, -852, -751, -520, -229, 44, - 272, 446, 502, 443, 329, 66, -191, -492, -841, -1002, -1240, - -1237, -1199, -1177, -936, -867, -660, -456, -508, -464, -706, -997, - -1265, -1780, -2178, -2724, -3270, -3735, -4142, -4378, -4609, -4666, -4749, - -4575, -4355, -4137, -3767, -3563, -3218, -2970, -2834, -2630, -2716, -2776, - -2920, -3210, -3363, -3764, -4023, -4125, -4268, -4194, -4223, -4005, -3639, - -3258, -2891, -2644, -2297, -1987, -1751, -1587, -1570, -1485, -1415, -1342, - -1194, -1100, -889, -613, -267, 161, 482, 865, 1269, 1639, 2005, - 2202, 2381, 2549, 2628, 2700, 2625, 2559, 2481, 2357, 2319, 2192, - 2142, 2199, 2283, 2514, 2670, 2919, 3214, 3510, 3830, 3971, 4080, - 4073, 3911, 3700, 3359, 2954, 2549, 2094, 1766, 1556, 1442, 1462, - 1560, 1808, 2070, 2357, 2606, 2730, 2831, 2737, 2582, 2309, 1931, - 1585, 1178, 834, 529, 288, 214, 218, 302, 470, 679, 944, - 1211, 1420, 1562, 1674, 1631, 1548, 1355, 1072, 776, 375, 25, - -320, -614, -818, -992, -991, -906, -755, -525, -291, -17, 225, - 447, 528, 546, 466, 270, 96, -205, -536, -861, -1148, -1383, - -1586, -1688, -1814, -1783, -1772, -1745, -1630, -1611, -1505, -1488, -1462, - -1409, -1519, -1489, -1609, -1723, -1755, -1977, -2042, -2132, -2215, -2184, - -2268, -2205, -2170, -2107, -1978, -1990, -1909, -1886, -1943, -1997, -2152, - -2326, -2500, -2762, -2987, -3227, -3392, -3522, -3630, -3579, -3469, -3262, - -2916, -2555, -2103, -1581, -1090, -531, -20, 457, 873, 1228, 1561, - 1809, 1999, 2105, 2139, 2196, 2201, 2149, 2113, 2038, 1990, 1913, - 1787, 1705, 1595, 1490, 1372, 1201, 1113, 998, 917, 917, 894, - 961, 1007, 1098, 1321, 1470, 1681, 1882, 2067, 2317, 2465, 2626, - 2750, 2777, 2783, 2694, 2569, 2431, 2142, 1843, 1597, 1306, 1069, - 824, 622, 532, 430, 388, 357, 377, 438, 414, 481, 468, - 431, 454, 383, 374, 305, 207, 187, 133, 157, 
115, 113, - 206, 244, 382, 475, 591, 753, 821, 916, 908, 855, 754, - 577, 399, 123, -159, -399, -647, -784, -923, -1010, -965, -918, - -806, -647, -504, -355, -253, -179, -130, -138, -156, -262, -339, - -401, -552, -600, -671, -697, -662, -673, -616, -597, -522, -495, - -513, -490, -624, -701, -804, -961, -1073, -1328, -1503, -1656, -1798, - -1801, -1913, -1863, -1785, -1720, -1453, -1309, -1051, -846, -715, -487, - -457, -357, -331, -400, -427, -627, -765, -873, -1021, -1105, -1255, - -1312, -1357, -1370, -1288, -1261, -1165, -1139, -1062, -917, -808, -680, - -597, -452, -277, -104, 122, 312, 558, 771, 919, 1110, 1205, - 1312, 1355, 1302, 1280, 1151, 1049, 946, 818, 733, 569, 451, - 429, 388, 408, 387, 376, 426, 463, 542, 576, 632, 666, - 673, 740, 766, 791, 845, 829, 857, 841, 822, 835, 796, - 773, 671, 600, 560, 484, 460, 371, 311, 284, 242, 277, - 261, 261, 277, 273, 358, 380, 410, 433, 435, 471, 432, - 414, 386, 330, 294, 194, 149, 108, 69, 84, 69, 92, - 83, 75, 88, 53, 12, -96, -194, -269, -369, -438, -523, - -553, -528, -500, -392, -277, -136, 53, 240, 466, 678, 870, - 1050, 1178, 1294, 1336, 1310, 1247, 1080, 916, 677, 387, 120, - -182, -471, -740, -972, -1148, -1273, -1343, -1402, -1363, -1263, -1129, - -922, -724, -518, -288, -79, 111, 250, 364, 405, 405, 395, - 284, 199, 83, -43, -126, -244, -313, -400, -451, -497, -610, - -672, -807, -951, -1087, -1325, -1517, -1736, -1929, -2086, -2260, -2318, - -2356, -2271, -2125, -1967, -1685, -1379, -1000, -598, -238, 149, 481, - 790, 1042, 1185, 1287, 1274, 1195, 1068, 868, 654, 386, 138, - -65, -273, -450, -598, -665, -670, -669, -620, -553, -425, -288, - -179, -72, 15, 122, 205, 263, 324, 357, 435, 518, 603, - 709, 779, 892, 1006, 1107, 1170, 1183, 1190, 1173, 1116, 1016, - 890, 750, 628, 488, 331, 197, 95, 43, 25, 1, 22, - 97, 209, 363, 495, 615, 724, 833, 937, 984, 990, 933, - 884, 851, 747, 678, 573, 497, 469, 401, 391, 352, 339, - 352, 337, 354, 361, 370, 402, 411, 418, 440, 468, 526, - 576, 619, 683, 766, 
857, 965, 1038, 1114, 1159, 1172, 1167, - 1106, 1006, 840, 644, 426, 177, -110, -390, -665, -929, -1160, - -1375, -1497, -1550, -1592, -1553, -1507, -1394, -1201, -1084, -863, -685, - -540, -322, -234, -68, 29, 59, 160, 141, 170, 140, 79, - 77, -11, -53, -179, -274, -327, -480, -564, -736, -884, -995, - -1185, -1300, -1461, -1617, -1711, -1832, -1831, -1863, -1865, -1776, -1691, - -1516, -1353, -1168, -954, -729, -490, -305, -93, 81, 211, 322, - 364, 392, 384, 332, 264, 146, 29, -101, -230, -357, -486, - -616, -705, -752, -801, -809, -788, -750, -654, -546, -456, -328, - -200, -78, 45, 137, 232, 316, 388, 447, 485, 528, 578, - 630, 697, 760, 835, 910, 988, 1068, 1124, 1154, 1157, 1166, - 1163, 1116, 1070, 1024, 994, 986, 988, 1030, 1110, 1212, 1303, - 1411, 1498, 1551, 1599, 1587, 1565, 1481, 1336, 1212, 1028, 847, - 669, 466, 330, 187, 61, -9, -54, -55, -20, 11, 69, - 133, 195, 244, 253, 225, 182, 133, 62, -11, -96, -168, - -199, -214, -213, -197, -167, -127, -105, -86, -83, -109, -140, - -217, -323, -448, -588, -717, -854, -971, -1086, -1185, -1211, -1227, - -1180, -1135, -1099, -992, -918, -788, -704, -651, -562, -542, -470, - -421, -431, -391, -429, -386, -344, -336, -260, -257, -162, -61, - -6, 100, 120, 178, 215, 179, 132, 15, -106, -238, -416, - -595, -765, -929, -1066, -1170, -1252, -1278, -1290, -1258, -1173, -1114, - -1012, -945, -868, -741, -695, -612, -547, -494, -388, -332, -225, - -110, 22, 182, 318, 496, 677, 835, 992, 1104, 1162, 1166, - 1133, 1054, 916, 709, 430, 164, -90, -340, -600, -853, -1033, - -1135, -1177, -1146, -1079, -946, -746, -500, -208, 83, 377, 673, - 950, 1183, 1356, 1503, 1627, 1707, 1735, 1708, 1678, 1668, 1645, - 1588, 1494, 1419, 1354, 1291, 1194, 1052, 900, 718, 524, 325, - 110, -114, -330, -500, -630, -729, -803, -834, -795, -727, -627, - -492, -325, -125, 54, 238, 393, 528, 642, 691, 706, 661, - 585, 504, 380, 245, 87, -61, -195, -320, -435, -556, -663, - -742, -814, -883, -952, -1009, -1038, -1047, -1067, -1063, -1050, 
-1020, - -949, -888, -795, -698, -574, -405, -257, -70, 68, 203, 381, - 479, 580, 619, 623, 645, 565, 492, 364, 206, 106, -71, - -191, -331, -460, -469, -527, -471, -441, -386, -222, -123, 60, - 168, 245, 404, 470, 596, 605, 581, 633, 548, 562, 468, - 355, 334, 192, 161, 62, -36, -39, -146, -121, -167, -243, - -229, -302, -276, -327, -415, -419, -444, -396, -433, -455, -407, - -357, -244, -221, -158, -63, 36, 172, 210, 296, 326, 351, - 424, 367, 369, 300, 224, 235, 124, 54, -39, -122, -118, - -239, -304, -360, -403, -361, -418, -427, -394, -342, -259, -232, - -176, -110, -48, 27, 48, 78, 90, 86, 91, 76, 57, - -1, -34, -53, -103, -151, -209, -239, -261, -319, -354, -372, - -382, -385, -411, -432, -428, -431, -446, -471, -496, -512, -532, - -562, -570, -567, -543, -499, -457, -379, -290, -204, -94, -11, - 78, 155, 196, 234, 222, 198, 160, 113, 64, 5, -57, - -108, -136, -175, -186, -196, -184, -125, -90, -25, 58, 146, - 271, 372, 472, 562, 636, 709, 741, 760, 752, 730, 710, - 688, 655, 608, 595, 570, 556, 540, 517, 513, 511, 497, - 481, 449, 417, 401, 347, 325, 295, 248, 261, 238, 250, - 294, 295, 367, 380, 416, 454, 430, 479, 443, 431, 430, - 386, 397, 333, 292, 238, 176, 153, 54, 24, -37, -84, - -109, -172, -155, -199, -220, -219, -261, -227, -255, -280, -266, - -293, -277, -273, -243, -214, -221, -179, -153, -130, -109, -154, - -149, -151, -155, -186, -243, -253, -311, -326, -358, -434, -427, - -491, -533, -554, -598, -596, -655, -668, -679, -714, -671, -694, - -643, -607, -602, -532, -496, -409, -408, -377, -309, -289, -211, - -223, -196, -145, -147, -104, -157, -123, -125, -177, -152, -229, - -192, -204, -243, -213, -259, -194, -190, -172, -98, -123, -43, - -12, 41, 103, 87, 148, 150, 166, 154, 113, 118, 80, - 54, 8, 4, 25, 12, 59, 70, 162, 260, 305, 387, - 427, 501, 549, 564, 571, 517, 488, 423, 355, 294, 206, - 165, 113, 92, 77, 62, 115, 116, 154, 162, 171, 218, - 210, 221, 208, 192, 215, 176, 169, 114, 89, 89, 52, - 62, 29, 35, 73, 98, 167, 195, 261, 325, 349, 
401, - 382, 393, 368, 302, 254, 174, 104, 6, -78, -136, -203, - -229, -291, -303, -284, -294, -241, -235, -222, -186, -187, -156, - -160, -149, -122, -114, -71, -44, -28, 6, 20, 47, 57, - 54, 52, 55, 53, 23, 9, -16, -59, -86, -158, -223, - -292, -372, -421, -498, -532, -561, -570, -531, -512, -456, -367, - -297, -206, -125, -37, 26, 88, 147, 157, 188, 169, 152, - 152, 131, 99, 62, 44, 46, 53, 61, 61, 79, 110, - 159, 175, 185, 237, 220, 278, 276, 239, 264, 203, 190, - 138, 70, 34, -9, 18, 1, 10, 71, 115, 191, 220, - 255, 265, 296, 319, 270, 266, 214, 189, 187, 155, 145, - 123, 149, 166, 172, 186, 179, 195, 213, 201, 182, 161, - 150, 116, 76, 41, -29, -58, -101, -183, -209, -269, -314, - -342, -385, -379, -380, -348, -304, -273, -197, -144, -88, -28, - -5, 11, 20, 27, -5, -24, -22, -61, -73, -87, -124, - -118, -133, -150, -160, -198, -196, -219, -228, -239, -281, -276, - -275, -288, -277, -305, -324, -302, -294, -292, -266, -261, -224, - -203, -210, -190, -198, -176, -180, -201, -196, -198, -175, -166, - -151, -127, -114, -59, -48, -8, 39, 75, 126, 131, 168, - 160, 152, 142, 82, 36, -13, -49, -81, -105, -105, -103, - -65, -38, -16, 19, 33, 67, 82, 95, 110, 98, 111, - 98, 87, 67, 54, 66, 52, 49, 53, 71, 106, 139, - 186, 224, 270, 320, 361, 413, 433, 462, 473, 478, 480, - 459, 441, 391, 339, 298, 239, 206, 159, 149, 120, 114, - 117, 95, 106, 81, 67, 61, 30, 11, -29, -42, -76, - -97, -98, -124, -107, -107, -103, -69, -71, -36, -12, 23, - 69, 86, 129, 152, 158, 162, 152, 127, 81, 48, -9, - -80, -120, -172, -201, -225, -276, -297, -311, -330, -339, -361, - -375, -389, -376, -365, -374, -378, -375, -370, -358, -347, -355, - -338, -314, -289, -244, -212, -168, -129, -80, -26, -12, 47, - 79, 92, 105, 105, 113, 99, 85, 29, -18, -53, -110, - -133, -167, -186, -196, -199, -176, -177, -150, -122, -106, -73, - -61, -30, -34, -29, -40, -68, -63, -85, -84, -71, -65, - -40, -16, 23, 56, 87, 144, 167, 196, 206, 221, 243, - 226, 233, 210, 192, 190, 150, 140, 110, 91, 77, 43, - 27, 
-10, -5, -5, -22, -9, -7, 27, 48, 59, 64, - 70, 87, 104, 139, 151, 188, 239, 270, 317, 311, 336, - 349, 341, 330, 274, 254, 223, 195, 163, 102, 81, 43, - 20, 8, -37, -28, -31, -29, -21, -39, -16, -22, -11, - -21, -41, -32, -47, -39, -60, -75, -71, -94, -98, -131, - -147, -139, -145, -146, -165, -150, -136, -112, -90, -106, -86, - -91, -87, -98, -136, -121, -135, -124, -132, -144, -114, -108, - -87, -74, -75, -50, -30, -5, -18, -24, -3, -3, -6, - -41, -76, -98, -127, -159, -215, -257, -263, -268, -266, -262, - -237, -194, -144, -113, -99, -61, -28, 12, 21, 46, 76, - 92, 130, 115, 123, 132, 135, 149, 134, 133, 132, 135, - 138, 94, 76, 51, 19, -15, -72, -98, -125, -135, -154, - -174, -171, -164, -139, -130, -99, -74, -40, 9, 34, 86, - 129, 176, 214, 226, 245, 250, 280, 271, 256, 250, 226, - 234, 212, 187, 178, 148, 144, 104, 79, 64, 37, 36, - 9, -10, -23, -38, -35, -62, -67, -67, -82, -70, -80, - -75, -59, -34, -3, 9, 48, 76, 101, 120, 120, 123, - 126, 131, 112, 92, 77, 61, 54, 32, 3, -18, -28, - -39, -56, -71, -91, -92, -100, -124, -134, -142, -144, -155, - -177, -178, -175, -171, -168, -160, -141, -123, -89, -73, -64, - -46, -39, -18, -19, -34, -32, -46, -51, -63, -74, -73, - -81, -70, -83, -71, -49, -39, -12, -1, 30, 48, 65, - 94, 100, 125, 136, 148, 156, 138, 140, 124, 115, 86, - 58, 57, 32, 43, 40, 44, 63, 60, 83, 90, 99, - 115, 113, 135, 140, 148, 164, 172, 187, 182, 190, 183, - 171, 171, 146, 139, 121, 105, 94, 61, 46, 17, -6, - -34, -70, -89, -121, -138, -158, -178, -190, -206, -206, -210, - -214, -204, -196, -173, -154, -128, -97, -81, -58, -51, -46, - -38, -47, -49, -57, -58, -57, -59, -49, -58, -58, -54, - -60, -48, -65, -72, -72, -78, -70, -77, -73, -76, -79, - -76, -90, -90, -91, -88, -76, -67, -43, -16, 6, 27, - 39, 55, 69, 71, 74, 65, 56, 60, 47, 37, 27, - 8, -5, -29, -50, -71, -89, -96, -114, -111, -113, -115, - -105, -112, -90, -78, -68, -49, -46, -26, -14, 5, 18, - 10, 14, 3, 5, -9, -20, -15, -30, -26, -33, -31, - -23, -23, -12, -21, -20, -16, -23, 
-20, -13, -7, 6, - 28, 47, 69, 96, 115, 134, 147, 154, 166, 174, 186, - 196, 202, 204, 198, 193, 181, 164, 144, 125, 113, 102, - 96, 90, 92, 91, 96, 99, 99, 100, 99, 99, 93, - 94, 86, 68, 55, 44, 36, 22, 13, 15, 13, 15, - 21, 16, 11, 3, -15, -31, -50, -75, -105, -125, -145, - -154, -155, -164, -178, -189, -186, -177, -174, -169, -152, -134, - -114, -93, -65, -42, -23, -4, -1, 6, 6, 2, -4, - -18, -26, -25, -25, -23, -32, -31, -33, -39, -50, -68, - -69, -74, -79, -78, -83, -85, -85, -77, -71, -61, -42, - -27, -3, 28, 59, 95, 123, 146, 155, 160, 162, 144, - 130, 112, 94, 82, 67, 60, 46, 35, 35, 22, 4, - -14, -27, -35, -45, -52, -61, -62, -65, -68, -55, -52, - -43, -38, -34, -20, -8, 8, 18, 24, 34, 36, 37, - 42, 46, 51, 50, 58, 76, 75, 70, 67, 58, 53, - 48, 36, 23, 18, 10, 3, 9, 14, 24, 39, 43, - 53, 62, 63, 66, 62, 66, 64, 59, 51, 25, 19, - 6, -10, -19, -26, -35, -43, -44, -37, -47, -43, -50, - -54, -60, -69, -75, -84, -91, -93, -98, -96, -99, -91, - -87, -91, -88, -84, -80, -75, -61, -48, -44, -40, -37, - -34, -45, -52, -58, -72, -82, -84, -78, -68, -65, -63, - -51, -42, -27, -22, -13, -3, 8, 20, 26, 31, 31, - 37, 33, 29, 33, 31, 32, 31, 34, 44, 55, 68, - 74, 69, 75, 73, 72, 65, 63, 67, 70, 83, 81, - 81, 85, 84, 80, 75, 69, 53, 44, 36, 27, 20, - 11, 1, -4, -19, -26, -27, -25, -21, -14, -12, -12, - -14, -9, -21, -29, -40, -50, -50, -54, -46, -35, -17, - -4, -1, 7, 20, 28, 26, 22, 23, 21, 23, 18, - 13, 12, 7, 6, 3, 2, -1, -1, 4, 6, 17, - 29, 35, 34, 34, 32, 28, 33, 26, 22, 16, 16, - 22, 20, 13, -1, -1, -7, -15, -20, -30, -32, -38, - -39, -45, -45, -53, -63, -70, -83, -96, -107, -113, -122, - -122, -118, -114, -114, -113, -112, -111, -110, -107, -103, -102, - -94, -80, -71, -58, -52, -47, -40, -43, -47, -48, -50, - -39, -46, -44, -44, -44, -43, -45, -41, -40, -34, -32, - -23, -12, -6, -1, -1, 6, 12, 18, 20, 22, 32, - 48, 65, 80, 93, 109, 122, 128, 131, 135, 135, 129, - 126, 130, 127, 124, 125, 121, 122, 115, 118, 122, 128, - 137, 143, 143, 141, 142, 134, 131, 121, 109, 
105, 97, - 93, 99, 96, 96, 94, 83, 84, 80, 77, 66, 59, - 46, 42, 44, 32, 28, 20, 12, 8, 4, 4, 5, - 3, -4, -7, -6, -14, -19, -24, -34, -40, -45, -52, - -61, -62, -60, -57, -57, -61, -63, -61, -65, -73, -81, - -89, -94, -93, -89, -87, -82, -82, -84, -81, -86, -82, - -84, -86, -90, -86, -83, -82, -81, -80, -80, -76, -75, - -76, -70, -69, -68, -61, -53, -50, -43, -38, -42, -43, - -41, -41, -39, -34, -27, -21, -16, -20, -22, -27, -36, - -39, -38, -40, -37, -35, -28, -14, -6, -3, -2, 2, - 4, 5, 15, 18, 25, 35, 36, 41, 45, 48, 52, - 54, 52, 50, 60, 67, 76, 85, 85, 90, 86, 83, - 84, 77, 77, 72, 77, 81, 89, 91, 93, 99, 101, - 102, 98, 94, 87, 77, 70, 69, 63, 62, 55, 59, - 58, 54, 51, 53, 57, 62, 65, 60, 54, 48, 45, - 40, 29, 17, 8, -3, -14, -17, -18, -20, -25, -34, - -40, -44, -53, -56, -63, -71, -71, -69, -66, -62, -66, - -67, -68, -71, -75, -79, -79, -73, -67, -60, -49, -46, - -45, -45, -46, -55, -64, -67, -72, -74, -70, -68, -67, - -69, -70, -64, -56, -55, -54, -51, -41, -30, -26, -28, - -29, -30, -28, -25, -27, -20, -12, -5, -2, 2, 3, - -3, 0, -7, -8, -14, -15, -9, -7, 4, 12, 24, - 36, 41, 52, 58, 59, 51, 45, 48, 44, 46, 43, - 40, 42, 47, 53, 52, 52, 63, 69, 74, 75, 80, - 78, 69, 68, 59, 60, 54, 54, 54, 58, 66, 71, - 78, 78, 75, 78, 72, 71, 61, 55, 53, 42, 36, - 31, 28, 29, 23, 19, 25, 27, 27, 23, 29, 29, - 20, 11, 5, -4, -10, -31, -38, -39, -36, -33, -27, - -17, -15, -14, -17, -13, -14, -25, -33, -44, -51, -61, - -63, -63, -65, -67, -66, -63, -59, -52, -48, -45, -44, - -50, -62, -74, -84, -89, -100, -101, -102, -96, -95, -85, - -76, -78, -72, -71, -66, -61, -63, -60, -62, -72, -69, - -69, -58, -56, -50, -37, -28, -17, -17, -16, -17, -18, - -18, -13, -7, -4, 6, 17, 23, 25, 28, 24, 21, - 17, 21, 27, 30, 33, 35, 46, 49, 48, 54, 56, - 57, 58, 60, 64, 62, 64, 66, 67, 64, 70, 77, - 83, 82, 84, 88, 89, 95, 86, 75, 64, 51, 36, - 29, 26, 21, 26, 31, 38, 40, 55, 63, 65, 65, - 64, 60, 54, 54, 49, 41, 34, 26, 21, 9, 6, - 6, 5, -1, 3, 5, 3, 2, -4, -13, -13, -24, - -32, -33, -36, 
-33, -24, -18, -15, -9, -5, -5, -14, - -17, -24, -34, -36, -42, -43, -36, -42, -43, -43, -38, - -36, -27, -20, -23, -21, -28, -25, -22, -24, -25, -23, - -22, -30, -31, -26, -25, -20, -15, -8, -10, -11, -13, - -18, -22, -30, -36, -35, -39, -35, -34, -27, -24, -19, - -15, -7, -6, -7, -2, 0, 7, 12, 14, 19, 20, - 26, 26, 24, 16, 10, 4, 1, 3, 2, 9, 11, - 17, 19, 27, 31, 31, 32, 30, 27, 25, 28, 27, - 25, 22, 23, 23, 20, 21, 25, 36, 38, 40, 43, - 40, 32, 27, 20, 9, 4, 1, 12, 27, 37, 49, - 63, 73, 72, 73, 70, 67, 53, 39, 33, 26, 23, - 13, 9, 6, 0, -2, -3, 0, -1, 0, -1, -4, - -9, -16, -22, -21, -24, -21, -19, -12, -3, 0, 12, - 14, 13, 3, -6, -13, -27, -34, -42, -41, -44, -42, - -43, -46, -42, -40, -39, -36, -31, -29, -30, -22, -19, - -21, -20, -17, -17, -22, -31, -41, -45, -54, -65, -64, - -68, -70, -74, -70, -64, -62, -61, -60, -58, -52, -46, - -43, -37, -35, -40, -41, -47, -52, -58, -62, -61, -53, - -54, -46, -41, -40, -34, -29, -20, -15, -8, 2, 12, - 28, 35, 41, 42, 42, 43, 41, 43, 39, 45, 44, - 46, 55, 54, 55, 55, 51, 48, 42, 43, 39, 40, - 46, 54, 65, 70, 76, 81, 86, 89, 79, 73, 70, - 62, 56, 52, 39, 32, 28, 17, 18, 19, 18, 15, - 19, 20, 15, 13, 13, 10, 6, 5, 12, 10, 15, - 20, 24, 30, 31, 28, 22, 17, 2, -15, -24, -39, - -52, -53, -55, -46, -40, -34, -26, -21, -22, -31, -32, - -38, -36, -35, -32, -33, -34, -30, -28, -27, -35, -40, - -42, -45, -44, -45, -44, -52, -54, -57, -57, -53, -60, - -63, -63, -65, -51, -45, -40, -40, -39, -39, -43, -44, - -46, -52, -46, -51, -49, -45, -45, -47, -47, -45, -50, - -47, -40, -35, -32, -24, -17, -19, -14, -13, -9, -7, - -7, -7, -9, 0, 3, 7, 13, 12, 14, 15, 13, - 6, -1, -3, -9, -10, -5, -2, 6, 9, 11, 12, - 15, 19, 24, 37, 47, 47, 56, 53, 51, 52, 52, - 47, 39, 38, 40, 41, 43, 44, 42, 43, 42, 41, - 43, 40, 41, 35, 37, 39, 40, 41, 38, 30, 21, - 14, 5, 2, -1, -2, 1, -2, 6, 2, 4, 2, - -1, -11, -16, -23, -25, -20, -18, -25, -27, -32, -27, - -24, -16, -15, -11, -9, -3, -4, -2, -9, -10, -18, - -28, -33, -38, -37, -41, -41, -33, -24, -22, -25, 
-25, - -25, -24, -33, -38, -42, -52, -57, -55, -50, -51, -53, - -52, -48, -49, -49, -53, -55, -58, -51, -34, -19, -12, - -12, -5, 1, 1, 0, -6, -2, -10, -11, -11, -6, - 0, -6, 2, -2, -6, 2, 5, 16, 18, 18, 21, - 16, 18, 18, 20, 20, 13, 18, 9, 7, 12, 7, - 8, 10, 16, 17, 18, 23, 26, 36, 44, 51, 55, - 60, 64, 69, 68, 71, 70, 62, 58, 52, 44, 35, - 31, 34, 32, 33, 36, 37, 38, 41, 47, 55, 56, - 58, 60, 60, 57, 48, 41, 29, 19, 7, 4, 8, - 9, 10, 8, 13, 15, 13, 8, 8, 6, 4, 10, - 8, -4, -6, -9, -20, -28, -39, -38, -27, -24, -22, - -19, -23, -32, -35, -36, -41, -48, -51, -50, -52, -55, - -60, -67, -72, -76, -84, -82, -80, -81, -75, -64, -50, - -36, -28, -18, -14, -12, -15, -12, -18, -24, -21, -22, - -19, -21, -19, -22, -20, -18, -16, -17, -19, -15, -7, - 1, 0, 0, 9, 14, 20, 24, 20, 16, 17, 20, - 20, 25, 27, 26, 32, 33, 35, 38, 42, 38, 37, - 39, 46, 44, 43, 45, 45, 42, 37, 34, 25, 21, - 22, 33, 44, 49, 54, 53, 58, 54, 51, 46, 40, - 37, 37, 39, 34, 37, 39, 31, 39, 38, 36, 35, - 32, 33, 33, 32, 28, 23, 18, 22, 28, 31, 27, - 18, 3, 4, 0, -4, -7, -15, -18, -24, -32, -34, - -39, -42, -36, -31, -24, -12, -10, -10, -13, -20, -28, - -34, -44, -49, -50, -53, -56, -54, -52, -53, -47, -43, - -41, -45, -41, -38, -38, -33, -32, -34, -35, -33, -40, - -45, -53, -62, -61, -67, -72, -70, -67, -68, -59, -51, - -47, -38, -31, -20, -13, -13, -13, -14, -17, -21, -22, - -29, -31, -27, -23, -13, -6, 4, 12, 17, 25, 23, - 23, 25, 30, 30, 32, 31, 28, 27, 18, 14, 13, - 3, 5, 7, 19, 35, 47, 61, 70, 84, 90, 95, - 92, 94, 89, 77, 71, 66, 59, 50, 51, 50, 51, - 53, 56, 65, 67, 69, 75, 74, 69, 67, 56, 51, - 44, 34, 25, 17, 10, 6, 7, 7, 4, 6, -1, - -1, -2, -9, -9, -9, -7, -5, 1, -2, -5, -11, - -19, -27, -39, -38, -44, -45, -48, -48, -54, -59, -53, - -51, -49, -52, -50, -50, -47, -42, -32, -28, -28, -26, - -27, -34, -40, -40, -36, -37, -37, -34, -37, -36, -41, - -36, -40, -46, -48, -52, -47, -44, -40, -40, -38, -43, - -43, -47, -59, -62, -59, -59, -51, -41, -29, -19, -8, - -2, 1, 1, -4, -9, -19, -23, -29, -29, 
-25, -23, - -15, -7, -2, 6, 8, 15, 27, 35, 43, 40, 36, - 35, 32, 25, 22, 19, 17, 13, 13, 21, 25, 28, - 36, 44, 50, 57, 56, 58, 59, 62, 66, 70, 73, - 69, 66, 66, 66, 62, 53, 48, 44, 38, 39, 44, - 52, 51, 55, 57, 52, 49, 44, 36, 26, 16, 13, - 13, 14, 14, 17, 14, 10, 6, -5, -14, -23, -24, - -21, -28, -25, -27, -29, -29, -33, -33, -39, -42, -43, - -41, -40, -43, -46, -45, -43, -42, -41, -41, -46, -46, - -52, -52, -52, -59, -63, -70, -68, -73, -77, -73, -68, - -66, -62, -64, -66, -58, -54, -51, -52, -48, -47, -43, - -40, -39, -33, -26, -19, -17, -16, -17, -14, -9, -10, - -3, 5, 5, 9, 5, 9, 8, 4, 3, 0, -5, - -10, -3, 2, 8, 14, 16, 20, 27, 39, 40, 44, - 48, 43, 39, 34, 29, 22, 12, 8, 5, 0, -2, - -3, 5, 12, 16, 19, 22, 25, 28, 35, 28, 30, - 31, 30, 39, 43, 47, 43, 42, 41, 41, 41, 37, - 37, 39, 37, 38, 43, 44, 41, 43, 34, 28, 25, - 23, 30, 34, 32, 33, 29, 21, 18, 13, 14, 11, - 3, 2, 1, 3, 1, -1, 0, -3, -1, -3, -8, - -9, -7, -9, -2, 0, -3, 0, 1, 5, 0, -1, - -9, -13, -8, -11, -18, -23, -25, -29, -29, -26, -27, - -29, -25, -24, -23, -18, -19, -18, -17, -21, -22, -30, - -38, -42, -42, -42, -40, -41, -43, -39, -38, -37, -36, - -33, -31, -28, -27, -18, -15, -7, -8, -8, -1, 1, - 3, -5, 0, -4, -5, -4, -8, -10, -14, -21, -24, - -25, -20, -11, -4, 3, 6, 13, 15, 12, 17, 16, - 17, 17, 15, 21, 28, 33, 36, 35, 35, 29, 31, - 29, 28, 23, 21, 14, 15, 27, 36, 40, 40, 43, - 51, 56, 62, 69, 77, 80, 88, 88, 88, 82, 76, - 63, 52, 44, 36, 26, 23, 25, 24, 27, 26, 31, - 21, 13, 8, -8, -8, -11, -14, -18, -28, -28, -30, - -32, -29, -26, -26, -27, -24, -20, -14, -8, -6, -8, - -5, -10, -14, -18, -26, -34, -36, -38, -44, -51, -57, - -66, -64, -68, -72, -75, -75, -70, -68, -65, -64, -62, - -68, -63, -60, -65, -65, -69, -68, -67, -57, -46, -41, - -38, -34, -31, -39, -40, -45, -45, -48, -47, -40, -39, - -32, -26, -24, -14, -9, -7, -3, -2, 3, 4, 0, - -2, -2, -2, 1, 3, 2, 3, 8, 13, 20, 25, - 29, 31, 26, 17, 11, 3, -5, 2, 6, 9, 11, - 19, 26, 40, 51, 61, 60, 58, 61, 55, 55, 57, - 60, 54, 40, 42, 38, 34, 38, 
37, 34, 32, 35, - 36, 35, 41, 36, 32, 29, 23, 22, 23, 22, 14, - 13, 19, 19, 20, 22, 22, 17, 13, 6, 9, 13, - 15, 17, 19, 11, 15, 8, 4, 6, -1, -3, 3, - 7, 11, 8, 10, 7, 6, 4, -4, -5, -11, -9, - -16, -14, -14, -16, -16, -22, -19, -19, -13, -9, -4, - 1, 1, 2, -6, -14, -25, -32, -41, -46, -50, -49, - -42, -39, -34, -24, -14, -18, -15, -17, -21, -23, -21, - -19, -21, -20, -19, -20, -19, -16, -17, -19, -20, -20, - -20, -20, -22, -22, -23, -22, -22, -14, -5, 5, 8, - 13, 16, 19, 23, 19, 21, 16, 16, 18, 13, 18, - 13, 15, 18, 12, 12, 6, 11, 8, 5, 5, 9, - 17, 14, 15, 14, 16, 14, 14, 12, 9, 7, 9, - 11, 13, 15, 15, 19, 17, 14, 8, 7, 4, 0, - 3, 8, 10, 7, 8, 19, 15, 19, 18, 19, 17, - 9, 14, 10, 4, -3, -11, -19, -25, -31, -35, -36, - -28, -21, -8, 5, 8, 11, 13, 7, 4, 1, -7, - -15, -17, -17, -21, -28, -33, -37, -40, -39, -41, -45, - -46, -44, -40, -41, -36, -31, -41, -40, -42, -44, -47, - -50, -49, -55, -52, -52, -52, -45, -50, -52, -56, -58, - -60, -69, -75, -82, -86, -91, -87, -80, -80, -72, -58, - -52, -45, -33, -21, -13, -12, -10, -6, -1, -2, -7, - -7, -5, -6, -3, 9, 15, 25, 36, 35, 39, 28, - 16, 11, 8, 11, 17, 27, 34, 36, 47, 49, 52, - 52, 42, 46, 49, 55, 65, 66, 67, 62, 56, 53, - 49, 50, 55, 53, 62, 69, 72, 73, 68, 61, 54, - 46, 43, 38, 34, 39, 43, 42, 39, 36, 31, 26, - 24, 17, 13, 14, 14, 21, 26, 29, 28, 26, 24, - 18, 19, 16, 11, 6, 2, -2, 1, 3, 2, -4, - -3, -1, -3, -2, -2, -5, -3, 0, 3, -3, -6, - -6, -15, -19, -25, -30, -35, -39, -34, -34, -34, -31, - -17, -17, -8, -2, -2, 8, 14, 25, 24, 26, 22, - 16, 10, 2, -3, -5, -12, -15, -11, -14, -16, -17, - -17, -16, -21, -18, -18, -21, -23, -21, -15, -11, -4, - -2, 3, 8, 10, 17, 18, 25, 24, 24, 24, 21, - 24, 23, 24, 22, 23, 31, 39, 49, 58, 64, 67, - 63, 57, 53, 52, 44, 45, 43, 40, 45, 42, 49, - 50, 49, 52, 51, 48, 46, 38, 37, 35, 36, 37, - 37, 37, 44, 45, 47, 42, 42, 36, 35, 44, 40, - 40, 28, 24, 23, 18, 12, 9, 8, 10, 17, 17, - 18, 12, 5, -2, -12, -16, -20, -27, -29, -29, -26, - -22, -17, -16, -15, -14, -15, -11, -11, -15, -19, -15, 
- -20, -22, -24, -37, -52, -62, -63, -68, -64, -59, -51, - -43, -42, -36, -32, -33, -33, -33, -41, -48, -51, -49, - -48, -47, -42, -45, -42, -41, -40, -39, -33, -29, -25, - -14, -1, -4, -6, -11, -16, -19, -26, -29, -28, -25, - -17, -10, -1, -1, 3, 7, -1, -3, -8, -18, -20, - -20, -16, -13, -11, -8, 0, 6, 8, 11, 14, 15, - 20, 26, 26, 26, 24, 23, 24, 30, 34, 41, 52, - 61, 70, 80, 85, 86, 89, 84, 87, 79, 67, 60, - 57, 59, 63, 68, 74, 78, 84, 89, 91, 87, 81, - 74, 69, 63, 59, 59, 56, 58, 60, 60, 59, 54, - 49, 41, 40, 34, 25, 19, 11, 1, 0, -1, -4, - -8, -12, -12, -17, -22, -31, -44, -54, -58, -68, -74, - -80, -80, -73, -65, -61, -61, -55, -50, -50, -59, -65, - -69, -73, -73, -78, -79, -83, -87, -87, -88, -94, -103, - -107, -107, -109, -106, -113, -115, -110, -105, -100, -100, -92, - -78, -62, -49, -39, -35, -27, -26, -25, -24, -22, -23, - -28, -26, -22, -15, -11, -4, 4, 13, 21, 32, 31, - 28, 30, 30, 28, 23, 25, 23, 21, 25, 21, 26, - 27, 32, 40, 48, 53, 55, 54, 55, 55, 54, 48, - 44, 47, 48, 54, 60, 71, 79, 79, 74, 72, 59, - 48, 42, 32, 26, 22, 21, 23, 22, 31, 42, 44, - 41, 36, 30, 30, 33, 38, 35, 30, 28, 20, 15, - 8, 4, 6, 9, 16, 26, 27, 23, 19, 16, 10, - 4, -4, -12, -12, -16, -16, -19, -24, -23, -23, -31, - -34, -38, -40, -41, -39, -39, -36, -36, -40, -45, -48, - -53, -66, -73, -76, -76, -78, -75, -71, -65, -59, -58, - -59, -56, -60, -62, -62, -62, -64, -68, -73, -79, -80, - -85, -87, -85, -78, -72, -66, -56, -48, -42, -37, -35, - -32, -33, -31, -25, -26, -27, -16, -18, -18, -13, -14, - -17, -22, -24, -25, -23, -19, -14, -12, -11, -7, -4, - -1, 2, 5, 8, 10, 10, 18, 28, 29, 25, 22, - 29, 21, 20, 21, 22, 30, 32, 41, 41, 45, 46, - 49, 52, 57, 59, 58, 52, 46, 47, 56, 58, 49, - 49, 46, 40, 33, 23, 14, 11, 16, 29, 34, 37, - 41, 42, 48, 54, 60, 61, 62, 62, 69, 79, 76, - 71, 72, 71, 64, 59, 54, 49, 40, 42, 34, 23, - 27, 18, 13, 9, 3, -4, -8, -16, -18, -20, -26, - -28, -30, -32, -29, -32, -35, -39, -41, -38, -34, -31, - -26, -18, -21, -20, -22, -28, -35, -34, -31, -33, -31, - 
-31, -40, -43, -45, -53, -64, -67, -74, -75, -74, -75, - -70, -61, -56, -45, -37, -30, -33, -35, -32, -31, -27, - -25, -19, -17, -14, -9, -4, -1, -3, -4, 1, 8, - 14, 20, 24, 25, 18, 11, 7, -3, -9, -3, 4, - 15, 30, 29, 33, 33, 36, 35, 31, 33, 34, 42, - 43, 42, 47, 49, 53, 61, 69, 73, 74, 79, 81, - 84, 76, 69, 62, 47, 39, 31, 19, 8, 2, -6, - -5, -3, -3, -1, 1, -2, -3, -3, -6, -12, -13, - -15, -11, -5, -4, -8, -14, -9, -3, 0, -3, -4, - 0, 3, 0, -6, -14, -23, -33, -38, -41, -38, -38, - -34, -30, -29, -29, -26, -31, -33, -41, -49, -50, -56, - -57, -58, -54, -46, -39, -39, -34, -31, -28, -30, -30, - -31, -29, -27, -16, -18, -17, -15, -13, -15, -12, -7, - -11, -9, -9, -4, -11, -7, -7, -8, -9, -10, -7, - -9, 1, 9, 15, 12, 19, 19, 18, 17, 13, 11, - 8, 6, 10, 17, 20, 26, 28, 33, 39, 30, 25, - 25, 18, 16, 21, 26, 30, 33, 32, 36, 42, 49, - 46, 39, 44, 44, 37, 35, 30, 24, 22, 23, 26, - 23, 25, 21, 24, 24, 22, -}; diff --git a/examples/micro_speech/no_1000ms_sample_data.h b/examples/micro_speech/no_1000ms_sample_data.h deleted file mode 100644 index ab2d67b..0000000 --- a/examples/micro_speech/no_1000ms_sample_data.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -// This data was created from the PCM data in a WAV file held in v2 of the -// Speech Commands test dataset, at the path: -// speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav -// This should contain all 16,000 samples from the one-second file. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_1000MS_SAMPLE_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_1000MS_SAMPLE_DATA_H_ - -#include - -extern const int g_no_1000ms_sample_data_size; -extern const int16_t g_no_1000ms_sample_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_1000MS_SAMPLE_DATA_H_ diff --git a/examples/micro_speech/no_30ms_sample_data.cpp b/examples/micro_speech/no_30ms_sample_data.cpp deleted file mode 100644 index 674b30a..0000000 --- a/examples/micro_speech/no_30ms_sample_data.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
- -#include "no_30ms_sample_data.h" - -const int g_no_30ms_sample_data_size = 480; -const int16_t g_no_30ms_sample_data[480] = { - 5713, 5735, 5735, 5737, 5701, 5691, 5656, 5633, 5611, 5552, 5475, - 5394, 5293, 5177, 5064, 4924, 4737, 4599, 4420, 4237, 4048, 3828, - 3623, 3413, 3183, 2915, 2622, 2308, 1980, 1657, 1261, 901, 549, - 205, -85, -383, -688, -969, -1246, -1530, -1850, -2206, -2561, -2915, - -3224, -3482, -3713, -3921, -4107, -4287, -4470, -4660, -4850, -5057, -5239, - -5395, -5540, -5619, -5697, -5724, -5697, -5675, -5633, -5590, -5579, -5530, - -5486, -5442, -5426, -5391, -5348, -5276, -5197, -5124, -5039, -4925, -4808, - -4677, -4581, -4479, -4343, -4218, -4087, -3970, -3858, -3729, -3570, -3384, - -3206, -3020, -2839, -2636, -2453, -2287, -2185, -2154, -1926, -1562, -1223, - -758, -473, -64, 395, 599, 880, 814, 938, 1172, 1498, 1928, - 2127, 2422, 2608, 2841, 2937, 2886, 2815, 2985, 3324, 3757, 4152, - 4481, 4652, 4917, 4965, 4766, 4583, 4328, 4503, 4815, 5118, 5408, - 5682, 5956, 6082, 6055, 5744, 5426, 5341, 5427, 5606, 5882, 6065, - 6226, 6428, 6477, 6385, 6009, 5728, 5552, 5439, 5339, 5200, 5008, - 4947, 4835, 4614, 4330, 3887, 3521, 3111, 2460, 1983, 1297, 650, - 279, -353, -720, -1044, -1518, -1668, -2117, -2496, -2743, -3266, -3607, - -3790, -4149, -4075, -4042, -4096, -3981, -4138, -4226, -4214, -4503, -4455, - -4577, -4642, -4346, -4351, -4270, -4263, -4522, -4521, -4673, -4814, -4731, - -4950, -5011, -5004, -5288, -5341, -5566, -5833, -5783, -5929, -5847, -5765, - -5828, -5644, -5613, -5615, -5428, -5291, -5014, -4554, -4277, -3964, -3854, - -3829, -3612, -3603, -3438, -3137, -2831, -2164, -1438, -939, -330, -156, - 46, 242, 73, 242, 220, 239, 542, 565, 739, 872, 801, - 857, 676, 543, 586, 567, 828, 1142, 1490, 1985, 2508, 2982, - 3438, 3699, 3939, 4069, 4178, 4420, 4622, 4917, 5338, 5801, 6285, - 6658, 6963, 7213, 7233, 7328, 7176, 7038, 7031, 6860, 6957, 6767, - 6599, 6523, 6212, 6147, 6063, 5860, 6020, 6015, 6033, 6184, 5722, - 5607, 
5016, 4337, 4063, 3229, 3080, 3006, 2804, 3035, 2541, 2136, - 1879, 1012, 401, -575, -1584, -1930, -2278, -2485, -2477, -2712, -2747, - -2766, -3320, -3592, -4188, -4669, -4672, -4939, -4789, -4426, -4203, -3674, - -3563, -3656, -3759, -4067, -4257, -4522, -4970, -5204, -5237, -5139, -4907, - -4911, -4917, -4921, -5007, -5230, -5654, -6122, -6464, -6733, -6948, -7067, - -6972, -6800, -6520, -6132, -5830, -5382, -5091, -4797, -4546, -4472, -4362, - -4350, -4235, -3851, -3454, -3144, -2735, -2341, -1845, -1262, -958, -549, - -166, 66, 382, 366, 352, 341, 85, -13, -176, -303, -235, - -341, -309, -227, -249, -50, 143, 384, 874, 1149, 1552, 2155, - 2767, 3499, 3994, 4460, 4920, 5288, 5569, 5704, 5881, 6094, 6461, - 6653, 6803, 7115, 7311, 7521, 7612, 7443, 7380, 7124, 6742, 6495, - 5964, 5656, 5415, 5167, 5656, 5813, 6027, 6401, 6351, 6787, 7019, - 6581, 6512, 5965, 5308, 5140, 4336, 4147, 3899, 3398, 3360, 2830, - 2624, 1968, 1026, 395, -699, -1424, -2327, -3006, -3192, -3435, -3337, - -3686, -3513, -3350, -3502, -3261, -3878, -4005, -4063, -4187, -3767, -3598, - -3384, -3300, -3094, -2857, -3023, -3274, -3851, -4352, -4523, -4943, -5477, - -5612, -5682, -5733, -5714, -5965, -6110, -5950, -6158, -6548, -6897, -7165, - -7281, -7352, -7258, -7185, -6659, -5946, -5470, -}; diff --git a/examples/micro_speech/no_30ms_sample_data.h b/examples/micro_speech/no_30ms_sample_data.h deleted file mode 100644 index 1eecca2..0000000 --- a/examples/micro_speech/no_30ms_sample_data.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was created from the PCM data in a WAV file held in v2 of the -// Speech Commands test dataset, at the path: -// speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav -// The data was extracted starting at an offset of 8,960, which corresponds to -// the 29th spectrogram slice. It's designed to be used to test the -// preprocessing pipeline, to ensure that the expected spectrogram slice is -// produced given this input. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_ - -#include - -extern const int g_no_30ms_sample_data_size; -extern const int16_t g_no_30ms_sample_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_ diff --git a/examples/micro_speech/recognize_commands.cpp b/examples/micro_speech/recognize_commands.cpp deleted file mode 100644 index 37e8063..0000000 --- a/examples/micro_speech/recognize_commands.cpp +++ /dev/null @@ -1,142 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "recognize_commands.h" - -#include - -RecognizeCommands::RecognizeCommands(tflite::ErrorReporter* error_reporter, - int32_t average_window_duration_ms, - uint8_t detection_threshold, - int32_t suppression_ms, - int32_t minimum_count) - : error_reporter_(error_reporter), - average_window_duration_ms_(average_window_duration_ms), - detection_threshold_(detection_threshold), - suppression_ms_(suppression_ms), - minimum_count_(minimum_count), - previous_results_(error_reporter) { - previous_top_label_ = "silence"; - previous_top_label_time_ = std::numeric_limits::min(); -} - -TfLiteStatus RecognizeCommands::ProcessLatestResults( - const TfLiteTensor* latest_results, const int32_t current_time_ms, - const char** found_command, uint8_t* score, bool* is_new_command) { - if ((latest_results->dims->size != 2) || - (latest_results->dims->data[0] != 1) || - (latest_results->dims->data[1] != kCategoryCount)) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "The results for recognition should contain %d elements, but there are " - "%d in an %d-dimensional shape", - kCategoryCount, latest_results->dims->data[1], - latest_results->dims->size); - return kTfLiteError; - } - - if (latest_results->type != kTfLiteInt8) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "The results for recognition should be int8_t elements, but are %d", - latest_results->type); - return kTfLiteError; - } - - if ((!previous_results_.empty()) && - (current_time_ms < previous_results_.front().time_)) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Results must be fed in increasing time order, but received a " - "timestamp of %d that was earlier than the previous one of %d", - current_time_ms, previous_results_.front().time_); - return kTfLiteError; - } - - // Add the latest results to the head of the queue. 
- previous_results_.push_back({current_time_ms, latest_results->data.int8}); - - // Prune any earlier results that are too old for the averaging window. - const int64_t time_limit = current_time_ms - average_window_duration_ms_; - while ((!previous_results_.empty()) && - previous_results_.front().time_ < time_limit) { - previous_results_.pop_front(); - } - - // If there are too few results, assume the result will be unreliable and - // bail. - const int64_t how_many_results = previous_results_.size(); - const int64_t earliest_time = previous_results_.front().time_; - const int64_t samples_duration = current_time_ms - earliest_time; - if ((how_many_results < minimum_count_) || - (samples_duration < (average_window_duration_ms_ / 4))) { - *found_command = previous_top_label_; - *score = 0; - *is_new_command = false; - return kTfLiteOk; - } - - // Calculate the average score across all the results in the window. - int32_t average_scores[kCategoryCount]; - for (int offset = 0; offset < previous_results_.size(); ++offset) { - PreviousResultsQueue::Result previous_result = - previous_results_.from_front(offset); - const int8_t* scores = previous_result.scores; - for (int i = 0; i < kCategoryCount; ++i) { - if (offset == 0) { - average_scores[i] = scores[i] + 128; - } else { - average_scores[i] += scores[i] + 128; - } - } - } - for (int i = 0; i < kCategoryCount; ++i) { - average_scores[i] /= how_many_results; - } - - // Find the current highest scoring category. - int current_top_index = 0; - int32_t current_top_score = 0; - for (int i = 0; i < kCategoryCount; ++i) { - if (average_scores[i] > current_top_score) { - current_top_score = average_scores[i]; - current_top_index = i; - } - } - const char* current_top_label = kCategoryLabels[current_top_index]; - - // If we've recently had another label trigger, assume one that occurs too - // soon afterwards is a bad result. 
- int64_t time_since_last_top; - if ((previous_top_label_ == kCategoryLabels[0]) || - (previous_top_label_time_ == std::numeric_limits::min())) { - time_since_last_top = std::numeric_limits::max(); - } else { - time_since_last_top = current_time_ms - previous_top_label_time_; - } - if ((current_top_score > detection_threshold_) && - ((current_top_label != previous_top_label_) || - (time_since_last_top > suppression_ms_))) { - previous_top_label_ = current_top_label; - previous_top_label_time_ = current_time_ms; - *is_new_command = true; - } else { - *is_new_command = false; - } - *found_command = current_top_label; - *score = current_top_score; - - return kTfLiteOk; -} diff --git a/examples/micro_speech/recognize_commands.h b/examples/micro_speech/recognize_commands.h deleted file mode 100644 index e93eef3..0000000 --- a/examples/micro_speech/recognize_commands.h +++ /dev/null @@ -1,159 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_ - -#include - -#include "tensorflow/lite/c/common.h" -#include "micro_features/micro_model_settings.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// Partial implementation of std::dequeue, just providing the functionality -// that's needed to keep a record of previous neural network results over a -// short time period, so they can be averaged together to produce a more -// accurate overall prediction. This doesn't use any dynamic memory allocation -// so it's a better fit for microcontroller applications, but this does mean -// there are hard limits on the number of results it can store. -class PreviousResultsQueue { - public: - PreviousResultsQueue(tflite::ErrorReporter* error_reporter) - : error_reporter_(error_reporter), front_index_(0), size_(0) {} - - // Data structure that holds an inference result, and the time when it - // was recorded. 
- struct Result { - Result() : time_(0), scores() {} - Result(int32_t time, int8_t* input_scores) : time_(time) { - for (int i = 0; i < kCategoryCount; ++i) { - scores[i] = input_scores[i]; - } - } - int32_t time_; - int8_t scores[kCategoryCount]; - }; - - int size() { return size_; } - bool empty() { return size_ == 0; } - Result& front() { return results_[front_index_]; } - Result& back() { - int back_index = front_index_ + (size_ - 1); - if (back_index >= kMaxResults) { - back_index -= kMaxResults; - } - return results_[back_index]; - } - - void push_back(const Result& entry) { - if (size() >= kMaxResults) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Couldn't push_back latest result, too many already!"); - return; - } - size_ += 1; - back() = entry; - } - - Result pop_front() { - if (size() <= 0) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Couldn't pop_front result, none present!"); - return Result(); - } - Result result = front(); - front_index_ += 1; - if (front_index_ >= kMaxResults) { - front_index_ = 0; - } - size_ -= 1; - return result; - } - - // Most of the functions are duplicates of dequeue containers, but this - // is a helper that makes it easy to iterate through the contents of the - // queue. - Result& from_front(int offset) { - if ((offset < 0) || (offset >= size_)) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Attempt to read beyond the end of the queue!"); - offset = size_ - 1; - } - int index = front_index_ + offset; - if (index >= kMaxResults) { - index -= kMaxResults; - } - return results_[index]; - } - - private: - tflite::ErrorReporter* error_reporter_; - static constexpr int kMaxResults = 50; - Result results_[kMaxResults]; - - int front_index_; - int size_; -}; - -// This class is designed to apply a very primitive decoding model on top of the -// instantaneous results from running an audio recognition model on a single -// window of samples. 
It applies smoothing over time so that noisy individual -// label scores are averaged, increasing the confidence that apparent matches -// are real. -// To use it, you should create a class object with the configuration you -// want, and then feed results from running a TensorFlow model into the -// processing method. The timestamp for each subsequent call should be -// increasing from the previous, since the class is designed to process a stream -// of data over time. -class RecognizeCommands { - public: - // labels should be a list of the strings associated with each one-hot score. - // The window duration controls the smoothing. Longer durations will give a - // higher confidence that the results are correct, but may miss some commands. - // The detection threshold has a similar effect, with high values increasing - // the precision at the cost of recall. The minimum count controls how many - // results need to be in the averaging window before it's seen as a reliable - // average. This prevents erroneous results when the averaging window is - // initially being populated for example. The suppression argument disables - // further recognitions for a set time after one has been triggered, which can - // help reduce spurious recognitions. - explicit RecognizeCommands(tflite::ErrorReporter* error_reporter, - int32_t average_window_duration_ms = 1000, - uint8_t detection_threshold = 200, - int32_t suppression_ms = 1500, - int32_t minimum_count = 3); - - // Call this with the results of running a model on sample data. 
- TfLiteStatus ProcessLatestResults(const TfLiteTensor* latest_results, - const int32_t current_time_ms, - const char** found_command, uint8_t* score, - bool* is_new_command); - - private: - // Configuration - tflite::ErrorReporter* error_reporter_; - int32_t average_window_duration_ms_; - uint8_t detection_threshold_; - int32_t suppression_ms_; - int32_t minimum_count_; - - // Working variables - PreviousResultsQueue previous_results_; - const char* previous_top_label_; - int32_t previous_top_label_time_; -}; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_ diff --git a/examples/micro_speech/recognize_commands_test.cpp b/examples/micro_speech/recognize_commands_test.cpp deleted file mode 100644 index 3cedfa2..0000000 --- a/examples/micro_speech/recognize_commands_test.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "recognize_commands.h" - -#include "tensorflow/lite/micro/test_helpers.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(PreviousResultsQueueBasic) { - tflite::MicroErrorReporter micro_error_reporter; - - PreviousResultsQueue queue(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(0, queue.size()); - - int8_t scores_a[4] = {0, 0, 0, 1}; - queue.push_back({0, scores_a}); - TF_LITE_MICRO_EXPECT_EQ(1, queue.size()); - TF_LITE_MICRO_EXPECT_EQ(0, queue.front().time_); - TF_LITE_MICRO_EXPECT_EQ(0, queue.back().time_); - - int8_t scores_b[4] = {0, 0, 1, 0}; - queue.push_back({1, scores_b}); - TF_LITE_MICRO_EXPECT_EQ(2, queue.size()); - TF_LITE_MICRO_EXPECT_EQ(0, queue.front().time_); - TF_LITE_MICRO_EXPECT_EQ(1, queue.back().time_); - - PreviousResultsQueue::Result pop_result = queue.pop_front(); - TF_LITE_MICRO_EXPECT_EQ(0, pop_result.time_); - TF_LITE_MICRO_EXPECT_EQ(1, queue.size()); - TF_LITE_MICRO_EXPECT_EQ(1, queue.front().time_); - TF_LITE_MICRO_EXPECT_EQ(1, queue.back().time_); - - int8_t scores_c[4] = {0, 1, 0, 0}; - queue.push_back({2, scores_c}); - TF_LITE_MICRO_EXPECT_EQ(2, queue.size()); - TF_LITE_MICRO_EXPECT_EQ(1, queue.front().time_); - TF_LITE_MICRO_EXPECT_EQ(2, queue.back().time_); -} - -TF_LITE_MICRO_TEST(PreviousResultsQueuePushPop) { - tflite::MicroErrorReporter micro_error_reporter; - - PreviousResultsQueue queue(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(0, queue.size()); - - for (int i = 0; i < 123; ++i) { - int8_t scores[4] = {0, 0, 0, 1}; - queue.push_back({i, scores}); - TF_LITE_MICRO_EXPECT_EQ(1, queue.size()); - TF_LITE_MICRO_EXPECT_EQ(i, queue.front().time_); - TF_LITE_MICRO_EXPECT_EQ(i, queue.back().time_); - - PreviousResultsQueue::Result pop_result = queue.pop_front(); - TF_LITE_MICRO_EXPECT_EQ(i, pop_result.time_); - TF_LITE_MICRO_EXPECT_EQ(0, queue.size()); - } -} - 
-TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) { - tflite::MicroErrorReporter micro_error_reporter; - - RecognizeCommands recognize_commands(µ_error_reporter); - - const int8_t result_data[] = {127, -128, -128, -128}; - const int result_dims[] = {2, 1, 4}; - TfLiteTensor results = tflite::testing::CreateQuantizedTensor( - result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f, - 127.0f); - - const char* found_command; - uint8_t score; - bool is_new_command; - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &results, 0, &found_command, &score, &is_new_command)); -} - -TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) { - tflite::MicroErrorReporter micro_error_reporter; - - RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); - - const int8_t yes_data[] = {-128, -128, 127, -128}; - const int yes_dims[] = {2, 1, 4}; - TfLiteTensor yes_results = tflite::testing::CreateQuantizedTensor( - yes_data, tflite::testing::IntArrayFromInts(yes_dims), -128.0f, 127.0f); - - bool has_found_new_command = false; - const char* new_command; - for (int i = 0; i < 10; ++i) { - const char* found_command; - uint8_t score; - bool is_new_command; - int32_t current_time_ms = 0 + (i * 100); - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &yes_results, current_time_ms, &found_command, &score, - &is_new_command)); - if (is_new_command) { - TF_LITE_MICRO_EXPECT(!has_found_new_command); - has_found_new_command = true; - new_command = found_command; - } - } - TF_LITE_MICRO_EXPECT(has_found_new_command); - if (has_found_new_command) { - TF_LITE_MICRO_EXPECT_EQ(0, tflite::testing::TestStrcmp("yes", new_command)); - } - - const int8_t no_data[] = {-128, -128, -128, 127}; - const int no_dims[] = {2, 1, 4}; - TfLiteTensor no_results = tflite::testing::CreateQuantizedTensor( - no_data, tflite::testing::IntArrayFromInts(no_dims), -128.0f, 127.0f); - has_found_new_command = false; - new_command = ""; - 
uint8_t score; - for (int i = 0; i < 10; ++i) { - const char* found_command; - bool is_new_command; - int32_t current_time_ms = 1000 + (i * 100); - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &no_results, current_time_ms, &found_command, &score, - &is_new_command)); - if (is_new_command) { - TF_LITE_MICRO_EXPECT(!has_found_new_command); - has_found_new_command = true; - new_command = found_command; - } - } - TF_LITE_MICRO_EXPECT(has_found_new_command); - if (has_found_new_command) { - TF_LITE_MICRO_EXPECT_EQ(231, score); - TF_LITE_MICRO_EXPECT_EQ(0, tflite::testing::TestStrcmp("no", new_command)); - } -} - -TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) { - tflite::MicroErrorReporter micro_error_reporter; - - RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); - - const int8_t bad_data[] = {-128, -128, 127}; - const int bad_dims[] = {2, 1, 3}; - TfLiteTensor bad_results = tflite::testing::CreateQuantizedTensor( - bad_data, tflite::testing::IntArrayFromInts(bad_dims), -128.0f, 127.0f); - - const char* found_command; - uint8_t score; - bool is_new_command; - TF_LITE_MICRO_EXPECT_NE( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &bad_results, 0, &found_command, &score, &is_new_command)); -} - -TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) { - tflite::MicroErrorReporter micro_error_reporter; - - RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); - - const int8_t result_data[] = {-128, -128, 127, -128}; - const int result_dims[] = {2, 1, 4}; - TfLiteTensor results = tflite::testing::CreateQuantizedTensor( - result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f, - 127.0f); - - const char* found_command; - uint8_t score; - bool is_new_command; - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &results, 100, &found_command, &score, &is_new_command)); - TF_LITE_MICRO_EXPECT_NE( - kTfLiteOk, recognize_commands.ProcessLatestResults( - 
&results, 0, &found_command, &score, &is_new_command)); -} - -TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) { - tflite::MicroErrorReporter micro_error_reporter; - - RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); - - const int8_t result_data[] = {-128, -128, 127, -128}; - const int result_dims[] = {2, 1, 4}; - TfLiteTensor results = tflite::testing::CreateQuantizedTensor( - result_data, tflite::testing::IntArrayFromInts(result_dims), -128.0f, - 127.0f); - - const char* found_command; - uint8_t score; - bool is_new_command; - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, recognize_commands.ProcessLatestResults( - &results, 100, &found_command, &score, &is_new_command)); - TF_LITE_MICRO_EXPECT_EQ(0, score); - TF_LITE_MICRO_EXPECT_EQ(false, is_new_command); -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/simple_features/no_power_spectrum_data.cpp b/examples/micro_speech/simple_features/no_power_spectrum_data.cpp deleted file mode 100644 index 220f730..0000000 --- a/examples/micro_speech/simple_features/no_power_spectrum_data.cpp +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
- -#include "simple_features/no_power_spectrum_data.h" - -const uint8_t g_no_power_spectrum_data[g_no_power_spectrum_data_size] = { - 255, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -}; diff --git a/examples/micro_speech/simple_features/no_power_spectrum_data.h b/examples/micro_speech/simple_features/no_power_spectrum_data.h deleted file mode 100644 index f203623..0000000 --- a/examples/micro_speech/simple_features/no_power_spectrum_data.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was extracted from the larger feature data held in -// no_features_data.cc and consists of the 29th spectrogram slice of 43 values. -// This is the expected result of running the sample data in -// no_30ms_sample_data.cc through the preprocessing pipeline. 
- -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_ - -#include - -constexpr int g_no_power_spectrum_data_size = 43; -extern const uint8_t g_no_power_spectrum_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_ diff --git a/examples/micro_speech/simple_features/simple_features_generator.cpp b/examples/micro_speech/simple_features/simple_features_generator.cpp deleted file mode 100644 index 8b26165..0000000 --- a/examples/micro_speech/simple_features/simple_features_generator.cpp +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Reference implementation of the preprocessing pipeline, with the same -// results as the audio tutorial at -// https://www.tensorflow.org/tutorials/sequences/audio_recognition -// This module takes 30ms of PCM-encoded signed 16-bit audio samples (at 16KHz, -// so 480 values), and extracts a power spectrum of frequencies. There are 43 -// frequency bands in the result, derived from the original 256 output from the -// discrete Fourier transform, and averaged together in groups of 6. 
-// It's expected that most platforms will have optimized versions of the -// functions used here, for example replacing the DFT with an FFT, so this -// version shouldn't be used where performance is critical. - -#include "simple_features/simple_features_generator.h" - -#include - -#include "simple_features/simple_model_settings.h" - -namespace { - -// Needed because some platforms don't have M_PI defined. -constexpr float kPi = 3.14159265358979323846f; - -// Performs a discrete Fourier transform on the real inputs. This corresponds to -// rdft() in the FFT package at http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html, -// and to kiss_fftr() in KISSFFT at https://github.com/mborgerding/kissfft. -// It takes in an array of float real values, and returns a result of the same -// length with float real and imaginary components interleaved, so -// fourier_output[0] is the first real value, fourier_output[1] is the first -// imaginary, fourier_output[2] is the second real, and so on. -// The calling function should ensure that the array passed in as fourier_output -// is at least time_series_size in length. Most optimized FFT implementations -// require the length to be a power of two as well, but this version doesn't -// enforce that. -void CalculateDiscreteFourierTransform(float* time_series, int time_series_size, - float* fourier_output) { - for (int i = 0; i < time_series_size / 2; ++i) { - float real = 0; - for (int j = 0; j < time_series_size; ++j) { - real += time_series[j] * std::cos(j * i * kPi * 2 / time_series_size); - } - float imaginary = 0; - for (int j = 0; j < time_series_size; ++j) { - imaginary -= - time_series[j] * std::sin(j * i * kPi * 2 / time_series_size); - } - fourier_output[(i * 2) + 0] = real; - fourier_output[(i * 2) + 1] = imaginary; - } -} - -// Produces a simple sine curve that is used to ensure frequencies at the center -// of the current sample window are weighted more heavily than those at the end. 
-void CalculatePeriodicHann(int window_length, float* window_function) { - for (int i = 0; i < window_length; ++i) { - window_function[i] = 0.5f - 0.5f * std::cos((2 * kPi * i) / window_length); - } -} - -} // namespace - -TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter, - const int16_t* input, int input_size, - int output_size, uint8_t* output) { - // Ensure our input and output data arrays are valid. - if (input_size > kMaxAudioSampleSize) { - TF_LITE_REPORT_ERROR(error_reporter, "Input size %d larger than %d", - input_size, kMaxAudioSampleSize); - return kTfLiteError; - } - if (output_size != kFeatureSliceSize) { - TF_LITE_REPORT_ERROR(error_reporter, - "Requested output size %d doesn't match %d", - output_size, kFeatureSliceSize); - return kTfLiteError; - } - - // Pre-calculate the window function we'll be applying to the input data. - // In a real application, we'd calculate this table once in an initialization - // function and store it for repeated reuse. - float window_function[kMaxAudioSampleSize]; - CalculatePeriodicHann(input_size, window_function); - - // Apply the window function to our time series input, and pad it with zeroes - // to the next power of two. - float float_input[kMaxAudioSampleSize]; - for (int i = 0; i < kMaxAudioSampleSize; ++i) { - if (i < input_size) { - float_input[i] = - (input[i] * window_function[i]) / static_cast(1 << 15); - } else { - float_input[i] = 0.0f; - } - } - - // Pull the frequency data from the time series sample. - float fourier_values[kMaxAudioSampleSize]; - CalculateDiscreteFourierTransform(float_input, kMaxAudioSampleSize, - fourier_values); - - // We have the complex numbers giving us information about each frequency - // band, but all we want to know is how strong each frequency is, so calculate - // the squared magnitude by adding together the squares of each component. 
- float power_spectrum[kMaxAudioSampleSize / 2]; - for (int i = 0; i < (kMaxAudioSampleSize / 2); ++i) { - const float real = fourier_values[(i * 2) + 0]; - const float imaginary = fourier_values[(i * 2) + 1]; - power_spectrum[i] = (real * real) + (imaginary * imaginary); - } - - // Finally, reduce the size of the output by averaging together six adjacent - // frequencies into each slot, producing an array of 43 values. - for (int i = 0; i < kFeatureSliceSize; ++i) { - float total = 0.0f; - for (int j = 0; j < kAverageWindowSize; ++j) { - const int index = (i * kAverageWindowSize) + j; - if (index < (kMaxAudioSampleSize / 2)) { - total += power_spectrum[index]; - } - } - const float average = total / kAverageWindowSize; - // Quantize the result into eight bits, effectively multiplying by two. - // The 127.5 constant here has to match the features_max value defined in - // tensorflow/examples/speech_commands/input_data.py, and this also assumes - // that features_min is zero. If it wasn't, we'd have to subtract it first. - int quantized_average = roundf(average * (255.0f / 127.5f)); - if (quantized_average < 0) { - quantized_average = 0; - } - if (quantized_average > 255) { - quantized_average = 255; - } - output[i] = quantized_average; - } - return kTfLiteOk; -} diff --git a/examples/micro_speech/simple_features/simple_features_generator.h b/examples/micro_speech/simple_features/simple_features_generator.h deleted file mode 100644 index 6d47bc4..0000000 --- a/examples/micro_speech/simple_features/simple_features_generator.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -// Converts audio sample data into a more compact form that's appropriate for -// feeding into a neural network. There are reference implementations that use -// both floating point and fixed point available, but because the calculations -// involved can be time-consuming, it's recommended that you use or write -// specialized versions for your platform. -TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter, - const int16_t* input, int input_size, - int output_size, uint8_t* output); - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_ diff --git a/examples/micro_speech/simple_features/simple_features_generator_test.cpp b/examples/micro_speech/simple_features/simple_features_generator_test.cpp deleted file mode 100644 index da957c6..0000000 --- a/examples/micro_speech/simple_features/simple_features_generator_test.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "simple_features/simple_features_generator.h" - -#include "tensorflow/lite/c/common.h" -#include "no_30ms_sample_data.h" -#include "simple_features/no_power_spectrum_data.h" -#include "simple_features/yes_power_spectrum_data.h" -#include "yes_30ms_sample_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) { - tflite::MicroErrorReporter micro_error_reporter; - - uint8_t yes_calculated_data[g_yes_power_spectrum_data_size]; - TfLiteStatus yes_status = GenerateSimpleFeatures( - µ_error_reporter, g_yes_30ms_sample_data, - g_yes_30ms_sample_data_size, g_yes_power_spectrum_data_size, - yes_calculated_data); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status); - - for (int i = 0; i < g_yes_power_spectrum_data_size; ++i) { - TF_LITE_MICRO_EXPECT_EQ(g_yes_power_spectrum_data[i], - yes_calculated_data[i]); - if (g_yes_power_spectrum_data[i] != yes_calculated_data[i]) { - TF_LITE_REPORT_ERROR( - µ_error_reporter, "Expected value %d but found %d", - g_yes_power_spectrum_data[i], yes_calculated_data[i]); - } - } - - uint8_t no_calculated_data[g_yes_power_spectrum_data_size]; - TfLiteStatus no_status = GenerateSimpleFeatures( - µ_error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, - g_no_power_spectrum_data_size, no_calculated_data); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status); - - for (int i = 0; i < g_no_power_spectrum_data_size; 
++i) { - TF_LITE_MICRO_EXPECT_EQ(g_no_power_spectrum_data[i], no_calculated_data[i]); - if (g_no_power_spectrum_data[i] != no_calculated_data[i]) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Expected value %d but found %d", - g_no_power_spectrum_data[i], no_calculated_data[i]); - } - } -} - -TF_LITE_MICRO_TESTS_END diff --git a/examples/micro_speech/simple_features/simple_model_settings.h b/examples/micro_speech/simple_features/simple_model_settings.h deleted file mode 100644 index 9d129c8..0000000 --- a/examples/micro_speech/simple_features/simple_model_settings.h +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_ - -// Keeping these as constant expressions allow us to allocate fixed-sized arrays -// on the stack for our working memory. - -// The size of the input time series data we pass to the FFT to produce the -// frequency information. This has to be a power of two, and since we're dealing -// with 30ms of 16KHz inputs, which means 480 samples, this is the next value. 
-constexpr int kMaxAudioSampleSize = 512; -constexpr int kAudioSampleFrequency = 16000; - -// All of these values are derived from the values used during model training, -// if you change your model you'll need to update these constants. -constexpr int kAverageWindowSize = 6; -constexpr int kFeatureSliceSize = - ((kMaxAudioSampleSize / 2) + (kAverageWindowSize - 1)) / kAverageWindowSize; -constexpr int kFeatureSliceCount = 49; -constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount); -constexpr int kFeatureSliceStrideMs = 20; -constexpr int kFeatureSliceDurationMs = 30; - -constexpr int kCategoryCount = 4; -constexpr int kSilenceIndex = 0; -constexpr int kUnknownIndex = 1; -extern const char* kCategoryLabels[kCategoryCount]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_MODEL_SETTINGS_H_ diff --git a/examples/micro_speech/simple_features/yes_power_spectrum_data.cpp b/examples/micro_speech/simple_features/yes_power_spectrum_data.cpp deleted file mode 100644 index 302cf2d..0000000 --- a/examples/micro_speech/simple_features/yes_power_spectrum_data.cpp +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
- -#include "simple_features/yes_power_spectrum_data.h" - -const uint8_t g_yes_power_spectrum_data[g_yes_power_spectrum_data_size] = { - 8, 89, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 13, 1, 6, 23, 20, 6, 4, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -}; diff --git a/examples/micro_speech/simple_features/yes_power_spectrum_data.h b/examples/micro_speech/simple_features/yes_power_spectrum_data.h deleted file mode 100644 index 5264e62..0000000 --- a/examples/micro_speech/simple_features/yes_power_spectrum_data.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was extracted from the larger feature data held in -// no_features_data.cc and consists of the 26th spectrogram slice of 43 values. -// This is the expected result of running the sample data in -// yes_30ms_sample_data.cc through the preprocessing pipeline. 
- -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_ - -#include - -constexpr int g_yes_power_spectrum_data_size = 43; -extern const uint8_t g_yes_power_spectrum_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/bits.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/bits.h deleted file mode 100644 index 04b3ba6..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/bits.h +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ - -#ifdef __cplusplus -#include - -extern "C" { -#endif - -static inline int CountLeadingZeros32Slow(uint64_t n) { - int zeroes = 28; - if (n >> 16) zeroes -= 16, n >>= 16; - if (n >> 8) zeroes -= 8, n >>= 8; - if (n >> 4) zeroes -= 4, n >>= 4; - return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; -} - -static inline int CountLeadingZeros32(uint32_t n) { -#if defined(_MSC_VER) - unsigned long result = 0; // NOLINT(runtime/int) - if (_BitScanReverse(&result, n)) { - return 31 - result; - } - return 32; -#elif defined(__GNUC__) - - // Handle 0 as a special case because __builtin_clz(0) is undefined. - if (n == 0) { - return 32; - } - return __builtin_clz(n); -#else - return CountLeadingZeros32Slow(n); -#endif -} - -static inline int MostSignificantBit32(uint32_t n) { - return 32 - CountLeadingZeros32(n); -} - -static inline int CountLeadingZeros64Slow(uint64_t n) { - int zeroes = 60; - if (n >> 32) zeroes -= 32, n >>= 32; - if (n >> 16) zeroes -= 16, n >>= 16; - if (n >> 8) zeroes -= 8, n >>= 8; - if (n >> 4) zeroes -= 4, n >>= 4; - return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; -} - -static inline int CountLeadingZeros64(uint64_t n) { -#if defined(_MSC_VER) && defined(_M_X64) - // MSVC does not have __builtin_clzll. Use _BitScanReverse64. - unsigned long result = 0; // NOLINT(runtime/int) - if (_BitScanReverse64(&result, n)) { - return 63 - result; - } - return 64; -#elif defined(_MSC_VER) - // MSVC does not have __builtin_clzll. 
Compose two calls to _BitScanReverse - unsigned long result = 0; // NOLINT(runtime/int) - if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { - return 31 - result; - } - if (_BitScanReverse(&result, n)) { - return 63 - result; - } - return 64; -#elif defined(__GNUC__) - - // Handle 0 as a special case because __builtin_clzll(0) is undefined. - if (n == 0) { - return 64; - } - return __builtin_clzll(n); -#else - return CountLeadingZeros64Slow(n); -#endif -} - -static inline int MostSignificantBit64(uint64_t n) { - return 64 - CountLeadingZeros64(n); -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.cpp b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.cpp deleted file mode 100644 index 22c6ae9..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/fft.h" - -#include - -#define FIXED_POINT 16 -#include "kiss_fft.h" -#include "tools/kiss_fftr.h" - -void FftCompute(struct FftState* state, const int16_t* input, - int input_scale_shift) { - const size_t input_size = state->input_size; - const size_t fft_size = state->fft_size; - - int16_t* fft_input = state->input; - // First, scale the input by the given shift. - size_t i; - for (i = 0; i < input_size; ++i) { - fft_input[i] = static_cast(static_cast(input[i]) - << input_scale_shift); - } - // Zero out whatever else remains in the top part of the input. - for (; i < fft_size; ++i) { - fft_input[i] = 0; - } - - // Apply the FFT. - kiss_fftr(reinterpret_cast(state->scratch), - state->input, - reinterpret_cast(state->output)); -} - -void FftInit(struct FftState* state) { - // All the initialization is done in FftPopulateState() -} - -void FftReset(struct FftState* state) { - memset(state->input, 0, state->fft_size * sizeof(*state->input)); - memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output)); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.h deleted file mode 100644 index aaffa69..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -struct complex_int16_t { - int16_t real; - int16_t imag; -}; - -struct FftState { - int16_t* input; - struct complex_int16_t* output; - size_t fft_size; - size_t input_size; - void* scratch; - size_t scratch_size; -}; - -void FftCompute(struct FftState* state, const int16_t* input, - int input_scale_shift); - -void FftInit(struct FftState* state); - -void FftReset(struct FftState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp deleted file mode 100644 index d516b46..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.cpp +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h" - -#include - -#define FIXED_POINT 16 -#include "kiss_fft.h" -#include "tools/kiss_fftr.h" - -int FftPopulateState(struct FftState* state, size_t input_size) { - state->input_size = input_size; - state->fft_size = 1; - while (state->fft_size < state->input_size) { - state->fft_size <<= 1; - } - - state->input = reinterpret_cast( - malloc(state->fft_size * sizeof(*state->input))); - if (state->input == nullptr) { - fprintf(stderr, "Failed to alloc fft input buffer\n"); - return 0; - } - - state->output = reinterpret_cast( - malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2)); - if (state->output == nullptr) { - fprintf(stderr, "Failed to alloc fft output buffer\n"); - return 0; - } - - // Ask kissfft how much memory it wants. - size_t scratch_size = 0; - kiss_fftr_cfg kfft_cfg = kiss_fftr_alloc( - state->fft_size, 0, nullptr, &scratch_size); - if (kfft_cfg != nullptr) { - fprintf(stderr, "Kiss memory sizing failed.\n"); - return 0; - } - state->scratch = malloc(scratch_size); - if (state->scratch == nullptr) { - fprintf(stderr, "Failed to alloc fft scratch buffer\n"); - return 0; - } - state->scratch_size = scratch_size; - // Let kissfft configure the scratch space we just allocated - kfft_cfg = kiss_fftr_alloc(state->fft_size, 0, - state->scratch, &scratch_size); - if (kfft_cfg != state->scratch) { - fprintf(stderr, "Kiss memory preallocation strategy failed.\n"); - return 0; - } - return 1; -} - -void FftFreeStateContents(struct FftState* state) { - free(state->input); - free(state->output); - free(state->scratch); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.h deleted file mode 100644 index 6a47130..0000000 --- 
a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/fft.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// Prepares and FFT for the given input size. -int FftPopulateState(struct FftState* state, size_t input_size); - -// Frees any allocated buffers. -void FftFreeStateContents(struct FftState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.c deleted file mode 100644 index 80f8738..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.c +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h" - -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/bits.h" - -void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state, - struct complex_int16_t* fft_output, - int32_t* energy) { - const int end_index = state->end_index; - int i; - energy += state->start_index; - fft_output += state->start_index; - for (i = state->start_index; i < end_index; ++i) { - const int32_t real = fft_output->real; - const int32_t imag = fft_output->imag; - fft_output++; - const uint32_t mag_squared = (real * real) + (imag * imag); - *energy++ = mag_squared; - } -} - -void FilterbankAccumulateChannels(struct FilterbankState* state, - const int32_t* energy) { - uint64_t* work = state->work; - uint64_t weight_accumulator = 0; - uint64_t unweight_accumulator = 0; - - const int16_t* channel_frequency_starts = state->channel_frequency_starts; - const int16_t* channel_weight_starts = state->channel_weight_starts; - const int16_t* channel_widths = state->channel_widths; - - int num_channels_plus_1 = state->num_channels + 1; - int i; - for (i = 0; i < num_channels_plus_1; ++i) { - const int32_t* magnitudes = energy + *channel_frequency_starts++; - const int16_t* weights = state->weights + *channel_weight_starts; - const int16_t* unweights = state->unweights + *channel_weight_starts++; - const int width = *channel_widths++; - int j; - for (j = 0; j < width; ++j) { - weight_accumulator += *weights++ * 
((uint64_t)*magnitudes); - unweight_accumulator += *unweights++ * ((uint64_t)*magnitudes); - ++magnitudes; - } - *work++ = weight_accumulator; - weight_accumulator = unweight_accumulator; - unweight_accumulator = 0; - } -} - -static uint16_t Sqrt32(uint32_t num) { - if (num == 0) { - return 0; - } - uint32_t res = 0; - int max_bit_number = 32 - MostSignificantBit32(num); - max_bit_number |= 1; - uint32_t bit = 1U << (31 - max_bit_number); - int iterations = (31 - max_bit_number) / 2 + 1; - while (iterations--) { - if (num >= res + bit) { - num -= res + bit; - res = (res >> 1U) + bit; - } else { - res >>= 1U; - } - bit >>= 2U; - } - // Do rounding - if we have the bits. - if (num > res && res != 0xFFFF) { - ++res; - } - return res; -} - -static uint32_t Sqrt64(uint64_t num) { - // Take a shortcut and just use 32 bit operations if the upper word is all - // clear. This will cause a slight off by one issue for numbers close to 2^32, - // but it probably isn't going to matter (and gives us a big performance win). - if ((num >> 32) == 0) { - return Sqrt32((uint32_t)num); - } - uint64_t res = 0; - int max_bit_number = 64 - MostSignificantBit64(num); - max_bit_number |= 1; - uint64_t bit = 1ULL << (63 - max_bit_number); - int iterations = (63 - max_bit_number) / 2 + 1; - while (iterations--) { - if (num >= res + bit) { - num -= res + bit; - res = (res >> 1U) + bit; - } else { - res >>= 1U; - } - bit >>= 2U; - } - // Do rounding - if we have the bits. - if (num > res && res != 0xFFFFFFFFLL) { - ++res; - } - return res; -} - -uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift) { - const int num_channels = state->num_channels; - const uint64_t* work = state->work + 1; - // Reuse the work buffer since we're fine clobbering it at this point to hold - // the output. 
- uint32_t* output = (uint32_t*)state->work; - int i; - for (i = 0; i < num_channels; ++i) { - *output++ = Sqrt64(*work++) >> scale_down_shift; - } - return (uint32_t*)state->work; -} - -void FilterbankReset(struct FilterbankState* state) { - memset(state->work, 0, (state->num_channels + 1) * sizeof(*state->work)); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.h deleted file mode 100644 index 1e6d388..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ - -#include -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/fft.h" - -#define kFilterbankBits 12 - -#ifdef __cplusplus -extern "C" { -#endif - -struct FilterbankState { - int num_channels; - int start_index; - int end_index; - int16_t* channel_frequency_starts; - int16_t* channel_weight_starts; - int16_t* channel_widths; - int16_t* weights; - int16_t* unweights; - uint64_t* work; -}; - -// Converts the relevant complex values of an FFT output into energy (the -// square magnitude). 
-void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state, - struct complex_int16_t* fft_output, - int32_t* energy); - -// Computes the mel-scale filterbank on the given energy array. Output is cached -// internally - to fetch it, you need to call FilterbankSqrt. -void FilterbankAccumulateChannels(struct FilterbankState* state, - const int32_t* energy); - -// Applies an integer square root to the 64 bit intermediate values of the -// filterbank, and returns a pointer to them. Memory will be invalidated the -// next time FilterbankAccumulateChannels is called. -uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift); - -void FilterbankReset(struct FilterbankState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c deleted file mode 100644 index f18ebf5..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c +++ /dev/null @@ -1,220 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h" - -#include -#include -#include - -#define kFilterbankIndexAlignment 4 -#define kFilterbankChannelBlockSize 4 - -void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) { - config->num_channels = 32; - config->lower_band_limit = 125.0f; - config->upper_band_limit = 7500.0f; - config->output_scale_shift = 7; -} - -static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); } - -static void CalculateCenterFrequencies(const int num_channels, - const float lower_frequency_limit, - const float upper_frequency_limit, - float* center_frequencies) { - assert(lower_frequency_limit >= 0.0f); - assert(upper_frequency_limit > lower_frequency_limit); - - const float mel_low = FreqToMel(lower_frequency_limit); - const float mel_hi = FreqToMel(upper_frequency_limit); - const float mel_span = mel_hi - mel_low; - const float mel_spacing = mel_span / ((float)num_channels); - int i; - for (i = 0; i < num_channels; ++i) { - center_frequencies[i] = mel_low + (mel_spacing * (i + 1)); - } -} - -static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight, - int16_t* unweight) { - *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5); - *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5); -} - -int FilterbankPopulateState(const struct FilterbankConfig* config, - struct FilterbankState* state, int sample_rate, - int spectrum_size) { - state->num_channels = config->num_channels; - const int num_channels_plus_1 = config->num_channels + 1; - - // How should we align things to index counts given the byte alignment? - const int index_alignment = - (kFilterbankIndexAlignment < sizeof(int16_t) - ? 
1 - : kFilterbankIndexAlignment / sizeof(int16_t)); - - state->channel_frequency_starts = - malloc(num_channels_plus_1 * sizeof(*state->channel_frequency_starts)); - state->channel_weight_starts = - malloc(num_channels_plus_1 * sizeof(*state->channel_weight_starts)); - state->channel_widths = - malloc(num_channels_plus_1 * sizeof(*state->channel_widths)); - state->work = malloc(num_channels_plus_1 * sizeof(*state->work)); - - float* center_mel_freqs = - malloc(num_channels_plus_1 * sizeof(*center_mel_freqs)); - int16_t* actual_channel_starts = - malloc(num_channels_plus_1 * sizeof(*actual_channel_starts)); - int16_t* actual_channel_widths = - malloc(num_channels_plus_1 * sizeof(*actual_channel_widths)); - - if (state->channel_frequency_starts == NULL || - state->channel_weight_starts == NULL || state->channel_widths == NULL || - center_mel_freqs == NULL || actual_channel_starts == NULL || - actual_channel_widths == NULL) { - free(center_mel_freqs); - free(actual_channel_starts); - free(actual_channel_widths); - fprintf(stderr, "Failed to allocate channel buffers\n"); - return 0; - } - - CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit, - config->upper_band_limit, center_mel_freqs); - - // Always exclude DC. - const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1); - state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin; - state->end_index = 0; // Initialized to zero here, but actually set below. - - // For each channel, we need to figure out what frequencies belong to it, and - // how much padding we need to add so that we can efficiently multiply the - // weights and unweights for accumulation. To simplify the multiplication - // logic, all channels will have some multiplication to do (even if there are - // no frequencies that accumulate to that channel) - they will be directed to - // a set of zero weights. 
- int chan_freq_index_start = state->start_index; - int weight_index_start = 0; - int needs_zeros = 0; - - int chan; - for (chan = 0; chan < num_channels_plus_1; ++chan) { - // Keep jumping frequencies until we overshoot the bound on this channel. - int freq_index = chan_freq_index_start; - while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) { - ++freq_index; - } - - const int width = freq_index - chan_freq_index_start; - actual_channel_starts[chan] = chan_freq_index_start; - actual_channel_widths[chan] = width; - - if (width == 0) { - // This channel doesn't actually get anything from the frequencies, it's - // always zero. We need then to insert some 'zero' weights into the - // output, and just redirect this channel to do a single multiplication at - // this point. For simplicity, the zeros are placed at the beginning of - // the weights arrays, so we have to go and update all the other - // weight_starts to reflect this shift (but only once). - state->channel_frequency_starts[chan] = 0; - state->channel_weight_starts[chan] = 0; - state->channel_widths[chan] = kFilterbankChannelBlockSize; - if (!needs_zeros) { - needs_zeros = 1; - int j; - for (j = 0; j < chan; ++j) { - state->channel_weight_starts[j] += kFilterbankChannelBlockSize; - } - weight_index_start += kFilterbankChannelBlockSize; - } - } else { - // How far back do we need to go to ensure that we have the proper - // alignment? 
- const int aligned_start = - (chan_freq_index_start / index_alignment) * index_alignment; - const int aligned_width = (chan_freq_index_start - aligned_start + width); - const int padded_width = - (((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) * - kFilterbankChannelBlockSize; - - state->channel_frequency_starts[chan] = aligned_start; - state->channel_weight_starts[chan] = weight_index_start; - state->channel_widths[chan] = padded_width; - weight_index_start += padded_width; - } - chan_freq_index_start = freq_index; - } - - // Allocate the two arrays to store the weights - weight_index_start contains - // the index of what would be the next set of weights that we would need to - // add, so that's how many weights we need to allocate. - state->weights = calloc(weight_index_start, sizeof(*state->weights)); - state->unweights = calloc(weight_index_start, sizeof(*state->unweights)); - - // If the alloc failed, we also need to nuke the arrays. - if (state->weights == NULL || state->unweights == NULL) { - free(center_mel_freqs); - free(actual_channel_starts); - free(actual_channel_widths); - fprintf(stderr, "Failed to allocate weights or unweights\n"); - return 0; - } - - // Next pass, compute all the weights. Since everything has been memset to - // zero, we only need to fill in the weights that correspond to some frequency - // for a channel. - const float mel_low = FreqToMel(config->lower_band_limit); - for (chan = 0; chan < num_channels_plus_1; ++chan) { - int frequency = actual_channel_starts[chan]; - const int num_frequencies = actual_channel_widths[chan]; - const int frequency_offset = - frequency - state->channel_frequency_starts[chan]; - const int weight_start = state->channel_weight_starts[chan]; - const float denom_val = (chan == 0) ? 
mel_low : center_mel_freqs[chan - 1]; - - int j; - for (j = 0; j < num_frequencies; ++j, ++frequency) { - const float weight = - (center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) / - (center_mel_freqs[chan] - denom_val); - - // Make the float into an integer for the weights (and unweights). - const int weight_index = weight_start + frequency_offset + j; - QuantizeFilterbankWeights(weight, state->weights + weight_index, - state->unweights + weight_index); - } - if (frequency > state->end_index) { - state->end_index = frequency; - } - } - - free(center_mel_freqs); - free(actual_channel_starts); - free(actual_channel_widths); - if (state->end_index >= spectrum_size) { - fprintf(stderr, "Filterbank end_index is above spectrum size.\n"); - return 0; - } - return 1; -} - -void FilterbankFreeStateContents(struct FilterbankState* state) { - free(state->channel_frequency_starts); - free(state->channel_weight_starts); - free(state->channel_widths); - free(state->weights); - free(state->unweights); - free(state->work); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h deleted file mode 100644 index 781d102..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct FilterbankConfig { - // number of frequency channel buckets for filterbank - int num_channels; - // maximum frequency to include - float upper_band_limit; - // minimum frequency to include - float lower_band_limit; - // unused - int output_scale_shift; -}; - -// Fills the frontendConfig with "sane" defaults. -void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config); - -// Allocates any buffers. -int FilterbankPopulateState(const struct FilterbankConfig* config, - struct FilterbankState* state, int sample_rate, - int spectrum_size); - -// Frees any allocated buffers. -void FilterbankFreeStateContents(struct FilterbankState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.c deleted file mode 100644 index 9de2a87..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.c +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h" - -#include "tensorflow/lite/experimental/microfrontend/lib/bits.h" - -struct FrontendOutput FrontendProcessSamples(struct FrontendState* state, - const int16_t* samples, - size_t num_samples, - size_t* num_samples_read) { - struct FrontendOutput output; - output.values = NULL; - output.size = 0; - - // Try to apply the window - if it fails, return and wait for more data. - if (!WindowProcessSamples(&state->window, samples, num_samples, - num_samples_read)) { - return output; - } - - // Apply the FFT to the window's output (and scale it so that the fixed point - // FFT can have as much resolution as possible). - int input_shift = - 15 - MostSignificantBit32(state->window.max_abs_output_value); - FftCompute(&state->fft, state->window.output, input_shift); - - // We can re-ruse the fft's output buffer to hold the energy. - int32_t* energy = (int32_t*)state->fft.output; - - FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output, - energy); - - FilterbankAccumulateChannels(&state->filterbank, energy); - uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift); - - // Apply noise reduction. - NoiseReductionApply(&state->noise_reduction, scaled_filterbank); - - if (state->pcan_gain_control.enable_pcan) { - PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank); - } - - // Apply the log and scale. 
- int correction_bits = - MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2); - uint16_t* logged_filterbank = - LogScaleApply(&state->log_scale, scaled_filterbank, - state->filterbank.num_channels, correction_bits); - - output.size = state->filterbank.num_channels; - output.values = logged_filterbank; - return output; -} - -void FrontendReset(struct FrontendState* state) { - WindowReset(&state->window); - FftReset(&state->fft); - FilterbankReset(&state->filterbank); - NoiseReductionReset(&state->noise_reduction); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.h deleted file mode 100644 index 883df5f..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ - -#include -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/fft.h" -#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h" -#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" -#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" -#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" -#include "tensorflow/lite/experimental/microfrontend/lib/window.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct FrontendState { - struct WindowState window; - struct FftState fft; - struct FilterbankState filterbank; - struct NoiseReductionState noise_reduction; - struct PcanGainControlState pcan_gain_control; - struct LogScaleState log_scale; -}; - -struct FrontendOutput { - const uint16_t* values; - size_t size; -}; - -// Main entry point to processing frontend samples. Updates num_samples_read to -// contain the number of samples that have been consumed from the input array. -// Returns a struct containing the generated output. If not enough samples were -// added to generate a feature vector, the returned size will be 0 and the -// values pointer will be NULL. Note that the output pointer will be invalidated -// as soon as FrontendProcessSamples is called again, so copy the contents -// elsewhere if you need to use them later. 
-struct FrontendOutput FrontendProcessSamples(struct FrontendState* state, - const int16_t* samples, - size_t num_samples, - size_t* num_samples_read); - -void FrontendReset(struct FrontendState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c deleted file mode 100644 index 27224f6..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h" - -#include -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/bits.h" - -void FrontendFillConfigWithDefaults(struct FrontendConfig* config) { - WindowFillConfigWithDefaults(&config->window); - FilterbankFillConfigWithDefaults(&config->filterbank); - NoiseReductionFillConfigWithDefaults(&config->noise_reduction); - PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control); - LogScaleFillConfigWithDefaults(&config->log_scale); -} - -int FrontendPopulateState(const struct FrontendConfig* config, - struct FrontendState* state, int sample_rate) { - memset(state, 0, sizeof(*state)); - - if (!WindowPopulateState(&config->window, &state->window, sample_rate)) { - fprintf(stderr, "Failed to populate window state\n"); - return 0; - } - - if (!FftPopulateState(&state->fft, state->window.size)) { - fprintf(stderr, "Failed to populate fft state\n"); - return 0; - } - FftInit(&state->fft); - - if (!FilterbankPopulateState(&config->filterbank, &state->filterbank, - sample_rate, state->fft.fft_size / 2 + 1)) { - fprintf(stderr, "Failed to populate filterbank state\n"); - return 0; - } - - if (!NoiseReductionPopulateState(&config->noise_reduction, - &state->noise_reduction, - state->filterbank.num_channels)) { - fprintf(stderr, "Failed to populate noise reduction state\n"); - return 0; - } - - int input_correction_bits = - MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2); - if (!PcanGainControlPopulateState( - &config->pcan_gain_control, &state->pcan_gain_control, - state->noise_reduction.estimate, state->filterbank.num_channels, - state->noise_reduction.smoothing_bits, input_correction_bits)) { - fprintf(stderr, "Failed to populate pcan gain control state\n"); - return 0; - } - - if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) { - fprintf(stderr, "Failed to 
populate log scale state\n"); - return 0; - } - - FrontendReset(state); - - // All good, return a true value. - return 1; -} - -void FrontendFreeStateContents(struct FrontendState* state) { - WindowFreeStateContents(&state->window); - FftFreeStateContents(&state->fft); - FilterbankFreeStateContents(&state->filterbank); - NoiseReductionFreeStateContents(&state->noise_reduction); - PcanGainControlFreeStateContents(&state->pcan_gain_control); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h deleted file mode 100644 index 895ce6c..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h" -#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h" -#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h" -#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h" -#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h" -#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h" -#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct FrontendConfig { - struct WindowConfig window; - struct FilterbankConfig filterbank; - struct NoiseReductionConfig noise_reduction; - struct PcanGainControlConfig pcan_gain_control; - struct LogScaleConfig log_scale; -}; - -// Fills the frontendConfig with "sane" defaults. -void FrontendFillConfigWithDefaults(struct FrontendConfig* config); - -// Allocates any buffers. -int FrontendPopulateState(const struct FrontendConfig* config, - struct FrontendState* state, int sample_rate); - -// Frees any allocated buffers. -void FrontendFreeStateContents(struct FrontendState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.c deleted file mode 100644 index f59618e..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.c +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h" -const uint16_t kLogLut[] -#ifndef _MSC_VER - __attribute__((aligned(4))) -#endif // _MSV_VER - = {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163, - 2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848, - 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934, - 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507, - 5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633, - 5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370, - 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762, - 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848, - 3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659, - 2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224, - 1094, 963, 830, 695, 559, 421, 282, 142, 0, 0}; diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.h deleted file mode 100644 index b2448a3..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -// Number of segments in the log lookup table. The table will be kLogSegments+1 -// in length (with some padding). -#define kLogSegments 128 -#define kLogSegmentsLog2 7 - -// Scale used by lookup table. -#define kLogScale 65536 -#define kLogScaleLog2 16 -#define kLogCoeff 45426 - -extern const uint16_t kLogLut[]; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.c deleted file mode 100644 index c27a50a..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.c +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" - -#include "tensorflow/lite/experimental/microfrontend/lib/bits.h" -#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h" - -#define kuint16max 0x0000FFFF - -// The following functions implement integer logarithms of various sizes. The -// approximation is calculated according to method described in -// www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/ -// publicaciones/SPL2007/Log10-spl07.pdf -// It first calculates log2 of the input and then converts it to natural -// logarithm. 
- -static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) { - // Part 1 - int32_t frac = x - (1LL << log2x); - if (log2x < kLogScaleLog2) { - frac <<= kLogScaleLog2 - log2x; - } else { - frac >>= log2x - kLogScaleLog2; - } - // Part 2 - const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2); - const uint32_t seg_unit = - (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2; - - const int32_t c0 = kLogLut[base_seg]; - const int32_t c1 = kLogLut[base_seg + 1]; - const int32_t seg_base = seg_unit * base_seg; - const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2; - return frac + c0 + rel_pos; -} - -static uint32_t Log(const uint32_t x, const uint32_t scale_shift) { - const uint32_t integer = MostSignificantBit32(x) - 1; - const uint32_t fraction = Log2FractionPart(x, integer); - const uint32_t log2 = (integer << kLogScaleLog2) + fraction; - const uint32_t round = kLogScale / 2; - const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2; - // Finally scale to our output scale - const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2; - return loge_scaled; -} - -uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal, - int signal_size, int correction_bits) { - const int scale_shift = state->scale_shift; - uint16_t* output = (uint16_t*)signal; - uint16_t* ret = output; - int i; - for (i = 0; i < signal_size; ++i) { - uint32_t value = *signal++; - if (state->enable_log) { - if (correction_bits < 0) { - value >>= -correction_bits; - } else { - value <<= correction_bits; - } - if (value > 1) { - value = Log(value, scale_shift); - } else { - value = 0; - } - } - *output++ = (value < kuint16max) ? 
value : kuint16max; - } - return ret; -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.h deleted file mode 100644 index a383f32..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -struct LogScaleState { - int enable_log; - int scale_shift; -}; - -// Applies a fixed point logarithm to the signal and converts it to 16 bit. Note -// that the signal array will be modified. 
-uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal, - int signal_size, int correction_bits); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c deleted file mode 100644 index 0e3dd1d..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h" - -void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) { - config->enable_log = 1; - config->scale_shift = 6; -} - -int LogScalePopulateState(const struct LogScaleConfig* config, - struct LogScaleState* state) { - state->enable_log = config->enable_log; - state->scale_shift = config->scale_shift; - return 1; -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h deleted file mode 100644 index 11f7d9e..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ - -#include -#include - -#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct LogScaleConfig { - // set to false (0) to disable this module - int enable_log; - // scale results by 2^(scale_shift) - int scale_shift; -}; - -// Populates the LogScaleConfig with "sane" default values. -void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config); - -// Allocates any buffers. -int LogScalePopulateState(const struct LogScaleConfig* config, - struct LogScaleState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c deleted file mode 100644 index 16b30e6..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" - -#include - -void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) { - int i; - for (i = 0; i < state->num_channels; ++i) { - const uint32_t smoothing = - ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing; - const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing; - - // Update the estimate of the noise. - const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits; - uint32_t estimate = - (((uint64_t)signal_scaled_up * smoothing) + - ((uint64_t)state->estimate[i] * one_minus_smoothing)) >> - kNoiseReductionBits; - state->estimate[i] = estimate; - - // Make sure that we can't get a negative value for the signal - estimate. - if (estimate > signal_scaled_up) { - estimate = signal_scaled_up; - } - - const uint32_t floor = - ((uint64_t)signal[i] * state->min_signal_remaining) >> - kNoiseReductionBits; - const uint32_t subtracted = - (signal_scaled_up - estimate) >> state->smoothing_bits; - const uint32_t output = subtracted > floor ? subtracted : floor; - signal[i] = output; - } -} - -void NoiseReductionReset(struct NoiseReductionState* state) { - memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h deleted file mode 100644 index 46d3f52..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ - -#define kNoiseReductionBits 14 - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -struct NoiseReductionState { - int smoothing_bits; - uint16_t even_smoothing; - uint16_t odd_smoothing; - uint16_t min_signal_remaining; - int num_channels; - uint32_t* estimate; -}; - -// Removes stationary noise from each channel of the signal using a low pass -// filter. -void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal); - -void NoiseReductionReset(struct NoiseReductionState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c deleted file mode 100644 index a6c9234..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h" - -#include - -void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) { - config->smoothing_bits = 10; - config->even_smoothing = 0.025; - config->odd_smoothing = 0.06; - config->min_signal_remaining = 0.05; -} - -int NoiseReductionPopulateState(const struct NoiseReductionConfig* config, - struct NoiseReductionState* state, - int num_channels) { - state->smoothing_bits = config->smoothing_bits; - state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits); - state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits); - state->min_signal_remaining = - config->min_signal_remaining * (1 << kNoiseReductionBits); - state->num_channels = num_channels; - state->estimate = calloc(state->num_channels, sizeof(*state->estimate)); - if (state->estimate == NULL) { - fprintf(stderr, "Failed to alloc estimate buffer\n"); - return 0; - } - return 1; -} - -void NoiseReductionFreeStateContents(struct NoiseReductionState* state) { - free(state->estimate); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h deleted file mode 100644 index fa55539..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct NoiseReductionConfig { - // scale the signal up by 2^(smoothing_bits) before reduction - int smoothing_bits; - // smoothing coefficient for even-numbered channels - float even_smoothing; - // smoothing coefficient for odd-numbered channels - float odd_smoothing; - // fraction of signal to preserve (1.0 disables this module) - float min_signal_remaining; -}; - -// Populates the NoiseReductionConfig with "sane" default values. -void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config); - -// Allocates any buffers. -int NoiseReductionPopulateState(const struct NoiseReductionConfig* config, - struct NoiseReductionState* state, - int num_channels); - -// Frees any allocated buffers. 
-void NoiseReductionFreeStateContents(struct NoiseReductionState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c deleted file mode 100644 index 22d5876..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" - -#include "tensorflow/lite/experimental/microfrontend/lib/bits.h" - -int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) { - if (x <= 2) { - return lut[x]; - } - - const int16_t interval = MostSignificantBit32(x); - lut += 4 * interval - 6; - - const int16_t frac = - ((interval < 11) ? 
(x << (11 - interval)) : (x >> (interval - 11))) & - 0x3FF; - - int32_t result = ((int32_t)lut[2] * frac) >> 5; - result += (int32_t)((uint32_t)lut[1] << 5); - result *= frac; - result = (result + (1 << 14)) >> 15; - result += lut[0]; - return (int16_t)result; -} - -uint32_t PcanShrink(const uint32_t x) { - if (x < (2 << kPcanSnrBits)) { - return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits); - } else { - return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits); - } -} - -void PcanGainControlApply(struct PcanGainControlState* state, - uint32_t* signal) { - int i; - for (i = 0; i < state->num_channels; ++i) { - const uint32_t gain = - WideDynamicFunction(state->noise_estimate[i], state->gain_lut); - const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift; - signal[i] = PcanShrink(snr); - } -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h deleted file mode 100644 index 3f6222b..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ - -#include -#include - -#define kPcanSnrBits 12 -#define kPcanOutputBits 6 - -#ifdef __cplusplus -extern "C" { -#endif - -// Details at https://research.google/pubs/pub45911.pdf -struct PcanGainControlState { - int enable_pcan; - uint32_t* noise_estimate; - int num_channels; - int16_t* gain_lut; - int32_t snr_shift; -}; - -int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut); - -uint32_t PcanShrink(const uint32_t x); - -void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c deleted file mode 100644 index e850d43..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c +++ /dev/null @@ -1,92 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h" - -#include -#include - -#define kint16max 0x00007FFF - -void PcanGainControlFillConfigWithDefaults( - struct PcanGainControlConfig* config) { - config->enable_pcan = 0; - config->strength = 0.95; - config->offset = 80.0; - config->gain_bits = 21; -} - -int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config, - int32_t input_bits, uint32_t x) { - const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits); - const float gain_as_float = - ((uint32_t)1 << config->gain_bits) * - powf(x_as_float + config->offset, -config->strength); - - if (gain_as_float > kint16max) { - return kint16max; - } - return (int16_t)(gain_as_float + 0.5f); -} - -int PcanGainControlPopulateState(const struct PcanGainControlConfig* config, - struct PcanGainControlState* state, - uint32_t* noise_estimate, - const int num_channels, - const uint16_t smoothing_bits, - const int32_t input_correction_bits) { - state->enable_pcan = config->enable_pcan; - if (!state->enable_pcan) { - return 1; - } - state->noise_estimate = noise_estimate; - state->num_channels = num_channels; - state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t)); - if (state->gain_lut == NULL) { - fprintf(stderr, "Failed to allocate gain LUT\n"); - return 0; - } - state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits; - - const int32_t input_bits = smoothing_bits - input_correction_bits; - state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0); - state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1); - state->gain_lut -= 6; - int interval; - for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) { - const uint32_t x0 = (uint32_t)1 << (interval - 1); - const uint32_t x1 = x0 + (x0 >> 1); - const uint32_t x2 = - (interval == kWideDynamicFunctionBits) ? 
x0 + (x0 - 1) : 2 * x0; - - const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0); - const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1); - const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2); - - const int32_t diff1 = (int32_t)y1 - y0; - const int32_t diff2 = (int32_t)y2 - y0; - const int32_t a1 = 4 * diff1 - diff2; - const int32_t a2 = diff2 - a1; - - state->gain_lut[4 * interval] = y0; - state->gain_lut[4 * interval + 1] = (int16_t)a1; - state->gain_lut[4 * interval + 2] = (int16_t)a2; - } - state->gain_lut += 6; - return 1; -} - -void PcanGainControlFreeStateContents(struct PcanGainControlState* state) { - free(state->gain_lut); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h deleted file mode 100644 index d4bfaa2..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h" - -#define kWideDynamicFunctionBits 32 -#define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3) - -#ifdef __cplusplus -extern "C" { -#endif - -struct PcanGainControlConfig { - // set to false (0) to disable this module - int enable_pcan; - // gain normalization exponent (0.0 disables, 1.0 full strength) - float strength; - // positive value added in the normalization denominator - float offset; - // number of fractional bits in the gain - int gain_bits; -}; - -void PcanGainControlFillConfigWithDefaults( - struct PcanGainControlConfig* config); - -int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config, - int32_t input_bits, uint32_t x); - -int PcanGainControlPopulateState(const struct PcanGainControlConfig* config, - struct PcanGainControlState* state, - uint32_t* noise_estimate, - const int num_channels, - const uint16_t smoothing_bits, - const int32_t input_correction_bits); - -void PcanGainControlFreeStateContents(struct PcanGainControlState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.c deleted file mode 100644 index 10da676..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.c +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/window.h" - -#include - -int WindowProcessSamples(struct WindowState* state, const int16_t* samples, - size_t num_samples, size_t* num_samples_read) { - const int size = state->size; - - // Copy samples from the samples buffer over to our local input. - size_t max_samples_to_copy = state->size - state->input_used; - if (max_samples_to_copy > num_samples) { - max_samples_to_copy = num_samples; - } - memcpy(state->input + state->input_used, samples, - max_samples_to_copy * sizeof(*samples)); - *num_samples_read = max_samples_to_copy; - state->input_used += max_samples_to_copy; - - if (state->input_used < state->size) { - // We don't have enough samples to compute a window. - return 0; - } - - // Apply the window to the input. - const int16_t* coefficients = state->coefficients; - const int16_t* input = state->input; - int16_t* output = state->output; - int i; - int16_t max_abs_output_value = 0; - for (i = 0; i < size; ++i) { - int16_t new_value = - (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits; - *output++ = new_value; - if (new_value < 0) { - new_value = -new_value; - } - if (new_value > max_abs_output_value) { - max_abs_output_value = new_value; - } - } - // Shuffle the input down by the step size, and update how much we have used. 
- memmove(state->input, state->input + state->step, - sizeof(*state->input) * (state->size - state->step)); - state->input_used -= state->step; - state->max_abs_output_value = max_abs_output_value; - - // Indicate that the output buffer is valid for the next stage. - return 1; -} - -void WindowReset(struct WindowState* state) { - memset(state->input, 0, state->size * sizeof(*state->input)); - memset(state->output, 0, state->size * sizeof(*state->output)); - state->input_used = 0; - state->max_abs_output_value = 0; -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.h deleted file mode 100644 index bad8151..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window.h +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ - -#include -#include - -#define kFrontendWindowBits 12 - -#ifdef __cplusplus -extern "C" { -#endif - -struct WindowState { - size_t size; - int16_t* coefficients; - size_t step; - - int16_t* input; - size_t input_used; - int16_t* output; - int16_t max_abs_output_value; -}; - -// Applies a window to the samples coming in, stepping forward at the given -// rate. -int WindowProcessSamples(struct WindowState* state, const int16_t* samples, - size_t num_samples, size_t* num_samples_read); - -void WindowReset(struct WindowState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_ diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.c b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.c deleted file mode 100644 index eee6e7b..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.c +++ /dev/null @@ -1,73 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h" - -#include -#include -#include -#include - -// Some platforms don't have M_PI -#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - -void WindowFillConfigWithDefaults(struct WindowConfig* config) { - config->size_ms = 25; - config->step_size_ms = 10; -} - -int WindowPopulateState(const struct WindowConfig* config, - struct WindowState* state, int sample_rate) { - state->size = config->size_ms * sample_rate / 1000; - state->step = config->step_size_ms * sample_rate / 1000; - - state->coefficients = malloc(state->size * sizeof(*state->coefficients)); - if (state->coefficients == NULL) { - fprintf(stderr, "Failed to allocate window coefficients\n"); - return 0; - } - - // Populate the window values. - const float arg = M_PI * 2.0 / ((float)state->size); - int i; - for (i = 0; i < state->size; ++i) { - float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5))); - // Scale it to fixed point and round it. 
- state->coefficients[i] = - floor(float_value * (1 << kFrontendWindowBits) + 0.5); - } - - state->input_used = 0; - state->input = malloc(state->size * sizeof(*state->input)); - if (state->input == NULL) { - fprintf(stderr, "Failed to allocate window input\n"); - return 0; - } - - state->output = malloc(state->size * sizeof(*state->output)); - if (state->output == NULL) { - fprintf(stderr, "Failed to allocate window output\n"); - return 0; - } - - return 1; -} - -void WindowFreeStateContents(struct WindowState* state) { - free(state->coefficients); - free(state->input); - free(state->output); -} diff --git a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.h b/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.h deleted file mode 100644 index 68e4de9..0000000 --- a/examples/micro_speech/tensorflow/lite/experimental/microfrontend/lib/window_util.h +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_ -#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_ - -#include "tensorflow/lite/experimental/microfrontend/lib/window.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct WindowConfig { - // length of window frame in milliseconds - size_t size_ms; - // length of step for next frame in milliseconds - size_t step_size_ms; -}; - -// Populates the WindowConfig with "sane" default values. -void WindowFillConfigWithDefaults(struct WindowConfig* config); - -// Allocates any buffers. -int WindowPopulateState(const struct WindowConfig* config, - struct WindowState* state, int sample_rate); - -// Frees any allocated buffers. -void WindowFreeStateContents(struct WindowState* state); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_ diff --git a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING b/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING deleted file mode 100644 index 2fc6685..0000000 --- a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/COPYING +++ /dev/null @@ -1,11 +0,0 @@ -Copyright (c) 2003-2010 Mark Borgerding - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h b/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h deleted file mode 100644 index ba66144..0000000 --- a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/_kiss_fft_guts.h +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright (c) 2003-2010, Mark Borgerding - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -/* kiss_fft.h - defines kiss_fft_scalar as either short or a float type - and defines - typedef struct { kiss_fft_scalar r; kiss_fft_scalar i; }kiss_fft_cpx; */ -#include "kiss_fft.h" -#include - -#define MAXFACTORS 32 -/* e.g. an fft of length 128 has 4 factors - as far as kissfft is concerned - 4*4*4*2 - */ - -struct kiss_fft_state{ - int nfft; - int inverse; - int factors[2*MAXFACTORS]; - kiss_fft_cpx twiddles[1]; -}; - -/* - Explanation of macros dealing with complex math: - - C_MUL(m,a,b) : m = a*b - C_FIXDIV( c , div ) : if a fixed point impl., c /= div. 
noop otherwise - C_SUB( res, a,b) : res = a - b - C_SUBFROM( res , a) : res -= a - C_ADDTO( res , a) : res += a - * */ -#ifdef FIXED_POINT -#if (FIXED_POINT==32) -# define FRACBITS 31 -# define SAMPPROD int64_t -#define SAMP_MAX 2147483647 -#else -# define FRACBITS 15 -# define SAMPPROD int32_t -#define SAMP_MAX 32767 -#endif - -#define SAMP_MIN -SAMP_MAX - -#if defined(CHECK_OVERFLOW) -# define CHECK_OVERFLOW_OP(a,op,b) \ - if ( (SAMPPROD)(a) op (SAMPPROD)(b) > SAMP_MAX || (SAMPPROD)(a) op (SAMPPROD)(b) < SAMP_MIN ) { \ - fprintf(stderr,"WARNING:overflow @ " __FILE__ "(%d): (%d " #op" %d) = %ld\n",__LINE__,(a),(b),(SAMPPROD)(a) op (SAMPPROD)(b) ); } -#endif - - -# define smul(a,b) ( (SAMPPROD)(a)*(b) ) -# define sround( x ) (kiss_fft_scalar)( ( (x) + (1<<(FRACBITS-1)) ) >> FRACBITS ) - -# define S_MUL(a,b) sround( smul(a,b) ) - -# define C_MUL(m,a,b) \ - do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \ - (m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0) - -# define DIVSCALAR(x,k) \ - (x) = sround( smul( x, SAMP_MAX/k ) ) - -# define C_FIXDIV(c,div) \ - do { DIVSCALAR( (c).r , div); \ - DIVSCALAR( (c).i , div); }while (0) - -# define C_MULBYSCALAR( c, s ) \ - do{ (c).r = sround( smul( (c).r , s ) ) ;\ - (c).i = sround( smul( (c).i , s ) ) ; }while(0) - -#else /* not FIXED_POINT*/ - -# define S_MUL(a,b) ( (a)*(b) ) -#define C_MUL(m,a,b) \ - do{ (m).r = (a).r*(b).r - (a).i*(b).i;\ - (m).i = (a).r*(b).i + (a).i*(b).r; }while(0) -# define C_FIXDIV(c,div) /* NOOP */ -# define C_MULBYSCALAR( c, s ) \ - do{ (c).r *= (s);\ - (c).i *= (s); }while(0) -#endif - -#ifndef CHECK_OVERFLOW_OP -# define CHECK_OVERFLOW_OP(a,op,b) /* noop */ -#endif - -#define C_ADD( res, a,b)\ - do { \ - CHECK_OVERFLOW_OP((a).r,+,(b).r)\ - CHECK_OVERFLOW_OP((a).i,+,(b).i)\ - (res).r=(a).r+(b).r; (res).i=(a).i+(b).i; \ - }while(0) -#define C_SUB( res, a,b)\ - do { \ - CHECK_OVERFLOW_OP((a).r,-,(b).r)\ - CHECK_OVERFLOW_OP((a).i,-,(b).i)\ - (res).r=(a).r-(b).r; 
(res).i=(a).i-(b).i; \ - }while(0) -#define C_ADDTO( res , a)\ - do { \ - CHECK_OVERFLOW_OP((res).r,+,(a).r)\ - CHECK_OVERFLOW_OP((res).i,+,(a).i)\ - (res).r += (a).r; (res).i += (a).i;\ - }while(0) - -#define C_SUBFROM( res , a)\ - do {\ - CHECK_OVERFLOW_OP((res).r,-,(a).r)\ - CHECK_OVERFLOW_OP((res).i,-,(a).i)\ - (res).r -= (a).r; (res).i -= (a).i; \ - }while(0) - - -#ifdef FIXED_POINT -# define KISS_FFT_COS(phase) floor(.5+SAMP_MAX * cos (phase)) -# define KISS_FFT_SIN(phase) floor(.5+SAMP_MAX * sin (phase)) -# define HALF_OF(x) ((x)>>1) -#elif defined(USE_SIMD) -# define KISS_FFT_COS(phase) _mm_set1_ps( cos(phase) ) -# define KISS_FFT_SIN(phase) _mm_set1_ps( sin(phase) ) -# define HALF_OF(x) ((x)*_mm_set1_ps(.5)) -#else -# define KISS_FFT_COS(phase) (kiss_fft_scalar) cos(phase) -# define KISS_FFT_SIN(phase) (kiss_fft_scalar) sin(phase) -# define HALF_OF(x) ((x)*.5) -#endif - -#define kf_cexp(x,phase) \ - do{ \ - (x)->r = KISS_FFT_COS(phase);\ - (x)->i = KISS_FFT_SIN(phase);\ - }while(0) - - -/* a debugging function */ -#define pcpx(c)\ - fprintf(stderr,"%g + %gi\n",(double)((c)->r),(double)((c)->i) ) - - -#ifdef KISS_FFT_USE_ALLOCA -// define this to allow use of alloca instead of malloc for temporary buffers -// Temporary buffers are used in two case: -// 1. FFT sizes that have "bad" factors. i.e. not 2,3 and 5 -// 2. "in-place" FFTs. Notice the quotes, since kissfft does not really do an in-place transform. 
-#include -#define KISS_FFT_TMP_ALLOC(nbytes) alloca(nbytes) -#define KISS_FFT_TMP_FREE(ptr) -#else -#define KISS_FFT_TMP_ALLOC(nbytes) KISS_FFT_MALLOC(nbytes) -#define KISS_FFT_TMP_FREE(ptr) KISS_FFT_FREE(ptr) -#endif diff --git a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h b/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h deleted file mode 100644 index db1f186..0000000 --- a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/kiss_fft.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef KISS_FFT_H -#define KISS_FFT_H - -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - ATTENTION! - If you would like a : - -- a utility that will handle the caching of fft objects - -- real-only (no imaginary time component ) FFT - -- a multi-dimensional FFT - -- a command-line utility to perform ffts - -- a command-line utility to perform fast-convolution filtering - - Then see kfc.h kiss_fftr.h kiss_fftnd.h fftutil.c kiss_fastfir.c - in the tools/ directory. -*/ - -#ifdef USE_SIMD -# include -# define kiss_fft_scalar __m128 -#define KISS_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16) -#define KISS_FFT_FREE _mm_free -#else -#define KISS_FFT_MALLOC(X) (void*)(0) /* Patched. */ -#define KISS_FFT_FREE(X) /* Patched. */ -#endif - - -// Patched automatically by download_dependencies.sh so default is 16 bit. -#ifndef FIXED_POINT -#define FIXED_POINT (16) -#endif -// End patch. - -#ifdef FIXED_POINT -#include /* Patched. */ -# if (FIXED_POINT == 32) -# define kiss_fft_scalar int32_t -# else -# define kiss_fft_scalar int16_t -# endif -#else -# ifndef kiss_fft_scalar -/* default is float */ -# define kiss_fft_scalar float -# endif -#endif - -typedef struct { - kiss_fft_scalar r; - kiss_fft_scalar i; -}kiss_fft_cpx; - -typedef struct kiss_fft_state* kiss_fft_cfg; - -/* - * kiss_fft_alloc - * - * Initialize a FFT (or IFFT) algorithm's cfg/state buffer. 
- * - * typical usage: kiss_fft_cfg mycfg=kiss_fft_alloc(1024,0,NULL,NULL); - * - * The return value from fft_alloc is a cfg buffer used internally - * by the fft routine or NULL. - * - * If lenmem is NULL, then kiss_fft_alloc will allocate a cfg buffer using malloc. - * The returned value should be free()d when done to avoid memory leaks. - * - * The state can be placed in a user supplied buffer 'mem': - * If lenmem is not NULL and mem is not NULL and *lenmem is large enough, - * then the function places the cfg in mem and the size used in *lenmem - * and returns mem. - * - * If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough), - * then the function returns NULL and places the minimum cfg - * buffer size in *lenmem. - * */ - -kiss_fft_cfg kiss_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem); - -/* - * kiss_fft(cfg,in_out_buf) - * - * Perform an FFT on a complex input buffer. - * for a forward FFT, - * fin should be f[0] , f[1] , ... ,f[nfft-1] - * fout will be F[0] , F[1] , ... ,F[nfft-1] - * Note that each element is complex and can be accessed like - f[k].r and f[k].i - * */ -void kiss_fft(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout); - -/* - A more generic version of the above function. It reads its input from every Nth sample. - * */ -void kiss_fft_stride(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout,int fin_stride); - -/* If kiss_fft_alloc allocated a buffer, it is one contiguous - buffer and can be simply free()d when no longer needed*/ -#define kiss_fft_free free - -/* - Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up - your compiler output to call this before you exit. 
-*/ -void kiss_fft_cleanup(void); - - -/* - * Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5) - */ -int kiss_fft_next_fast_size(int n); - -/* for real ffts, we need an even size */ -#define kiss_fftr_next_fast_size_real(n) \ - (kiss_fft_next_fast_size( ((n)+1)>>1)<<1) - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h b/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h deleted file mode 100644 index 72e5a57..0000000 --- a/examples/micro_speech/tensorflow/lite/micro/tools/make/downloads/kissfft/tools/kiss_fftr.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef KISS_FTR_H -#define KISS_FTR_H - -#include "kiss_fft.h" -#ifdef __cplusplus -extern "C" { -#endif - - -/* - - Real optimized version can save about 45% cpu time vs. complex fft of a real seq. - - - - */ - -typedef struct kiss_fftr_state *kiss_fftr_cfg; - - -kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem); -/* - nfft must be even - - If you don't care to allocate space, use mem = lenmem = NULL -*/ - - -void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata); -/* - input timedata has nfft scalar points - output freqdata has nfft/2+1 complex points -*/ - -void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata); -/* - input freqdata has nfft/2+1 complex points - output timedata has nfft scalar points -*/ - -#define kiss_fftr_free free - -#ifdef __cplusplus -} -#endif -#endif diff --git a/examples/micro_speech/yes_1000ms_sample_data.cpp b/examples/micro_speech/yes_1000ms_sample_data.cpp deleted file mode 100644 index f480afb..0000000 --- a/examples/micro_speech/yes_1000ms_sample_data.cpp +++ /dev/null @@ -1,1800 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. - -#include "yes_1000ms_sample_data.h" - -const int g_yes_1000ms_sample_data_size = 16000; -const int16_t g_yes_1000ms_sample_data[16000] = { - -7, -12, -18, -20, -20, -21, -21, -25, -29, - -31, -31, -30, -30, -29, -30, -30, -29, -28, - -24, -22, -17, -12, -8, -7, -6, -1, 2, - 5, 7, 8, 11, 15, 18, 19, 23, 24, - 24, 27, 27, 26, 25, 28, 30, 32, 33, - 31, 29, 27, 28, 30, 28, 26, 26, 24, - 22, 17, 16, 15, 13, 10, 5, 0, -4, - -4, -7, -9, -12, -14, -14, -13, -11, -10, - -8, -6, -3, 3, 7, 8, 12, 15, 18, - 21, 19, 19, 21, 23, 24, 23, 22, 19, - 17, 11, 5, -3, -12, -22, -28, -35, -45, - -54, -62, -69, -76, -84, -92, -100, -109, -116, - -117, -120, -120, -120, -122, -124, -126, -123, -121, - -116, -113, -107, -97, -88, -75, -61, -50, -41, - -27, -12, 4, 21, 37, 58, 76, 93, 108, - 121, 137, 156, 172, 184, 196, 205, 215, 224, - 235, 242, 245, 242, 240, 238, 231, 223, 214, - 205, 195, 178, 158, 135, 112, 90, 69, 46, - 19, -11, -45, -76, -105, -133, -159, -186, -211, - -236, -260, -280, -294, -308, -320, -331, -336, -338, - -335, -326, -316, -301, -286, -267, -246, -225, -203, - -180, -154, -124, -91, -59, -34, -8, 19, 42, - 64, 87, 103, 119, 134, 148, 162, 174, 182, - 188, 190, 189, 187, 184, 180, 177, 171, 162, - 154, 144, 137, 129, 118, 106, 95, 81, 69, - 58, 48, 37, 26, 14, 3, -7, -22, -31, - -42, -52, 
-62, -69, -75, -79, -82, -87, -88, - -92, -94, -91, -87, -85, -81, -74, -70, -64, - -55, -47, -40, -33, -25, -19, -12, -6, -4, - -1, 1, 1, -2, -9, -15, -17, -18, -20, - -22, -22, -26, -31, -33, -35, -31, -26, -17, - -4, 8, 19, 31, 44, 54, 64, 71, 79, - 86, 92, 102, 109, 111, 109, 104, 96, 84, - 70, 60, 51, 38, 27, 13, 4, -3, -9, - -13, -18, -26, -33, -32, -27, -20, -10, -4, - 2, 6, 10, 14, 16, 21, 25, 29, 31, - 33, 35, 37, 33, 22, 15, 13, 11, 12, - 9, 5, 2, 1, -3, -9, -17, -27, -32, - -35, -36, -36, -42, -50, -56, -66, -77, -85, - -96, -100, -106, -113, -118, -121, -119, -117, -119, - -122, -124, -123, -112, -94, -77, -64, -51, -37, - -22, -3, 17, 37, 54, 68, 86, 100, 114, - 134, 154, 167, 174, 178, 182, 189, 189, 187, - 185, 179, 177, 174, 171, 157, 138, 123, 108, - 94, 76, 50, 25, 6, -8, -20, -37, -59, - -86, -110, -132, -147, -159, -169, -178, -191, -203, - -213, -217, -215, -208, -199, -194, -195, -190, -178, - -165, -155, -144, -134, -123, -103, -80, -56, -35, - -18, -4, 11, 23, 36, 50, 65, 78, 93, - 111, 122, 129, 132, 131, 127, 125, 126, 126, - 128, 127, 125, 122, 118, 111, 108, 104, 99, - 93, 89, 90, 87, 82, 78, 75, 68, 65, - 67, 69, 66, 61, 54, 39, 28, 15, 3, - -7, -18, -25, -29, -35, -42, -52, -66, -78, - -83, -85, -86, -86, -82, -83, -84, -83, -81, - -75, -62, -57, -53, -49, -46, -41, -34, -26, - -16, -10, -7, -2, 2, 6, 12, 15, 19, - 18, 15, 17, 21, 24, 30, 33, 27, 22, - 21, 20, 23, 24, 21, 15, 13, 8, 3, - 1, -1, -3, -4, -6, -9, -11, -11, -8, - -10, -13, -15, -19, -17, -11, -2, 1, 2, - 6, 9, 10, 12, 13, 9, 8, 10, 13, - 20, 18, 13, 10, 4, 1, -2, -6, -11, - -13, -16, -18, -15, -18, -21, -21, -22, -23, - -25, -23, -22, -20, -19, -16, -12, -10, -9, - -11, -15, -19, -22, -19, -14, -11, -9, -11, - -17, -20, -18, -19, -15, -11, -8, -2, 8, - 19, 30, 36, 37, 36, 38, 45, 57, 69, - 77, 81, 79, 75, 76, 74, 69, 66, 60, - 53, 45, 36, 28, 22, 17, 10, 0, -5, - -11, -15, -18, -26, -31, -33, -34, -34, -35, - -37, -37, -35, -28, -24, -29, -37, -45, -46, - -41, -36, -31, 
-32, -33, -37, -37, -36, -36, - -34, -27, -19, -14, -11, -8, -1, 6, 14, - 19, 21, 25, 30, 34, 38, 38, 33, 26, - 22, 19, 20, 18, 17, 15, 10, 2, -3, - -5, -10, -13, -13, -13, -16, -16, -16, -15, - -13, -14, -13, -16, -19, -20, -18, -17, -18, - -16, -16, -24, -28, -28, -28, -23, -21, -21, - -20, -24, -27, -23, -18, -14, -7, 4, 11, - 15, 19, 21, 25, 33, 39, 41, 45, 47, - 50, 56, 58, 57, 59, 59, 55, 50, 47, - 39, 34, 30, 24, 18, 11, 8, 3, 0, - -3, -8, -14, -15, -13, -13, -12, -14, -17, - -17, -12, -10, -4, -7, -12, -10, -14, -17, - -17, -19, -25, -28, -27, -29, -30, -31, -35, - -38, -43, -47, -51, -52, -50, -49, -48, -47, - -45, -39, -32, -30, -31, -35, -35, -31, -24, - -17, -12, -11, -14, -15, -17, -16, -9, -5, - -3, -1, 0, 1, 0, 3, 12, 21, 26, - 33, 35, 38, 45, 50, 53, 53, 54, 58, - 61, 64, 69, 67, 66, 64, 58, 54, 51, - 46, 44, 45, 41, 35, 31, 27, 25, 27, - 25, 20, 13, 12, 16, 17, 17, 12, 7, - 3, 2, -2, -4, -8, -14, -19, -25, -29, - -38, -49, -60, -69, -73, -71, -74, -82, -89, - -98, -103, -104, -103, -99, -98, -98, -98, -99, - -97, -94, -91, -85, -82, -78, -74, -74, -71, - -68, -61, -54, -52, -47, -41, -36, -32, -21, - -12, -3, 11, 26, 36, 44, 48, 55, 64, - 77, 92, 100, 108, 117, 120, 122, 128, 130, - 129, 130, 127, 124, 122, 121, 118, 114, 110, - 102, 92, 85, 80, 77, 68, 55, 46, 39, - 36, 34, 31, 27, 15, 5, -1, -5, -11, - -20, -29, -37, -43, -46, -47, -54, -61, -65, - -74, -82, -84, -91, -94, -96, -104, -109, -111, - -111, -112, -113, -111, -112, -110, -104, -99, -96, - -93, -89, -87, -81, -71, -63, -54, -45, -43, - -37, -30, -24, -17, -12, -8, -2, 2, 15, - 23, 28, 35, 41, 42, 44, 52, 58, 66, - 74, 78, 80, 82, 85, 88, 90, 92, 92, - 88, 87, 87, 79, 73, 69, 64, 62, 55, - 50, 45, 41, 36, 29, 24, 20, 16, 12, - 8, 5, 2, 1, 1, 0, 1, -4, -4, - -4, -4, -1, 1, 2, 1, -3, -6, -1, - 5, 6, 7, 8, 4, 2, 0, -2, -3, - 0, -3, -4, -3, -4, -5, -8, -15, -20, - -25, -28, -32, -37, -38, -39, -43, -48, -55, - -62, -69, -75, -75, -78, -81, -83, -89, -89, - -92, -91, -91, -89, -83, -81, -74, 
-66, -63, - -54, -45, -39, -31, -23, -15, -4, 6, 14, - 23, 29, 35, 41, 45, 49, 55, 61, 69, - 75, 75, 76, 75, 74, 74, 73, 74, 72, - 69, 69, 65, 62, 57, 52, 44, 35, 33, - 29, 24, 14, 7, 3, -4, -12, -17, -20, - -22, -27, -32, -34, -39, -42, -43, -42, -43, - -40, -38, -36, -36, -37, -36, -33, -31, -27, - -24, -23, -22, -17, -11, -7, -7, -7, -3, - 5, 13, 19, 25, 27, 25, 27, 35, 40, - 40, 41, 45, 47, 50, 54, 52, 50, 45, - 43, 44, 40, 34, 28, 24, 18, 11, 6, - -2, -9, -14, -21, -27, -35, -39, -43, -50, - -57, -62, -66, -68, -71, -72, -73, -74, -76, - -76, -77, -75, -75, -74, -67, -61, -55, -49, - -45, -40, -30, -21, -11, -4, 4, 13, 23, - 34, 44, 52, 59, 65, 70, 77, 84, 87, - 88, 90, 91, 90, 89, 85, 80, 75, 72, - 71, 64, 56, 48, 41, 34, 27, 21, 12, - 1, -11, -19, -28, -33, -39, -46, -50, -53, - -58, -63, -66, -71, -73, -76, -76, -74, -73, - -71, -67, -65, -62, -60, -55, -51, -45, -39, - -35, -31, -27, -20, -13, -6, -3, 1, 8, - 12, 18, 24, 26, 30, 35, 38, 44, 47, - 47, 51, 53, 52, 53, 52, 50, 51, 49, - 50, 51, 50, 48, 48, 45, 43, 42, 37, - 34, 31, 31, 30, 26, 24, 21, 15, 12, - 11, 7, 4, 1, -3, -5, -7, -9, -15, - -21, -26, -28, -31, -35, -39, -46, -48, -49, - -53, -58, -63, -67, -69, -71, -72, -74, -75, - -77, -77, -73, -72, -69, -65, -60, -55, -50, - -47, -43, -38, -30, -25, -20, -12, -4, 4, - 9, 16, 20, 24, 28, 35, 43, 50, 58, - 61, 65, 72, 74, 74, 76, 79, 78, 76, - 78, 76, 76, 74, 70, 64, 59, 52, 46, - 41, 33, 26, 19, 12, 5, -2, -8, -15, - -20, -26, -31, -37, -39, -41, -44, -44, -47, - -51, -52, -52, -48, -45, -46, -48, -45, -42, - -40, -36, -32, -27, -24, -22, -18, -16, -11, - -10, -5, 0, 3, 8, 11, 16, 18, 21, - 23, 25, 26, 27, 28, 30, 31, 31, 30, - 29, 27, 26, 23, 19, 17, 13, 10, 6, - 0, -2, -5, -10, -12, -15, -19, -23, -26, - -29, -30, -30, -32, -33, -34, -35, -34, -31, - -29, -29, -28, -28, -23, -19, -17, -12, -12, - -10, -5, -2, 3, 7, 10, 13, 14, 19, - 22, 26, 31, 34, 34, 35, 36, 39, 43, - 45, 47, 47, 48, 49, 51, 48, 47, 50, - 45, 41, 41, 38, 34, 34, 30, 23, 17, - 11, 
7, 4, -4, -9, -15, -23, -28, -32, - -35, -39, -45, -46, -49, -53, -52, -53, -55, - -56, -56, -55, -54, -53, -53, -51, -47, -44, - -42, -40, -37, -33, -28, -25, -23, -18, -15, - -8, -6, -2, 3, 8, 15, 18, 23, 26, - 27, 32, 36, 36, 36, 39, 38, 38, 40, - 39, 35, 31, 29, 25, 23, 19, 15, 11, - 7, 5, 3, 1, -1, -6, -8, -7, -10, - -9, -10, -11, -10, -7, -6, -8, -6, -5, - -4, 1, 2, 4, 7, 7, 9, 11, 11, - 9, 9, 10, 11, 13, 17, 15, 15, 15, - 17, 19, 17, 17, 17, 15, 15, 13, 11, - 12, 8, 7, 5, 3, 0, -4, -4, -6, - -9, -12, -14, -15, -15, -16, -20, -19, -20, - -20, -20, -18, -18, -21, -22, -21, -21, -23, - -20, -20, -23, -24, -23, -25, -25, -25, -25, - -26, -24, -23, -23, -23, -23, -22, -19, -18, - -15, -14, -10, -8, -4, -1, 1, 3, 6, - 8, 9, 14, 19, 22, 24, 26, 29, 32, - 31, 34, 39, 42, 42, 46, 49, 50, 50, - 52, 53, 52, 49, 49, 48, 48, 46, 45, - 40, 34, 30, 25, 21, 17, 13, 10, 6, - 2, -4, -9, -12, -15, -18, -21, -26, -28, - -31, -32, -33, -35, -35, -38, -37, -36, -34, - -35, -35, -33, -33, -34, -30, -26, -27, -25, - -23, -22, -18, -15, -16, -12, -9, -9, -6, - -1, 2, 3, 5, 8, 7, 9, 12, 15, - 17, 18, 18, 19, 18, 20, 19, 18, 21, - 20, 19, 18, 16, 15, 15, 15, 14, 12, - 9, 9, 10, 8, 6, 4, 2, 1, -1, - -3, -1, -3, -2, -4, -5, -5, -8, -8, - -10, -10, -8, -8, -8, -7, -8, -8, -8, - -9, -11, -12, -11, -9, -7, -8, -8, -8, - -10, -8, -7, -8, -7, -6, -7, -5, -3, - -3, -3, -3, -2, 0, 3, 3, 5, 7, - 10, 11, 10, 10, 12, 13, 16, 16, 16, - 17, 15, 16, 17, 16, 14, 16, 13, 11, - 11, 9, 9, 6, 4, 4, 3, 0, -2, - -4, -7, -7, -7, -13, -15, -13, -14, -16, - -15, -15, -17, -16, -16, -18, -19, -19, -20, - -19, -16, -15, -13, -12, -10, -7, -6, -4, - -4, -2, 0, 2, 6, 8, 10, 12, 14, - 15, 14, 13, 13, 13, 15, 15, 17, 17, - 17, 18, 17, 16, 15, 15, 14, 11, 9, - 8, 8, 9, 8, 5, 5, 3, -1, -1, - -4, -5, -7, -8, -8, -8, -9, -10, -8, - -11, -12, -12, -12, -12, -13, -11, -11, -9, - -8, -7, -8, -7, -6, -7, -6, -5, -4, - -4, -2, -2, -3, -2, -2, -3, 0, -1, - -3, 1, 1, 2, 4, 3, 5, 6, 3, - 3, 4, 3, 3, 4, 5, 4, 6, 7, - 7, 7, 6, 
3, 3, 5, 3, 3, 6, - 6, 7, 6, 4, 5, 2, 1, 1, 0, - 0, 2, 1, 1, 1, -1, -2, -3, -5, - -4, -5, -4, -4, -6, -4, -4, -4, -5, - -6, -5, -6, -5, -4, -5, -4, -3, -4, - 0, 2, 2, 2, 2, 2, 2, 3, 3, - 5, 6, 6, 5, 6, 7, 6, 8, 6, - 5, 5, 5, 6, 6, 6, 5, 5, 2, - 2, 1, 2, 0, -1, -1, -1, -1, 0, - -1, -4, -6, -8, -8, -9, -8, -7, -6, - -5, -5, -6, -3, -4, -5, -4, -7, -6, - -4, -2, -1, -1, 1, 1, 1, 1, 1, - 2, 2, 1, 3, 4, 4, 6, 6, 6, - 6, 4, 4, 4, 4, 3, 2, 2, 2, - 2, 1, 1, 1, 0, 1, 1, 0, -2, - -2, -3, -3, -3, -3, -5, -4, -3, -5, - -5, -3, -5, -4, -4, -2, -2, -2, -1, - -3, -2, -2, -1, -3, -2, -1, -2, -2, - -2, 0, 0, 0, 0, 0, 1, 0, 0, - 1, 2, 3, 3, 3, 4, 5, 4, 3, - 4, 5, 5, 7, 7, 6, 9, 8, 6, - 7, 8, 6, 5, 7, 8, 8, 8, 7, - 6, 5, 4, 4, 4, 5, 4, 2, 1, - 2, 1, 0, -2, -3, -2, -4, -6, -6, - -7, -7, -8, -9, -9, -9, -9, -9, -9, - -9, -10, -10, -10, -8, -7, -8, -6, -5, - -4, -3, -5, -2, -2, -2, -1, -1, 0, - 1, 1, 2, 3, 2, 4, 3, 3, 5, - 3, 3, 5, 4, 5, 6, 5, 4, 5, - 3, 2, 2, 3, 4, 4, 4, 4, 4, - 3, 4, 4, 4, 3, 2, 2, 2, 2, - 2, 2, 2, 2, 1, 1, 1, 2, 1, - 1, 2, 1, 1, 2, 1, 1, 1, -1, - 0, 1, 0, -1, 1, -1, -1, -1, -2, - -1, -1, -1, -1, -1, -1, -1, -1, -2, - -1, 0, -1, -1, 1, 1, 2, 0, -1, - 0, -1, -1, 0, 0, 1, 2, 2, 2, - 1, 1, 0, 0, 0, 0, 1, 1, 0, - 0, 0, 0, 0, -1, -2, -1, -3, -4, - -4, -4, -4, -4, -4, -4, -3, -3, -5, - -6, -4, -2, -2, -1, -1, -1, -2, 1, - -1, 1, 0, 0, 1, 1, 1, 1, 2, - 1, 2, 2, 3, 3, 3, 3, 4, 5, - 5, 5, 5, 5, 5, 5, 5, 6, 6, - 5, 5, 5, 6, 6, 5, 3, 6, 5, - 4, 5, 3, 2, 2, 2, 2, 1, 1, - 2, 0, -1, 0, -1, -1, -1, -1, -1, - -1, -1, -3, -3, -3, -3, -4, -4, -5, - -6, -6, -6, -6, -6, -6, -5, -5, -6, - -5, -4, -4, -4, -4, -2, -2, -2, -1, - -2, 0, 1, 0, 1, 3, 4, 4, 4, - 4, 4, 4, 5, 4, 4, 4, 5, 7, - 5, 4, 4, 4, 4, 3, 2, 2, 2, - 2, 2, 0, 1, 1, 0, 1, 1, -1, - 0, -1, -2, -1, -3, -4, -4, -3, -5, - -5, -5, -5, -5, -5, -4, -3, -3, -2, - -3, -2, -2, -5, -3, -3, -3, -2, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 3, - 3, 4, 4, 4, 4, 5, 5, 2, 3, - 4, 3, 5, 4, 3, 4, 3, 3, 5, - 5, 3, 4, 2, 1, 1, 3, 4, 3, - 1, 3, 2, 1, 2, 1, 0, 1, 
0, - 1, 0, 1, 1, 1, 1, 0, -1, 0, - 0, -1, -1, -2, -1, -1, -2, 0, -1, - -2, -1, -1, -2, -2, -1, -3, -3, -3, - -3, -3, -4, -3, -5, -6, -4, -4, -5, - -4, -3, -5, -6, -4, -5, -6, -4, -3, - -5, -4, -3, -4, -3, -2, -2, -2, 0, - 0, 1, 1, 0, 0, 0, 1, 1, 3, - 3, 3, 4, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, - 1, 1, 1, 1, 1, 1, 1, 0, 0, - 0, 1, -2, -1, 1, 0, -1, -2, -2, - 0, 1, 0, 1, 1, 1, 1, 0, 0, - 1, 0, 0, 2, 1, 0, 1, 1, 1, - 1, 3, 3, 3, 4, 3, 3, 4, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 2, - 2, 2, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -3, -3, -3, -5, -4, -5, -5, - -5, -5, -7, -7, -7, -8, -7, -8, -7, - -8, -8, -7, -8, -8, -8, -8, -7, -6, - -6, -6, -7, -6, -6, -5, -5, -3, -2, - -2, -1, 0, -1, 0, 1, 2, 2, 3, - 3, 3, 6, 7, 7, 7, 8, 9, 8, - 10, 10, 9, 10, 11, 9, 10, 12, 11, - 10, 9, 9, 9, 9, 10, 9, 6, 6, - 5, 5, 6, 3, 1, 1, 0, 1, 0, - 0, 1, -1, -2, -2, -1, -3, -3, -2, - -4, -4, -3, -2, -4, -4, -4, -5, -3, - -3, -5, -3, -3, -5, -4, -2, -2, -3, - -3, -1, 0, -1, 0, 0, 0, -2, -1, - 0, -1, -2, -2, -2, -2, -1, -3, -2, - -3, -4, -3, -3, -3, -3, -3, -3, -3, - -2, -4, -6, -5, -3, -2, -4, -3, -2, - -4, -4, -4, -3, -4, -5, -4, -5, -3, - -2, -5, -2, -4, -4, -3, -2, -1, -1, - -1, 0, 2, 2, 1, 1, 3, 3, 3, - 3, 4, 4, 5, 6, 5, 5, 6, 7, - 7, 7, 8, 8, 7, 9, 9, 9, 9, - 10, 9, 9, 9, 9, 9, 9, 8, 7, - 9, 9, 6, 7, 5, 2, 3, 2, 1, - 1, 0, -2, -2, -2, -3, -3, -2, -2, - -4, -5, -4, -4, -4, -4, -5, -4, -4, - -5, -4, -5, -4, -5, -6, -4, -4, -5, - -5, -5, -5, -6, -4, -4, -4, -3, -2, - -3, -3, -2, -2, -1, -2, -3, -1, 0, - -1, 0, 0, 0, 0, 1, 0, 0, 0, - 0, -1, 1, 1, 1, 0, -2, -2, -3, - -3, -4, -4, -6, -7, -5, -4, -5, -5, - -4, -6, -8, -7, -6, -5, -5, -5, -4, - -4, -5, -4, -3, -3, 0, 0, -2, -1, - 0, 0, 1, 1, 2, 2, 2, 2, 2, - 4, 5, 5, 5, 6, 7, 7, 9, 10, - 10, 10, 12, 12, 13, 14, 14, 14, 15, - 15, 15, 15, 15, 15, 14, 15, 15, 12, - 13, 13, 12, 10, 11, 11, 11, 10, 8, - 6, 5, 7, 6, 6, 4, 3, 4, 5, - 3, 2, 2, 1, 1, 2, 3, 1, 0, - 0, 1, 0, -2, -1, -2, -3, -3, -3, - -3, -4, -6, -8, -9, -9, -10, -12, -14, - -15, -18, -21, 
-21, -21, -21, -22, -24, -26, - -26, -27, -27, -28, -26, -25, -26, -28, -27, - -24, -23, -23, -24, -21, -17, -17, -15, -12, - -12, -12, -12, -9, -7, -6, -5, -3, -3, - -2, 0, 0, 1, 3, 7, 6, 4, 6, - 7, 8, 11, 10, 10, 13, 15, 14, 13, - 18, 20, 18, 19, 21, 23, 24, 23, 22, - 24, 26, 26, 26, 27, 25, 23, 25, 27, - 28, 28, 28, 23, 19, 23, 24, 20, 20, - 21, 15, 13, 15, 16, 14, 11, 8, 7, - 8, 11, 11, 6, 4, 8, 7, 6, 7, - 6, 4, 7, 13, 12, 7, 8, 8, 4, - 1, 1, 1, 2, -4, -12, -18, -24, -25, - -25, -32, -41, -55, -59, -61, -75, -87, -96, - -109, -122, -133, -141, -148, -157, -168, -180, -191, - -198, -202, -207, -206, -207, -211, -211, -208, -203, - -189, -171, -153, -132, -114, -96, -75, -54, -30, - -5, 19, 43, 61, 77, 93, 106, 123, 143, - 161, 182, 198, 202, 201, 209, 229, 242, 240, - 235, 239, 249, 258, 255, 242, 233, 245, 268, - 278, 256, 223, 223, 253, 263, 235, 198, 178, - 188, 215, 230, 200, 143, 113, 128, 158, 158, - 128, 99, 90, 82, 70, 56, 32, 7, 14, - 46, 36, -23, -71, -76, -54, -36, -39, -74, - -118, -134, -122, -101, -104, -129, -164, -174, -129, - -86, -109, -184, -219, -191, -147, -141, -183, -249, - -290, -269, -236, -266, -346, -394, -366, -325, -353, - -431, -472, -406, -313, -316, -398, -449, -401, -287, - -194, -164, -193, -245, -212, -55, 75, 67, 26, - 67, 165, 237, 269, 293, 319, 333, 368, 414, - 432, 463, 488, 448, 404, 391, 377, 361, 365, - 376, 308, 197, 150, 129, 73, 53, 91, 43, - -107, -165, -54, 1, -148, -312, -273, -125, -62, - -128, -258, -294, -141, 70, 57, -217, -378, -145, - 198, 289, 169, -47, -219, -101, 264, 458, 217, - -163, -199, 13, 121, 101, -51, -293, -319, -62, - 24, -274, -474, -296, -170, -336, -422, -285, -248, - -302, -130, 98, -11, -257, -146, 184, 278, 264, - 331, 192, -35, 235, 805, 830, 315, 82, 322, - 503, 522, 619, 557, 242, 163, 399, 507, 489, - 618, 602, 156, -164, 112, 476, 406, 94, -154, - -242, -132, 56, 5, -325, -566, -527, -478, -624, - -692, -561, -551, -744, -836, -671, -520, -626, -736, - -647, -581, -639, -687, -702, 
-739, -665, -383, -236, - -414, -513, -321, -114, -43, 32, 65, -98, -236, - 34, 608, 924, 680, 218, 56, 329, 847, 1214, - 1006, 341, 11, 340, 667, 553, 353, 355, 415, - 416, 364, 257, 108, 6, 113, 293, 233, 46, - 4, 25, -10, -12, 55, 40, -65, -56, -26, - -101, -61, 143, 229, 78, -161, -210, 103, 424, - 377, 86, -274, -491, -328, -37, 60, 128, 188, - -105, -625, -823, -464, 138, 389, 111, -343, -526, - -306, 13, 205, 250, -35, -554, -764, -498, -42, - 167, -210, -639, -448, -101, -110, -171, -74, -39, - 47, 424, 616, 324, 98, 367, 853, 942, 416, - -184, -130, 339, 472, 369, 239, -165, -418, 101, - 742, 659, 325, 365, 476, 233, -14, 270, 785, - 719, -29, -533, -220, 237, 305, 179, -190, -644, - -610, -380, -526, -601, -237, 48, -36, -124, -49, - -6, 23, 117, 55, -199, -428, -512, -338, -238, - -424, -323, -135, -464, -657, -189, 100, -379, -964, - -893, -346, -64, -322, -650, -480, 32, 238, 201, - 386, 616, 611, 400, 195, 357, 842, 1051, 832, - 712, 829, 1070, 1307, 1081, 551, 363, 544, 623, - 239, -374, -609, -230, 375, 486, -52, -446, -270, - 181, 645, 601, -135, -654, -256, 567, 840, 380, - -54, 18, 334, 386, 21, -214, 83, 243, -316, - -937, -1074, -1006, -896, -674, -424, -331, -354, -380, - -481, -392, 80, 358, 171, -170, -624, -796, -130, - 706, 803, 381, 152, 367, 620, 685, 655, 347, - 36, 180, 417, 412, 358, 288, 189, 150, 16, - -240, -428, -428, -266, -335, -819, -1150, -946, -587, - -437, -580, -961, -1218, -1065, -704, -431, -350, -315, - -214, -162, -81, 26, -8, -52, -117, -226, -40, - 285, 241, -2, -69, 57, 207, 81, -144, -69, - 65, 84, 49, -168, -248, 126, 502, 472, 192, - 120, 442, 667, 551, 512, 634, 814, 1014, 1098, - 1156, 1112, 974, 1144, 1330, 1099, 825, 847, 877, - 555, 2, -243, -102, -196, -471, -377, -235, -439, - -622, -547, -470, -495, -431, -197, -21, 21, -9, - -246, -438, -238, -31, 0, 96, 137, -25, -211, - -181, -149, -350, -368, -33, 21, -308, -323, 32, - 379, 605, 531, 85, -374, -367, 9, 277, 147, - -356, -698, -494, -140, -126, -354, 
-549, -673, -642, - -428, -269, -273, -246, -216, -349, -323, -16, 32, - -387, -742, -662, -434, -223, 41, 140, -58, -227, - -80, 93, 20, -166, -360, -536, -555, -305, -33, - -23, -86, -75, -9, 82, -1, -156, 24, 532, - 916, 956, 835, 901, 1127, 1279, 1417, 1435, 1144, - 822, 862, 1214, 1352, 1001, 611, 539, 532, 369, - 189, 170, 308, 465, 430, 232, 64, 14, 51, - -37, -244, -321, -276, -144, 57, 77, -215, -467, - -335, -186, -245, -133, -81, -588, -1130, -959, -520, - -631, -1122, -1270, -971, -873, -1118, -1157, -1078, -1296, - -1365, -1010, -873, -1138, -1061, -379, 89, 51, 177, - 372, 185, -14, 63, 197, 125, -123, -60, 243, - 195, 88, 201, 115, -63, -12, -79, -492, -751, - -489, 49, 163, -293, -424, -52, 229, 302, 212, - 217, 315, 70, -207, -210, -173, 129, 619, 556, - 213, 181, 170, 112, 167, 322, 451, 206, -136, - 58, 426, 526, 524, 394, 387, 568, 481, 297, - 164, 8, 263, 664, 777, 943, 989, 934, 1283, - 1495, 1153, 861, 738, 582, 614, 692, 655, 629, - 432, 127, -119, -338, -313, -138, -204, -561, -994, - -1168, -948, -700, -658, -788, -1053, -1027, -684, -566, - -528, -355, -335, -323, -28, 206, 87, 56, 387, - 585, 296, 24, 261, 492, 248, -132, -469, -674, - -502, -235, -255, -517, -847, -1038, -965, -707, -630, - -767, -639, -298, -193, -290, -310, -118, 74, -77, - -337, -324, -120, 187, 323, -72, -552, -454, -14, - 29, -427, -803, -735, -586, -762, -918, -783, -649, - -723, -857, -786, -626, -591, -417, -83, 167, 262, - 49, -161, 157, 842, 1298, 1356, 1206, 1041, 1194, - 1461, 1323, 1070, 1221, 1687, 2051, 2002, 1673, 1464, - 1550, 1851, 1907, 1531, 1327, 1399, 1342, 1287, 1264, - 1152, 1030, 878, 716, 601, 454, 264, 264, 352, - 151, -193, -296, -161, -93, -215, -423, -617, -668, - -547, -416, -464, -807, -1175, -1174, -1045, -1076, -1023, - -829, -710, -745, -1069, -1443, -1417, -1099, -939, -1165, - -1307, -1056, -843, -638, -304, -190, -334, -578, -770, - -705, -675, -947, -957, -565, -437, -617, -843, -1015, - -813, -489, -584, -904, -1054, -797, -229, 
-26, -208, - -66, 398, 710, 644, 390, 413, 726, 992, 1204, - 1337, 1234, 1104, 1038, 1001, 1043, 982, 847, 885, - 1024, 1098, 1138, 1108, 1038, 966, 885, 882, 878, - 929, 1005, 944, 1008, 1284, 1415, 1289, 1007, 760, - 812, 947, 806, 455, 111, -72, -290, -611, -626, - -559, -765, -1034, -1375, -1632, -1565, -1588, -1728, -1585, - -1477, -1547, -1533, -1371, -1103, -995, -1090, -1102, -947, - -686, -403, -295, -250, -107, -86, -171, -150, 12, - 234, 283, 185, 300, 461, 393, 382, 434, 378, - 306, 202, 195, 253, -8, -307, -105, 264, 342, - 212, 34, -57, 78, 435, 571, 180, -165, -51, - 339, 705, 683, 464, 658, 958, 825, 579, 465, - 390, 241, 61, 202, 429, 128, -122, 241, 406, - 39, -167, -60, 15, -31, -68, 146, 402, 344, - 227, 208, 87, -25, -31, -66, -169, -249, -87, - 75, -181, -438, -249, 49, 87, -40, -16, 53, - -86, -74, 98, 78, 110, 169, -84, -323, -251, - -102, -172, -513, -750, -675, -568, -587, -583, -523, - -450, -302, -245, -356, -480, -590, -495, -183, -105, - -191, -215, -308, -206, 39, 4, -77, -21, 74, - 186, 218, 356, 611, 489, 83, 13, 246, 371, - 348, 240, 61, -66, -107, -170, -205, -74, 200, - 277, 45, -11, 180, 263, 100, -74, 102, 246, - 6, -154, -162, -197, -128, -189, -227, -49, -238, - -490, -333, -188, 1, 215, 150, 144, 128, -33, - 187, 532, 676, 911, 773, 283, 351, 673, 620, - 349, 105, 205, 425, 325, 295, 372, 340, 511, - 628, 394, 224, 187, 91, -174, -556, -482, -37, - -9, -226, -382, -568, -466, -208, -241, -426, -656, - -814, -788, -902, -1065, -946, -860, -896, -831, -744, - -672, -685, -743, -723, -783, -813, -570, -341, -239, - -57, 137, 348, 576, 593, 454, 429, 503, 449, - 238, 173, 350, 423, 419, 530, 501, 272, 156, - 207, 295, 404, 568, 676, 419, 30, 113, 463, - 550, 473, 349, 126, 33, 144, 207, 193, 267, - 304, 81, -252, -401, -368, -347, -404, -452, -408, - -272, -40, 234, 281, 48, -72, -18, 54, 208, - 309, 285, 245, 164, 38, -20, 148, 430, 563, - 655, 679, 453, 300, 319, 219, 25, -15, 54, - -117, -444, -431, -135, -147, -468, -667, 
-722, -593, - -301, -217, -428, -642, -598, -400, -422, -602, -628, - -554, -509, -501, -541, -488, -250, -129, -284, -441, - -358, -161, -82, 4, 134, 157, 290, 516, 582, - 702, 859, 871, 858, 759, 615, 616, 754, 839, - 725, 464, 259, 187, 127, 150, 280, 238, 92, - 78, 5, -86, 6, 67, -14, -92, -143, -211, - -89, 213, 300, 107, -91, -154, -153, -238, -355, - -314, -227, -168, -92, -142, -219, -156, -47, 53, - -15, -195, -161, -186, -382, -395, -297, -238, -240, - -390, -502, -336, -97, -29, -116, -290, -289, -67, - 74, 112, 119, 182, 358, 382, 315, 341, 290, - 218, 190, 101, -51, -168, -132, -41, -39, -15, - 104, 186, 151, 68, 89, 154, 67, 10, 143, - 120, -185, -382, -365, -263, -145, -111, -159, -190, - -53, 151, 177, 179, 384, 553, 502, 490, 572, - 600, 573, 442, 119, -212, -260, -166, -318, -506, - -413, -279, -285, -354, -390, -278, -142, -85, -18, - -19, -121, -143, -32, 88, 118, 42, -96, -187, - -167, -113, -172, -270, -256, -178, -192, -249, -128, - 103, 132, -47, -147, -104, -56, -9, 45, 35, - 109, 315, 381, 326, 336, 457, 667, 786, 675, - 489, 460, 569, 595, 470, 303, 272, 448, 620, - 545, 226, -92, -128, 91, 172, -98, -385, -378, - -264, -284, -362, -314, -148, -72, -198, -350, -353, - -344, -389, -353, -292, -327, -413, -473, -519, -588, - -577, -546, -737, -989, -1030, -997, -1010, -861, -683, - -731, -690, -419, -197, -47, 112, 167, 74, 41, - 176, 309, 438, 671, 781, 793, 868, 904, 991, - 1099, 987, 812, 816, 869, 766, 605, 633, 728, - 592, 424, 460, 405, 170, 75, 30, -105, -58, - 63, -58, -242, -359, -415, -255, -44, -127, -266, - -191, -187, -296, -273, -260, -341, -345, -324, -384, - -467, -421, -233, -125, -227, -341, -256, -168, -217, - -249, -302, -447, -425, -274, -289, -299, -229, -275, - -272, -103, -57, -117, -106, -162, -256, -184, -31, - 51, 69, 31, -19, 72, 256, 318, 331, 254, - 28, -7, 121, 48, -64, 58, 183, 152, 161, - 201, 167, 190, 287, 278, 157, 56, 103, 332, - 460, 299, 166, 238, 308, 374, 508, 509, 373, - 275, 270, 298, 229, 185, 
192, 23, -160, -80, - 67, 31, -170, -378, -384, -330, -500, -648, -615, - -686, -716, -510, -510, -771, -752, -475, -434, -556, - -480, -403, -515, -464, -255, -177, -105, 29, 95, - 152, 210, 190, 180, 279, 408, 325, 225, 462, - 607, 537, 759, 1022, 973, 945, 964, 846, 818, - 952, 907, 584, 313, 302, 428, 533, 479, 260, - 178, 262, 185, 18, -77, -263, -370, -208, -240, - -589, -739, -572, -444, -405, -357, -475, -738, -771, - -542, -441, -529, -651, -803, -823, -556, -285, -227, - -233, -202, -168, -110, -78, -220, -302, -56, 129, - -60, -149, 54, 130, 169, 324, 231, 24, 89, - 269, 320, 262, 231, 225, 138, 67, 153, 310, - 399, 269, -21, -197, -183, -59, 144, 234, -13, - -274, -168, 32, -37, -277, -417, -441, -416, -324, - -312, -467, -540, -373, -166, -161, -297, -365, -341, - -246, -69, 81, 99, -3, 11, 305, 540, 449, - 394, 586, 667, 606, 685, 665, 425, 410, 585, - 509, 360, 424, 538, 583, 482, 250, 159, 310, - 423, 217, -131, -280, -204, -51, -12, -204, -338, - -232, -143, -201, -306, -374, -336, -229, -257, -453, - -576, -497, -379, -326, -302, -372, -504, -453, -229, - -133, -226, -328, -326, -261, -151, -6, 97, 143, - 164, 143, 138, 267, 433, 500, 470, 297, 143, - 279, 504, 556, 475, 333, 233, 225, 228, 198, - 128, 24, -17, 4, -55, -187, -251, -213, -119, - -94, -214, -357, -349, -246, -195, -183, -261, -440, - -533, -476, -341, -213, -170, -220, -299, -220, -8, - 51, -11, 19, 172, 292, 189, 9, -6, 102, - 238, 384, 477, 448, 353, 304, 354, 473, 543, - 400, 229, 275, 380, 425, 415, 371, 398, 460, - 377, 202, 154, 199, 110, -123, -365, -524, -524, - -360, -134, -47, -182, -348, -453, -542, -503, -376, - -398, -521, -595, -621, -560, -439, -284, -115, -80, - -123, -57, 28, -15, -60, -9, 47, 119, 203, - 288, 435, 571, 635, 706, 750, 627, 436, 345, - 330, 398, 460, 368, 213, 127, 140, 215, 202, - 58, -99, -244, -387, -470, -527, -637, -754, -791, - -768, -742, -739, -735, -704, -649, -552, -479, -491, - -494, -454, -433, -422, -398, -315, -115, 75, 175, - 244, 307, 
360, 398, 460, 532, 529, 446, 422, - 497, 541, 504, 541, 702, 803, 744, 645, 621, - 727, 877, 873, 734, 593, 513, 523, 516, 412, - 336, 334, 274, 199, 163, 123, 125, 117, 107, - 140, 72, -73, -114, -68, -15, 13, -122, -338, - -367, -325, -386, -497, -608, -634, -546, -477, -427, - -377, -412, -464, -436, -343, -276, -327, -390, -313, - -149, -17, 2, -93, -146, -104, -76, -87, -131, - -224, -280, -194, -46, 12, -76, -189, -151, 18, - 160, 200, 99, -81, -149, -95, -31, -6, -45, - -93, -97, -71, 0, 73, 34, -82, -129, -102, - -84, -96, -107, -69, -5, 6, 18, 48, 35, - 27, 32, -4, -71, -30, 119, 205, 266, 352, - 325, 237, 282, 352, 358, 342, 265, 203, 200, - 159, 120, 159, 195, 185, 133, 37, 20, 152, - 312, 363, 316, 255, 251, 259, 211, 160, 86, - -4, -30, -79, -154, -213, -271, -243, -146, -147, - -211, -283, -319, -219, -157, -207, -237, -252, -245, - -136, 0, 42, -22, -108, -82, 34, 130, 179, - 152, 98, 105, 110, 116, 180, 175, 66, -9, - -9, 36, 82, 75, 12, -39, -14, 23, 1, - 12, 31, -61, -155, -184, -158, -86, -60, -67, - -63, -84, -100, -81, -115, -171, -157, -150, -179, - -191, -209, -245, -217, -128, -54, -42, -73, -100, - -88, -10, 104, 199, 249, 227, 201, 204, 151, - 83, 75, 87, 84, 67, 34, 18, 44, 110, - 218, 275, 232, 190, 209, 263, 294, 256, 174, - 108, 37, -54, -110, -129, -179, -293, -360, -339, - -282, -190, -135, -188, -239, -234, -227, -182, -127, - -89, -51, -73, -136, -151, -85, 0, 72, 129, - 122, 65, 44, 103, 202, 272, 252, 170, 148, - 167, 152, 130, 127, 79, 14, 70, 157, 142, - 109, 70, -25, -57, -6, 46, 98, 135, 135, - 82, 16, 10, 68, 87, -20, -120, -116, -98, - -102, -129, -204, -271, -282, -252, -216, -215, -221, - -156, -70, -66, -120, -156, -146, -126, -84, -15, - -21, -76, -8, 131, 146, 86, 42, 12, 44, - 110, 169, 171, 91, 68, 173, 262, 248, 160, - 36, -90, -109, -24, -12, -57, -64, -78, -89, - -75, -87, -101, -82, -72, -76, -81, -63, -34, - -4, 61, 87, 46, 23, -1, -8, 40, 63, - 46, 45, 39, 14, -11, -25, -16, 36, 78, - 85, 110, 120, 132, 189, 
228, 217, 154, 89, - 57, 14, -14, -6, 0, 13, 8, -50, -68, - -60, -107, -140, -126, -122, -151, -147, -118, -105, - -85, -83, -100, -139, -195, -194, -168, -183, -173, - -148, -166, -168, -123, -59, -11, 20, 64, 98, - 80, 58, 83, 111, 143, 176, 171, 152, 146, - 165, 174, 143, 93, 30, 5, 21, 42, 35, - -37, -94, -61, -12, -5, -27, -58, -85, -81, - -11, 79, 65, -14, -17, 15, -4, -2, 39, - 20, -29, -19, 3, -11, -39, -62, -43, -34, - -60, -77, -119, -163, -128, -5, 87, 73, 51, - 116, 189, 217, 240, 234, 177, 192, 295, 344, - 313, 263, 236, 240, 230, 179, 99, 19, -25, - -16, -9, -35, -66, -53, -16, -40, -70, -81, - -102, -86, -87, -156, -225, -228, -145, -52, -22, - -57, -171, -255, -247, -208, -165, -187, -242, -275, - -261, -168, -75, -13, 8, -62, -125, -136, -133, - -81, -11, -17, -80, -115, -103, -27, 71, 134, - 137, 44, -48, -24, 69, 156, 194, 175, 112, - 55, 54, 101, 148, 157, 142, 100, 44, 27, - 63, 106, 107, 89, 67, 37, 17, 30, 63, - 69, 61, 21, -37, -55, -72, -53, -26, -53, - -77, -87, -109, -119, -80, -36, -29, -38, -48, - -57, -65, -16, 52, 83, 83, 24, -27, -14, - 9, 27, 52, 50, 45, 90, 132, 117, 75, - 16, -1, 60, 95, 55, 25, 26, 20, 61, - 119, 89, 1, -61, -68, -46, -36, -40, -39, - -49, -58, -16, 30, 13, -12, 18, 35, 6, - 3, 30, 22, 25, 52, 32, 12, 9, -5, - -16, -25, -33, -38, -44, -76, -118, -118, -96, - -54, -3, 9, -31, -82, -84, -35, 18, 25, - -26, -72, -48, 8, 25, 8, -20, -66, -105, - -102, -80, -73, -79, -80, -70, -59, -55, -82, - -113, -85, -51, -59, -57, -38, -13, -7, -18, - -6, 20, 51, 55, 18, -8, -7, 24, 78, - 119, 137, 135, 139, 153, 144, 155, 179, 166, - 128, 56, 8, 38, 85, 94, 72, 20, -32, - -9, 25, 17, -15, -84, -123, -106, -82, -62, - -60, -43, -4, -12, -45, -68, -108, -100, -47, - -49, -64, -50, -9, 37, 59, 68, 62, 53, - 49, 25, 13, 32, 40, 60, 109, 82, 18, - 10, -1, 21, 102, 111, 40, -10, -9, 20, - 31, 0, -51, -108, -135, -89, -21, 1, -54, - -125, -129, -113, -144, -205, -227, -167, -118, -114, - -100, -71, 5, 34, -51, -119, -120, -72, 10, - 
56, 51, 58, 65, 98, 135, 84, 20, -3, - -1, 57, 135, 137, 90, 88, 107, 102, 45, - -4, 9, 48, 95, 99, 65, 42, 44, 78, - 80, 29, 11, 39, 27, 0, 7, 19, 10, - -45, -99, -86, -77, -74, -57, -74, -84, -92, - -134, -114, -65, -73, -76, -96, -105, -50, -31, - -17, 17, 9, 18, 62, 75, 55, 63, 76, - 61, 61, 80, 103, 107, 110, 131, 134, 120, - 94, 66, 70, 78, 59, 52, 57, 53, 72, - 76, 31, -18, -53, -57, -35, -17, -9, -27, - -34, -7, -17, -26, -13, -60, -86, -53, -42, - -36, -36, -46, -13, 19, -16, -47, -15, 11, - -9, -18, -26, -24, 14, 8, -53, -54, 15, - 43, 15, -9, -5, 5, -12, -40, -57, -74, - -94, -105, -91, -20, 30, -10, -50, -58, -52, - -42, -47, -54, -61, -83, -64, -30, -3, 31, - 9, -35, -43, -31, 6, 50, 54, 55, 67, - 53, 43, 30, 27, 62, 37, -26, -52, -54, - -29, 3, -12, -23, 11, 26, 23, 31, 57, - 66, 46, 32, 35, 83, 124, 111, 124, 157, - 143, 101, 80, 60, 27, 11, 21, 22, 9, - -4, -26, -41, -35, -50, -103, -138, -116, -90, - -89, -90, -79, -74, -58, -18, -12, -29, -36, - -17, 22, 30, -1, -8, 8, 10, 19, 31, - 36, 38, 41, 28, -7, -14, -6, -20, -30, - -11, -2, -9, 0, 25, 56, 78, 68, 40, - 34, 47, 50, 40, 37, 26, 28, 53, 61, - 57, 25, -35, -75, -65, -48, -65, -81, -67, - -53, -41, 3, 19, -3, -9, -2, -1, -24, - -36, -23, -26, -29, -9, 0, -15, -17, -9, - 12, 50, 45, 14, 19, 37, 24, 9, 16, - 13, -16, -19, 3, -3, -12, -10, -23, -43, - -47, -38, -46, -44, -7, 3, -19, -13, -26, - -52, -29, -19, -32, 0, 11, -26, -24, -20, - -41, -30, -24, -53, -67, -26, 23, 20, 9, - 6, -8, 3, 16, 7, 3, -5, 2, 33, - 53, 72, 94, 86, 69, 96, 118, 95, 91, - 78, 32, 26, 48, 48, 37, 21, 7, -6, - -8, 8, 1, -17, -2, 18, 1, -28, -51, - -84, -93, -74, -46, -18, -19, -31, -10, 10, - 10, 7, -5, -30, -39, -28, -9, 10, 17, - 11, 14, 20, -1, 2, 18, 7, 15, 40, - 40, 32, 27, 23, 31, 43, 33, 7, -3, - 18, 51, 53, 31, 21, 14, 16, 14, 4, - 11, 16, 1, -24, -38, -33, -27, -50, -74, - -70, -60, -54, -44, -22, -22, -43, -33, -16, - -35, -36, -18, -27, -42, -46, -36, -17, -15, - -22, -21, -20, -2, 15, 12, 22, 27, 22, - 41, 
57, 60, 63, 54, 56, 65, 62, 68, - 58, 34, 53, 70, 58, 60, 51, 33, 41, - 39, 16, -3, -16, -18, -15, -18, -32, -76, - -85, -62, -82, -87, -68, -84, -75, -40, -48, - -55, -45, -42, -24, -14, -1, 27, 23, -1, - -2, 12, 15, 32, 55, 52, 55, 82, 81, - 58, 62, 59, 37, 24, 20, 17, 18, 19, - 15, 14, 5, -18, -27, -20, -19, -34, -39, - -29, -30, -27, -27, -48, -52, -54, -77, -48, - -18, -36, -34, -13, -21, -38, -28, -15, -7, - -6, -20, -18, 2, 4, -11, -5, 7, 1, - 1, 12, -2, -17, 7, 15, 2, 15, 34, - 48, 78, 94, 82, 66, 66, 64, 47, 44, - 57, 64, 74, 65, 34, 26, 31, 32, 33, - 18, 5, -1, -18, -22, -31, -54, -37, -32, - -74, -89, -77, -73, -65, -72, -75, -39, -21, - -31, -31, -24, -19, -8, -4, 7, 26, 22, - 15, 13, 11, 28, 47, 42, 35, 28, 5, - 18, 55, 55, 45, 44, 18, 9, 18, -2, - -5, 6, -15, -16, -12, -20, -4, 4, -15, - -18, -10, -5, -2, -16, -24, -14, -7, -14, - -33, -33, -20, -17, -17, -18, -30, -37, -35, - -34, -13, -3, -28, -28, -10, -21, -17, -4, - -12, -16, -20, -27, -16, -8, -4, 14, 24, - 11, 17, 30, 27, 14, 7, 28, 30, 22, - 45, 47, 23, 31, 23, -5, 10, 17, -5, - 2, 15, 9, 20, 29, 11, -9, -8, 8, - 10, -1, -14, -30, -30, -8, -9, -20, -17, - -17, -12, 1, 6, -7, -18, -6, 10, -6, - -7, 29, 35, 21, 16, 9, 25, 44, 26, - 21, 34, 28, 40, 41, 9, -2, 1, 12, - 34, 18, -12, -10, -16, -29, -24, -25, -20, - -17, -35, -29, -12, -29, -39, -32, -30, -17, - -12, -28, -20, -5, -4, 7, 14, 10, 3, - -3, 0, 19, 27, 4, -21, -18, -7, -4, - 0, 1, -6, -17, -30, -24, -11, -9, 0, - -1, 0, -3, -12, 1, 15, -2, 3, 16, - -3, -8, 7, 3, 13, 32, 23, 10, -6, - -11, 8, 4, -12, -9, 3, 12, -2, -31, - -36, -33, -37, -17, -5, -20, -14, 4, 5, - 4, 6, 17, 31, 27, 23, 16, -1, -4, - 15, 24, 21, 18, 7, -7, -14, 18, 41, - 25, 14, 13, 2, 5, 12, 8, 15, 10, - 2, 13, 10, 3, 5, -1, 0, 11, 10, - 6, 2, 7, 10, -4, -3, 2, -13, -4, - 14, -4, -17, -11, -4, 8, 3, -8, -1, - -7, -20, -4, 23, 23, 8, 5, 24, 21, - -5, -2, 7, -9, -15, -8, -6, 6, 2, - -26, -19, 1, -19, -31, -27, -34, -41, -47, - -39, -12, -12, -29, -32, -41, -36, -26, -36, - 
-35, -33, -29, -1, 5, -13, -21, -21, -3, - 12, 1, -7, -1, 2, 12, 9, -1, 15, - 21, 18, 25, 4, -13, 5, 12, 16, 33, - 33, 19, 21, 26, 30, 30, 24, 23, 19, - 22, 34, 39, 28, 15, 14, 24, 24, 18, - 12, 10, 4, 8, 28, 29, 2, -7, 6, - 8, 10, 2, -13, -8, -2, 0, 12, 13, - -1, 3, 21, 26, 24, 17, 11, 15, 19, - 19, 19, 11, 1, 3, 3, 0, -5, -11, - -16, -26, -18, 3, -5, -17, 2, 10, 6, - 6, -8, -11, 4, -3, -17, -10, -17, -37, - -31, -17, -26, -37, -42, -53, -49, -34, -40, - -39, -21, -17, -23, -23, -25, -30, -24, -13, - -10, -10, 1, 1, -7, 7, 19, 11, 4, - -3, -8, 1, 6, 7, 25, 22, -5, 3, - 20, 7, -1, 14, 17, 18, 20, 12, 25, - 41, 23, 19, 37, 39, 21, 17, 23, 17, - 6, 9, 15, 4, -15, -8, 8, 7, 1, - -12, -18, -14, -15, -10, 0, -3, 3, 13, - -8, -21, -8, -26, -29, -1, -9, -24, -19, - -22, -24, -18, -25, -27, -28, -34, -26, -9, - -14, -14, -8, -8, -5, 4, 4, -10, -12, - -7, -8, -10, -15, -19, -10, -5, -9, -9, - -19, -33, -27, -14, -15, -14, -16, -25, -10, - 5, -7, -11, 2, 3, 7, 17, 28, 33, - 32, 33, 39, 49, 57, 63, 62, 64, 67, - 59, 55, 67, 71, 58, 53, 53, 44, 38, - 44, 51, 51, 45, 35, 34, 46, 55, 48, - 36, 21, 3, -5, 2, 7, 0, -17, -30, - -34, -48, -62, -64, -66, -66, -62, -79, -90, - -85, -88, -88, -85, -88, -103, -112, -112, -102, - -99, -102, -103, -110, -100, -80, -60, -57, -68, - -59, -45, -35, -6, 9, -3, 2, 32, 45, - 48, 51, 40, 51, 78, 85, 83, 87, 94, - 101, 104, 105, 100, 86, 82, 96, 102, 96, - 85, 68, 63, 65, 55, 50, 46, 28, 32, - 43, 33, 30, 27, 8, 18, 36, 27, 20, - 13, -14, -19, 8, 12, 0, -1, -12, -24, - -20, -27, -39, -39, -39, -44, -38, -32, -42, - -38, -33, -43, -55, -57, -60, -61, -56, -57, - -55, -43, -46, -58, -55, -50, -50, -51, -48, - -46, -44, -36, -26, -20, -13, -11, -8, 1, - 5, 0, 8, 21, 31, 42, 39, 43, 56, - 48, 37, 45, 45, 47, 52, 46, 40, 26, - 18, 28, 30, 22, 14, 0, -3, 8, 0, - -7, 0, -10, -13, -9, -13, -13, -18, -33, - -32, -26, -37, -41, -32, -26, -30, -34, -31, - -38, -40, -24, -25, -29, -15, -18, -23, -4, - 2, -7, 0, 5, 10, 22, 23, 25, 31, - 33, 37, 38, 39, 43, 
46, 41, 44, 46, - 37, 35, 46, 63, 67, 52, 38, 30, 35, - 41, 41, 41, 29, 15, 16, 4, -4, 3, - -12, -18, -13, -27, -39, -47, -55, -44, -43, - -53, -45, -36, -37, -37, -38, -40, -49, -57, - -41, -24, -28, -31, -26, -20, -15, -21, -23, - -18, -19, -14, -10, -11, 1, -6, -26, -14, - -1, -7, -10, -11, -9, 0, -4, -9, 3, - 8, 0, -2, 1, 16, 20, 7, 9, 10, - 8, 18, 12, 11, 17, -6, -19, 0, 0, - -10, -6, -12, -14, -11, -9, -2, -10, -19, - -9, -11, -4, 18, 7, -3, 9, 17, 23, - 28, 25, 19, 19, 24, 33, 37, 30, 28, - 35, 44, 43, 33, 31, 30, 26, 33, 39, - 35, 31, 27, 19, 23, 24, 19, 13, 0, - 0, 2, -7, -9, -10, -13, -6, -6, -23, - -28, -15, -9, -20, -34, -30, -15, -12, -11, - -3, -4, -4, 6, 15, 9, -11, -20, 3, - 26, 23, 1, -16, -3, 12, 2, -22, -36, - -35, -28, -20, -13, -19, -38, -43, -29, -11, - -5, -15, -37, -40, -9, 12, -1, -23, -30, - -16, 12, 21, -1, -25, -21, 4, 34, 55, - 34, -12, -11, 47, 99, 107, 58, 0, 8, - 78, 148, 151, 56, -40, -2, 142, 215, 99, - -67, -64, 76, 153, 99, -21, -107, -92, -1, - 106, 107, -123, -395, -334, 60, 274, -69, -597, - -626, -126, 238, 18, -447, -577, -312, -34, 20, - -89, -242, -332, -222, 74, 262, 64, -285, -232, - 259, 563, 294, -138, -130, 312, 642, 515, 189, - 57, 187, 415, 538, 467, 277, 109, 134, 334, - 441, 299, 59, -7, 128, 228, 146, -20, -99, - -34, 60, 24, -108, -188, -147, -57, -48, -142, - -224, -210, -144, -122, -175, -212, -176, -150, -199, - -256, -210, -100, -79, -195, -298, -248, -107, -48, - -110, -192, -224, -189, -112, -40, -31, -124, -238, - -193, -3, 87, -53, -221, -165, 48, 132, -2, - -150, -109, 61, 147, 83, -20, -60, -13, 85, - 157, 130, 17, -68, -10, 147, 217, 116, -20, - -21, 103, 200, 158, 52, 35, 105, 155, 132, - 81, 74, 110, 114, 74, 48, 68, 100, 77, - 27, 30, 48, 19, -15, 7, 63, 53, -56, - -123, -41, 81, 75, -61, -154, -84, 45, 68, - -24, -105, -76, 22, 53, -13, -63, -21, 54, - 59, -1, -34, 16, 80, 81, 48, 37, 61, - 89, 88, 101, 134, 132, 100, 83, 125, 188, - 173, 101, 95, 172, 214, 149, 68, 94, 181, - 177, 103, 83, 132, 
165, 122, 83, 140, 191, - 153, 92, 106, 198, 226, 138, 85, 146, 215, - 187, 110, 77, 115, 146, 115, 91, 96, 78, - 27, -3, 42, 102, 71, -23, -46, 30, 95, - 63, -18, -25, 77, 174, 138, 13, -25, 96, - 218, 181, 34, -70, -45, 17, 2, -67, -174, - -346, -516, -553, -446, -455, -789, -1213, -1308, -1046, - -878, -1179, -1691, -1839, -1528, -1219, -1292, -1623, -1772, - -1538, -1147, -921, -951, -1038, -929, -549, -95, 155, - 127, 97, 387, 931, 1339, 1380, 1234, 1276, 1661, - 2102, 2223, 2027, 1848, 1942, 2198, 2295, 2119, 1856, - 1725, 1745, 1752, 1601, 1335, 1102, 993, 952, 830, - 570, 286, 139, 133, 85, -135, -436, -638, -645, - -571, -620, -835, -1064, -1151, -1069, -951, -964, -1109, - -1209, -1162, -1044, -961, -944, -977, -1001, -912, -687, - -517, -623, -887, -897, -469, 10, -35, -590, -934, - -545, 184, 427, -53, -619, -563, 40, 489, 339, - -128, -306, -6, 403, 497, 232, -55, 0, 388, - 704, 584, 145, -76, 260, 816, 942, 485, 2, - 65, 575, 923, 744, 290, 76, 276, 596, 662, - 419, 134, 92, 280, 434, 344, 88, -66, 8, - 151, 126, -81, -239, -176, -29, -74, -351, -574, - -487, -208, -132, -426, -780, -797, -577, -595, -978, - -1169, -667, -36, -548, -2285, -3281, -1756, 927, 1236, - -1911, -5006, -4073, -66, 2017, -295, -3701, -3797, -892, - 975, -165, -1978, -1636, 374, 1482, 679, -567, -591, - 706, 2337, 3224, 2743, 1269, 287, 1221, 3597, 5083, - 4106, 1858, 972, 2334, 4096, 4167, 2806, 1916, 2383, - 3045, 2508, 1220, 820, 1784, 2669, 1981, 204, -876, - -470, 510, 803, 170, -787, -1568, -1893, -1598, -1027, - -992, -1803, -2610, -2484, -1905, -2113, -3113, -3399, -2267, - -1261, -2007, -3637, -3909, -2340, -893, -1158, -2272, -2486, - -1639, -915, -777, -596, -91, 196, 85, 210, 875, - 1373, 1247, 1219, 1958, 2718, 2328, 1196, 1008, 2350, - 3677, 3269, 1503, 366, 922, 2264, 2810, 1996, 608, - -168, 75, 680, 811, 395, -56, -318, -607, -966, - -1108, -925, -613, -368, -369, -919, -1926, -2460, -1685, - -300, 155, -611, -1524, -2204, -3227, -3859, -2037, 1622, - 2382, 
-2583, -8448, -7544, -84, 4814, 915, -6423, -7558, - -1746, 2515, -59, -4587, -3858, 1260, 3625, 187, -4148, - -3500, 1542, 5467, 4780, 1256, -1127, -403, 2481, 5332, - 6346, 5014, 2536, 1216, 2467, 5039, 6238, 5070, 3381, - 3269, 4173, 3905, 2248, 1586, 3299, 5240, 4362, 1004, - -1382, -489, 2113, 3168, 1620, -742, -1824, -1435, -897, - -1058, -1500, -1545, -1398, -1965, -3266, -4136, -3756, -2609, - -1804, -1986, -3087, -4599, -5296, -4051, -1731, -781, -2228, - -4092, -3977, -2325, -1353, -1568, -1490, -428, 178, -672, - -1650, -1058, 749, 2039, 2079, 1540, 897, 310, 572, - 2266, 4265, 4265, 1869, -231, 559, 3332, 4752, 3229, - 768, 101, 1364, 2463, 1984, 819, 411, 723, 675, - -162, -923, -743, -32, 185, -516, -1653, -2359, -2103, - -986, 42, -205, -1702, -2870, -2337, -809, -221, -982, - -1544, -946, -598, -2117, -4291, -4100, -857, 1948, 338, - -4799, -7972, -5403, 173, 2371, -1063, -5533, -5578, -1777, - 605, -985, -3249, -2213, 1184, 2691, 560, -2356, -2288, - 1233, 5244, 6441, 4004, 370, -663, 2555, 7404, 9282, - 6573, 2612, 1836, 4662, 7467, 7393, 5421, 4262, 4741, - 5362, 4705, 3163, 2397, 3337, 4887, 4810, 2254, -749, - -1316, 772, 2706, 2016, -573, -2552, -2746, -2012, -1647, - -1978, -2579, -3105, -3473, -3911, -4484, -4891, -4795, -4163, - -3543, -3538, -4275, -5356, -5743, -4637, -2614, -1301, -1825, - -3341, -4011, -2937, -751, 1007, 1245, 235, -639, -61, - 1626, 2864, 2967, 2734, 3013, 3329, 2914, 2312, 2666, - 3839, 4308, 3162, 1453, 768, 1255, 1887, 2006, 1715, - 1031, -297, -1660, -1690, -277, 813, -30, -2137, -3370, - -2854, -1553, -593, -413, -1146, -2567, -3440, -2369, -205, - 379, -1258, -2315, -812, 262, -3205, -8576, -7894, 738, - 7492, 1951, -11595, -17098, -6934, 7139, 8065, -4575, -14199, - -8946, 3606, 7504, -547, -8242, -5113, 4406, 8113, 2134, - -5040, -4089, 4157, 10934, 10158, 4167, -565, -192, 4428, - 9765, 12201, 9861, 4512, 1225, 3451, 8483, 10133, 6497, - 2574, 3333, 6806, 6986, 2487, -1214, 623, 5416, 6647, - 2204, -3289, 
-4556, -1565, 1544, 1525, -1236, -4293, -5695, - -5174, -3995, -3403, -3449, -3750, -4505, -6014, -7296, -6523, - -3849, -2096, -3288, -5722, -6004, -3581, -1497, -1960, -3330, - -2800, -434, 964, -111, -1739, -1136, 1736, 4151, 3736, - 1274, -451, 469, 3386, 5833, 5898, 3646, 1085, 272, - 1743, 4061, 5108, 3837, 1490, 246, 967, 1866, 859, - -1069, -974, 1542, 2835, 47, -4285, -5068, -1567, 1781, - 1223, -1997, -4227, -3747, -1720, 41, 245, -1228, -2972, - -2673, 22, 1980, -930, -7721, -11271, -5725, 4974, 8484, - -2007, -16979, -19255, -4670, 11057, 9690, -6417, -17537, -10841, - 4262, 9292, 206, -9128, -6224, 4828, 10018, 3699, -5183, - -5121, 4702, 14279, 14466, 5778, -2633, -2185, 7036, 16118, - 16305, 8081, 390, 499, 6580, 11150, 10036, 5704, 2902, - 3378, 4664, 3786, 863, -796, 1216, 4609, 4493, -338, - -5670, -6486, -2751, 884, 571, -3095, -6446, -6997, -5770, - -5041, -5016, -4216, -2579, -2468, -5088, -8129, -7964, -4228, - -323, 497, -1556, -3653, -3615, -1718, 464, 1808, 2386, - 2832, 3085, 2905, 2676, 3473, 5501, 7094, 6442, 3929, - 1663, 1436, 3254, 5807, 7100, 5044, -34, -4091, -2992, - 2149, 5333, 2562, -3067, -5877, -4480, -2080, -1793, -3026, - -3838, -3735, -3663, -4472, -5756, -5753, -3576, -640, -274, - -3965, -7787, -6757, -717, 4380, 3595, -1553, -5936, -8603, - -10223, -8952, -922, 9700, 9355, -7788, -25795, -22413, 2268, - 20887, 12133, -11291, -20129, -5899, 10236, 8585, -3645, -6300, - 4667, 14216, 9346, -3593, -8558, 715, 15085, 21179, 14887, - 3733, -2703, -675, 7170, 15131, 18360, 13959, 4205, -2825, - -656, 7594, 11845, 7182, 319, -439, 3255, 3213, -3299, - -8972, -6318, 2300, 7190, 2254, -9247, -17334, -15064, -4452, - 5160, 5127, -4268, -14501, -17256, -11145, -1830, 3786, 2984, - -2498, -8101, -9587, -5703, 622, 4570, 4035, 1442, 729, - 2493, 3534, 2433, 2239, 5944, 11438, 12371, 6496, -211, - -156, 7092, 13566, 11979, 3928, -2545, -2226, 2713, 6150, - 5117, 1270, -1851, -2859, -2376, -1909, -2364, -3401, -4183, - -3897, -2875, 
-3205, -5503, -7822, -7501, -3934, -942, -1572, - -4262, -5939, -4671, -2353, -1387, -1159, -1270, -1328, -606, - 474, 1044, -2647, -11603, -17081, -10374, 5922, 14849, 2056, - -22033, -31238, -14612, 11094, 17910, 1778, -15538, -15417, -2045, - 6690, 2855, -2559, 473, 8823, 11423, 3782, -4649, -2775, - 9111, 20847, 21610, 11572, 962, -1465, 5731, 15559, 20008, - 16950, 9230, 2204, 114, 3088, 8130, 10523, 7643, 2045, - -2107, -2945, -2538, -3593, -5210, -4403, -857, 1328, -2497, - -11667, -18881, -16866, -6286, 3400, 2835, -7811, -18322, -19279, - -10025, 1525, 6930, 3766, -4647, -11401, -9904, -322, 10100, - 12428, 5874, -274, 926, 6762, 9360, 6778, 5904, 10509, - 15077, 12681, 3846, -1653, 2460, 11036, 14737, 8967, -1021, - -6168, -3899, 2328, 6041, 3404, -2878, -7672, -6869, -1918, - 801, -2188, -7419, -8083, -2687, 1898, -692, -8121, -11198, - -5642, 2830, 5915, 1120, -5666, -8314, -5770, 118, 4614, - 4713, 1482, -2544, -3331, -3779, -8931, -13840, -10273, 3355, - 13432, 2906, -20058, -30890, -17080, 7759, 16047, 2886, -12525, - -15117, -5998, 1614, 2294, 2684, 4610, 6236, 5486, 2514, - 1346, 1962, 4564, 11022, 17438, 18182, 10179, -796, -3019, - 5456, 15942, 18468, 11176, 2796, -143, 1670, 3922, 3836, - 3337, 3330, 1623, -2609, -7177, -7654, -4250, -2210, -3491, - -5312, -4380, -3103, -6738, -13209, -14278, -6529, 3346, 4931, - -2861, -11176, -12097, -5552, 2679, 7102, 6050, 1301, -3350, - -3378, 1785, 7413, 9059, 7013, 5043, 5331, 5197, 3143, - 1862, 3790, 8037, 10159, 7236, 1450, -3393, -3980, 598, - 6251, 7410, 1502, -7144, -10260, -5116, 2386, 4197, -894, - -6255, -6026, -1493, 873, -1639, -4426, -2720, 2252, 4206, - 158, -4631, -4466, 537, 4709, 4528, 1691, -828, -1394, - -455, 756, 2662, 3101, 1730, -3579, -12987, -18531, -12998, - 1944, 11963, 1503, -19826, -29919, -18138, 2254, 7644, -1829, - -9260, -6516, 134, -793, -5234, -2336, 6264, 12828, 11829, - 6589, 3429, 2592, 4795, 11433, 19490, 21681, 13136, 379, - -4138, 3585, 14812, 17633, 10124, 623, 
-2287, 696, 2273, - -926, -5000, -4391, -386, 139, -4657, -11003, -13946, -11930, - -7460, -1932, 1277, -2311, -10543, -16920, -14512, -4039, 4987, - 7518, 3175, -4213, -7535, -4747, 3590, 12231, 13419, 8429, - 2377, 1080, 5563, 8497, 7304, 5331, 5656, 8235, 6997, - 998, -3131, -1857, 3017, 5883, 3744, -408, -4503, -6489, - -4796, -374, 3254, 1651, -2830, -5206, -3690, -681, -969, - -2819, -2616, 19, 3379, 2359, -2476, -6413, -6111, -463, - 4664, 4106, -565, -4801, -4960, -1242, 2479, 3706, 2168, - -1104, -3048, -1563, 1217, 2013, -5714, -17921, -21743, -10839, - 7751, 13091, -4648, -26509, -29653, -9872, 10100, 9523, -4335, - -12121, -5509, 4923, 6380, 1839, -508, 3312, 10704, 14545, - 12317, 5508, -243, 2421, 11485, 19096, 18306, 8626, -1357, - -5542, -1695, 7815, 13549, 10229, -23, -8373, -7496, -2775, - -1016, -2900, -4868, -4103, -4535, -6851, -8099, -8137, -6414, - -4023, -1790, -45, -1513, -4791, -6160, -4105, 1060, 5970, - 7099, 3934, -996, -2213, 1973, 6975, 7927, 4726, 2474, - 3951, 5221, 2642, -2359, -3579, 1362, 6614, 6282, 116, - -5643, -5733, -1884, 2107, 3418, 2566, 684, -2319, -3803, - -2133, 1512, 2943, 475, -1004, 753, 3095, 1652, -3074, - -4562, -932, 3815, 4486, -22, -4199, -4666, -2201, 284, - 316, -914, -2297, -2441, -1538, -435, 909, 626, -1222, - -1534, -429, 1711, 2386, -1786, -10676, -18200, -16272, -3805, - 9505, 8238, -9397, -24577, -22256, -4907, 8659, 5940, -3701, - -6764, 40, 6190, 4239, 208, 238, 7081, 14458, 15143, - 10726, 3479, -706, 1700, 9131, 17577, 17708, 7959, -5009, - -11508, -5347, 5635, 10789, 6499, -3121, -9303, -9814, -6625, - -3333, -3193, -4349, -5615, -6188, -5123, -4441, -4550, -4074, - -2769, -61, 2441, 2881, 1395, -578, -341, 2509, 6034, - 8202, 6377, 2696, 1272, 2589, 4787, 4611, 2378, 2124, - 3911, 4872, 2049, -3374, -5770, -2705, 3179, 5905, 2589, - -2792, -5419, -3176, 1056, 2875, 2483, 1205, 605, 856, - 1012, 892, 105, -411, 707, 2924, 4184, 1755, -2553, - -4857, -3556, 401, 2466, 945, -2315, -5556, -5549, 
-2241, - 534, 601, -1774, -3034, -1962, -886, -448, -720, -467, - 864, 760, -22, -2546, -10211, -17121, -15877, -4803, 7993, - 7254, -6563, -18374, -17755, -6143, 3291, 4322, 1822, 416, - 2788, 5190, 4256, 2627, 2590, 6398, 12709, 15757, 12829, - 5542, -667, 167, 7241, 14346, 14826, 6392, -3516, -7434, - -4607, 1054, 2988, 847, -1549, -2641, -3046, -5363, -8256, - -9130, -6906, -1460, 2260, 1568, -2911, -8580, -9418, -3675, - 5021, 10127, 7909, 1478, -4015, -3331, 2450, 7291, 7632, - 2567, -2022, -899, 3418, 5544, 1349, -4117, -3409, 1758, - 6000, 3526, -3975, -7331, -3931, 2747, 7037, 4962, -21, - -2902, -2008, 1306, 4461, 6364, 5956, 3623, 1734, 793, - 44, -893, -1041, 1633, 5264, 4870, -943, -7404, -8611, - -4974, -1192, 185, -1334, -3672, -4910, -5132, -4387, -3532, - -3233, -2430, -469, 1245, 892, -969, -2441, -2140, 320, - 4999, 5954, -4638, -20056, -24424, -8954, 13558, 16089, -3145, - -20665, -19447, -4802, 4488, 3733, 943, 683, 3109, 6219, - 9247, 7736, 782, -1410, 8024, 20877, 20174, 4723, -7148, - -2758, 11240, 17896, 11462, 414, -6134, -4913, 113, 2818, - 98, -5900, -8369, -4446, 924, 1657, -3389, -10569, -13223, - -7690, 2339, 7741, 1634, -9014, -10982, -1172, 9642, 9098, - 1310, -2795, -1040, 2790, 3808, 3559, 3064, -527, -3160, - -1391, 3120, 5224, -144, -6714, -6416, -719, 5630, 7253, - 2735, -2973, -4325, 679, 7146, 8220, 4055, -42, 814, - 5288, 7658, 6592, 3051, -746, -541, 3401, 6030, 1953, - -6340, -8619, -2689, 4076, 3217, -4875, -9612, -7826, -4293, - -2441, -4080, -5740, -5529, -3656, -506, -1035, -5787, -9518, - -7034, 2323, 9287, 6495, -1853, -6110, -3281, -1708, -8958, - -19544, -18870, -2771, 13029, 10762, -7491, -21837, -18923, -4183, - 8733, 12580, 9779, 4597, 738, 1460, 6302, 9711, 8375, - 8143, 12512, 15808, 11272, 389, -5554, 161, 11080, 15851, - 10426, 692, -6372, -6808, -2525, 652, 827, -219, -349, - -622, -3328, -7883, -11020, -8961, -3240, 1884, 4155, 1995, - -3530, -7816, -6444, -218, 6086, 9279, 7901, 3113, -2352, - -5757, 
-3836, 2022, 4572, 894, -3519, -3311, -534, -618, - -3716, -5515, -3290, 1495, 4374, 4455, 2961, -645, -3247, - -656, 5273, 9838, 9751, 5755, 1863, 158, 1457, 4585, - 6390, 5379, 2894, 2284, 1867, -2279, -7051, -6578, 70, - 4745, 1660, -4524, -8007, -7088, -5690, -5467, -4178, -2679, - -2218, -3422, -4167, -4313, -6105, -6633, -4202, 864, 5119, - 4084, -163, -5331, -8699, -8710, -7313, -4649, -2471, -1419, - -1136, -3199, -6428, -8048, -4902, 1089, 4681, 5723, 5535, - 5146, 4006, 2052, 2314, 5274, 8680, 9907, 8776, 6722, - 2548, -2403, -3303, 1224, 7406, 9468, 5089, -1197, -4384, - -3570, -298, 1776, 2005, 2041, 1326, 971, -180, -2334, - -1170, 1913, 4281, 4732, 2874, 1174, -1341, -3384, -2503, - 368, 4031, 3270, -986, -3519, -5360, -6004, -5576, -3603, - 208, 708, -2137, -4940, -5349, -3588, -2796, -1399, 1017, - 3144, 4196, 2483, 828, 338, 919, 3842, 6202, 7189, - 7499, 6330, 4847, 3252, 2136, 3698, 5845, 5566, 3019, - 267, -55, -1091, -4220, -5041, -3430, -280, 171, -4649, - -8723, -9280, -5975, -3192, -3974, -3912, -4053, -3748, -3570, - -5871, -5499, -3552, -1691, 320, 341, 748, -313, -3436, - -4687, -3681, 21, 2550, 643, -2123, -3254, -2226, -1044, - -1617, -1510, 183, 1250, 726, -1662, -3388, -1759, 933, - 3817, 5242, 3025, 248, -1339, -514, 2022, 3410, 3970, - 3324, 2632, 2603, 2240, 2166, 1271, 487, 1076, 2039, - 3296, 3836, 3610, 2913, 2718, 4213, 5555, 6023, 4769, - 2442, 2067, 2173, 1623, 1201, 348, 52, -124, -1528, - -2834, -3604, -3463, -2357, -2564, -3775, -3801, -1929, -465, - -2109, -3743, -2657, 200, 2580, 954, -1304, -95, 1549, - 2303, 1795, 1633, 3356, 3699, 2361, 792, 1148, 4045, - 4820, 3851, 3197, 2449, 2704, 1722, -652, -1154, -393, - 113, -1010, -3328, -4342, -3939, -3345, -3697, -5115, -5610, - -4202, -3639, -5088, -5351, -3216, -862, -414, -1839, -3996, - -4831, -2467, 147, 1055, 1288, -247, -2225, -2233, -1562, - -1278, -936, -961, -935, -367, -323, -459, -1940, -3974, - -2262, -13, 2, -401, -1825, -2308, -1124, 448, 2154, - 2434, 1300, 
-812, -1337, 1325, 3374, 3466, 2500, 2156, - 3439, 3549, 2068, 1392, 1986, 3025, 3944, 3898, 3259, - 4467, 6347, 5356, 2893, 1690, 2072, 4136, 5313, 2776, - -236, -1063, -794, 524, 802, -1377, -2879, -2167, -1439, - -1595, -1539, -1666, -2495, -2375, -1253, -515, -187, -1409, - -2847, -511, 2411, 1761, 492, -18, 607, 2350, 3288, - 3505, 2741, 1099, 699, 2017, 3214, 3333, 1567, 33, - 1260, 1925, 808, -377, -2558, -3781, -1677, 164, -580, - -1727, -2619, -3421, -3586, -3957, -4562, -3646, -2285, -3437, - -5293, -4792, -4128, -4012, -2920, -2249, -2439, -3737, -5607, - -4427, -1259, 71, 609, 555, -1039, -3354, -5388, -3760, - 415, 2513, 2513, 819, -1436, -2780, -2740, -501, 2727, - 3936, 1491, -965, -766, -484, -223, 361, 695, 1771, - 1130, -1839, -1764, 797, -31, -2549, -1790, 2108, 4043, - 887, -154, 2411, 2605, 2012, 1977, 3923, 6630, 4176, - 107, -311, 1731, 1910, 1011, 3119, 3219, 998, -1282, - -2832, -1645, -685, 945, 2574, 2543, -267, -5015, -3819, - -342, 1228, 2055, -619, -1233, 2069, 2896, 1095, 62, - 1365, 3366, 4584, 4956, 3323, -19, -50, 4024, 5222, - 3695, 3118, 1933, 1256, 1443, 128, -119, 2043, 2477, - 1823, 1324, 30, -1363, -3023, -3074, -188, 621, -1775, - -2806, -2961, -2753, -4359, -5350, -1220, -116, -4157, -4811, - -2793, -1040, -1957, -2862, -1901, -3192, -3720, -2357, -1727, - -387, -2131, -5011, -3650, -454, 596, -1298, -3716, -3122, - 496, 136, -2415, -1675, -811, -837, 140, -1243, -187, - -1431, -5320, -2121, 100, -467, 2465, 681, -2093, 1224, - 1632, 1428, 1776, 648, 2480, 3622, 876, 259, 1403, - 2139, 3117, 497, -763, -170, 279, 1769, 342, -871, - -25, -1549, -2290, 290, 1042, -796, -4291, -3895, 159, - 1264, -540, -2328, -702, 1972, 852, -2274, -798, 1126, - -579, -480, 3481, 3833, 1004, 901, 1536, 1809, 3103, - 2521, 3183, 5220, 1800, -266, 4663, 4230, -790, 159, - 2274, 5114, 4304, -1998, 344, 4921, -343, -2048, 1180, - 2112, 3109, -10, -1818, 552, -1360, -2889, -1302, -1918, - -37, 1406, -1762, -3054, -1446, -2073, -4292, -3214, 1163, 
- 2333, -712, -2583, -2058, -1034, -600, -3796, -2395, 2137, - -1122, -1927, 702, -2196, -4374, -3257, -1558, -256, -728, - -395, -176, -1529, -2772, -1121, -340, -1147, -250, -4079, - -473, 4241, -2818, -3523, 3255, 2355, -2550, -1082, 1197, - 2213, -94, -237, 3123, 1314, -1075, 977, 1081, 2045, - 2966, -1328, -1069, -741, -524, -380, -2766, -986, 926, - -3281, -1554, 2554, -3620, -6394, -1680, -321, 2889, 243, - -1567, 2276, -1294, -525, 2010, -4883, -1495, 6778, 2085, - -873, 2496, 418, -1156, -1179, 1604, 6173, 1190, -2381, - 5788, 2431, -4941, -242, 1248, 1023, 4426, 3399, 2726, - 1388, -922, 595, 392, 1414, 6260, 2673, -973, 2237, - 1776, -2393, -757, 4158, 2842, -2327, 505, 1230, -3623, - -917, 336, -1400, -1018, 1771, 2696, -570, -2435, 886, - 2309, -2865, -1328, 2077, -1967, -3486, -411, 961, -1661, - -1979, 1179, -493, -2597, 1995, 284, -3300, -2213, 184, - 312, -1665, -641, -1325, -1276, 90, 69, 476, -778, - -1099, 853, 1515, 1630, 1188, -877, -1751, 702, 2983, - -201, 664, 4018, -352, -1864, 875, 2367, 813, -2463, - -702, 886, -2204, -2216, 399, -1729, -2408, 1412, -2757, - -3530, 449, -2554, -3910, 906, 697, -1696, 566, -1360, - -1991, 81, -1756, -159, 1180, -667, -584, -359, 183, - 1943, -412, -1747, 1659, 1961, 280, 294, 222, 2000, - 2076, 829, -43, -880, 3353, 3615, 1279, 1746, -1031, - 1301, 3477, -777, 2567, 1215, -2344, 3556, 561, -2166, - 1119, 2377, -391, -1825, -2359, 49, 1764, 391, -291, - 325, 1223, 1443, -624, -2828, 1381, 2438, 28, -652, - -166, 581, -2039, -374, -20, -2459, -1149, 1505, 2008, - -1798, -3848, -1796, -2208, -2224, -878, 728, -154, -534, - 1061, 538, -1465, 73, 1147, 82, -119, 3800, 4797, - -873, 784, 1458, -148, 3180, 1319, 908, 4951, 584, - -57, 2394, -967, 586, 405, -1601, 3566, -285, -3949, - -1301, -1953, -1223, -1831, -3477, -779, -389, -3169, -1828, - -1496, -1451, -556, -3327, -209, 534, -4908, 131, -386, - -5232, 1373, 2129, -1740, -1957, -1102, 76, 396, -1426, - -179, 1357, -3276, -1420, 3819, -44, 56, 2777, -1202, 
- 1908, 1410, 2031, 3495, -2197, -163, 1565, 239, 2803, - 480, -1636, 1180, 616, 1206, 1166, -1579, 1572, 814, - -774, 2310, 740, -2606, 1234, -603, -362, 1562, -2134, - 652, -777, -2353, 5464, 377, -2490, 1012, 157, 680, - -1389, -1898, 1135, -1, -1730, 1800, -1466, -1687, -1469, - -3250, -1081, 1381, -81, -204, -26, 353, 1941, 174, - 104, 2009, 1032, -871, 3280, 3398, -651, -154, 3309, - 1964, 448, 812, -17, 887, 2405, 3295, -54, -2396, - 1410, 1380, -1156, 296, -1706, -1729, 401, -970, -878, - -723, -2285, 1259, 1320, -1960, -1039, -211, -661, -763, - -1599, -43, 308, -1841, 72, -2075, -3010, -497, 506, - -377, 247, 1932, -1788, -2419, 257, 208, -2176, 488, - 2827, -1720, -1649, -619, 520, 1103, -1231, -1327, 2162, - 1535, -383, 315, -1488, -235, 1761, -27, -232, 515, - 127, -2239, 654, 2871, -379, -1274, 2445, 874, -2444, - 514, -206, -1289, 1314, 1869, 1316, 1878, -1454, -982, - 476, 359, 2084, -708, 405, -246, -1071, 1757, -866, - -2331, 783, 501, -853, 896, 36, -2468, -1138, 1445, - -613, -687, 1999, -449, -731, 1478, 384, -45, 96, - 1530, 1919, 186, -94, 1347, -329, -348, 1631, 574, - 1062, 735, -1652, 675, 244, 1241, 1137, -2469, 621, - 45, -612, 1308, -2015, -208, 2392, -1646, -67, 77, - -1558, 113, 1263, -236, -971, -333, -733, -555, 2024, - -135, -3817, -398, 1696, -1179, -1473, 1175, -166, 618, - 1132, -2504, -575, 146, -688, 1323, 150, -2021, 15, - 1673, 347, -1535, -106, 235, -32, 1167, -471, -503, - -1260, 416, -13, -1082, 1036, -790, -1676, 487, 985, - 77, 57, -1175, 1146, 2023, -1706, -404, 3249, -739, - -979, 3044, -514, -168, 2201, -2863, 1009, 1833, -2309, - 1565, 476, -1698, 1667, -496, -2193, 1686, 532, 336, - -1095, -1655, 578, -909, -1263, 2569, -2833, -1808, 2860, - -822, 27, 1098, -1371, 1585, -284, -1074, 2944, -764, - -2871, 2484, 1179, -1213, -670, -1226, 1112, 1837, -299, - -388, -51, 1, 992, -723, -361, 1723, -1115, -2012, - 1261, -9, -127, -510, -1550, 1448, 957, -1930, 171, - 776, -2104, 14, 764, -599, -745, -438, -371, -659, - 
1075, 282, -3116, 684, 3747, 22, -2139, 816, 1413, - -333, 458, 906, 483, -1084, 797, 1039, -467, -377, - 1386, -1182, 610, 1787, -1354, -2800, 2638, 424, -2372, - 1153, -51, -689, 290, -2199, 818, 3755, -2674, -1689, - 3497, -507, -1978, 1729, 1413, 215, -76, 53, 759, - 371, -1529, 1005, -770, -685, 1754, -908, -653, 1047, - -1066, -784, -199, -526, 86, -1750, -916, 1839, 580, - -1884, 319, 226, -977, 212, 202, -741, -1013, 2057, - 69, -2961, 974, 1964, -512, -224, 1554, -79, -1142, - 1853, -71, 1009, 1174, -718, 2040, -158, -1508, 1042, - 0, -1219, 1212, 448, -208, -47, -779, -867, 1924, - -254, -1085, -221, -1283, 1543, -584, -951, 225, -1089, - -464, -853, -615, 1576, -2313, -1214, 950, -2548, -314, - 1201, -1527, 952, 764, -1915, 528, 169, -1676, 1742, - 425, -2346, 932, 290, 109, 492, -379, 932, 70, - 582, 135, 769, 1665, -1751, 576, 1013, 366, 2339, - 71, 637, 1500, 576, 111, 494, 765, 1170, 1421, - -5, -892, 2054, -640, 160, 1426, -651, 348, -841, - -558, 1563, 277, -408, -1468, 482, -1538, -2255, 968, - -1307, -454, 1306, -3085, -1680, 2624, -2191, -1719, 1891, - -3826, -1441, 2736, -3694, -266, 1897, -4468, 841, 2828, - -4060, -318, 2305, -1662, 528, 3056, -2429, -156, 2045, - -753, 475, 419, -597, 1100, 1845, 504, 1067, -402, - -824, 1807, 1192, 459, 200, 1728, 50, -497, 678, - -355, 938, 1239, -1223, 360, 1251, -95, 981, 1029, - -1940, 260, 1627, -2387, 3426, 519, -3141, 1822, -506, - -1471, 1101, -2137, 1069, 885, -2618, 1673, -463, -1558, - 1439, -386, -1923, 1538, -1313, -1735, 540, -1433, -915, - 494, -839, -1527, -1143, 480, -1081, 27, 1732, -1285, - -1833, 1952, -667, -1626, 1819, -1293, -1323, 2139, -376, - -1392, 1277, -1172, -240, 2907, -1875, -238, 2573, -1068, - -471, 2065, -686, -1315, 2575, 233, -1005, 1135, 706, - 534, 278, -182, 1091, -21, -222, 1413, -371, -54, - 1108, -103, 382, -70, 787, 894, -108, 1308, 1113, - -1412, 574, 1140, -2032, 500, 569, -1251, 951, -50, - -1398, 772, -474, -1536, 1297, 251, -2321, 109, -703, - -425, 40, 
-1354, -773, -225, -1743, -1839, 1244, 261, - -3082, -424, 1162, -937, 123, -322, -407, -561, -331, - 1369, -1142, -1050, 1024, 1116, -213, -752, 1521, -383, - -415, 1011, 947, -713, 743, 1945, -237, 881, 600, - -757, 885, -835, 756, 2454, -1985, 699, 1572, -1652, - 673, 232, -42, 1975, -736, -270, 1660, -704, -96, - 1264, -428, 278, 774, -954, -1325, 756, 1275, -594, - -353, 204, -1130, -782, -432, -979, 268, 378, 20, - -870, 405, -357, -1661, 637, 473, 293, -314, -895, - 3, -175, -1016, -643, 204, -588, -1007, -131, 401, - -849, -476, 271, 320, -198, 533, -25, -1994, 1421, - 525, -1611, 1261, 507, -488, 1093, 361, -1814, 2230, - 312, -196, 3242, -803, -962, 1714, -1479, 1426, 1612, - -1953, 1376, -581, -669, 1370, -1251, 426, 1274, -470, - 1757, 807, -589, 1275, 126, -871, 1025, -1331, 287, - 1258, -1813, 146, -839, -1471, 828, -402, -281, 1704, - -1341, -231, 939, -1035, -472, -197, -764, -380, -816, - -266, 382, -497, -1708, -591, 1119, -1941, 178, 969, - -1656, 685, 1004, -1114, -127, -1473, -678, 1610, -1253, - 277, 1807, -1642, -461, 2033, -1449, 392, 98, -157, - 1525, -860, 2455, 413, -2159, 2457, 475, -374, 1532, - -981, 843, 973, 324, 1168, 225, -407, 1487, 681, - -680, 1098, 117, 245, 1238, -223, 1076, -428, -466, - 2593, -663, -1225, 1303, -933, -561, 1190, -1071, -1229, - 406, -284, -13, 198, -1494, -637, 352, -1960, 420, - 49, -1472, -761, -234, -2213, -1750, -521, -1554, -813, - 662, -633, -1388, -15, -947, -391, -152, -894, 631, - -461, -885, 633, -51, -1063, 218, 1149, -61, -274, - 988, -140, 7, 1774, 1558, -623, 755, 1352, -511, - 1106, 744, 17, 2640, -91, 697, 1547, -1757, 1832, - 1859, -206, 1505, 575, -444, 556, 250, 1786, 792, - -125, -266, 407, 501, 798, -536, -1214, 58, 6, - 354, -685, 613, 99, -2022, -116, -236, -182, 263, - -824, -1187, -142, -138, -1228, -1008, 786, -1421, -1127, - -269, -2278, 841, 222, -2423, 678, -1153, -2082, 574, - -570, -729, 180, -777, 212, 270, -274, 1077, -493, - 118, 804, -1260, 349, 799, 545, 481, 971, 1099, - 
1146, -273, 34, 1728, 1128, 411, 758, 308, -808, - 950, 1490, 209, -265, 1154, -11, -460, 2644, -122, - -728, 2033, -1100, -305, 1774, -208, -1567, -57, -140, - -670, -454, -1390, -80, 978, -438, -731, -684, 344, - -458, -199, -126, -1663, -883, 642, -1517, -1144, -375, - -422, -452, -1815, -791, 763, -1502, -205, 684, -1641, - 448, 1399, -2160, 804, 1088, -2214, 1030, 1585, -1093, - -11, 1718, -360, -81, 1294, 398, 218, 1225, 644, - 505, 2090, -385, 526, 2111, -303, -316, 1550, 1323, - -459, 881, 1874, -1256, 1429, 2485, -1003, -552, 14, - 432, 952, 471, -633, 408, -358, 140, 554, -1260, - -404, 245, -2572, 954, 1005, -1621, -82, -175, -957, - 112, 106, -1117, -819, -62, -785, 71, 93, -1296, - -1680, 242, -956, -2696, 302, -204, -1404, 254, -558, - -201, -630, 16, -436, -1647, 1649, -1096, -1267, 2273, - -1270, 20, 1749, -2509, 780, 942, -1859, 2762, 304, - -300, 2617, -947, 861, 2601, -1153, 754, 1629, -681, - 686, 1443, -235, 1900, 5, -565, 1559, 285, -170, - 757, 480, 547, 752, -427, 50, 839, -95, -791, - -1698, -291, -62, -1730, 524, 1008, -2176, -369, 165, - -749, -972, -287, 889, -1218, -1712, 833, -855, -995, - -14, -793, -1815, 605, -607, -1890, 769, -781, 230, - 1155, -2000, 876, 1835, -1617, 9, 1058, -1232, 859, - 1486, -1301, 1595, 501, -951, 2935, -921, -634, 2826, - -793, 655, 2660, -232, 235, 1879, 481, -51, 804, - 987, -360, -331, 2099, -302, -149, 1966, -1233, -12, - 1330, -2265, 1256, -116, -1394, 2937, -995, -1572, 2964, - -2257, -2587, 1820, -2132, -1609, 778, -1596, -486, 560, - -1749, 274, -706, -1714, 1304, -360, -2657, 1833, -750, - -1729, 433, -1461, -794, -1545, -892, 385, -891, -374, - 1261, -589, 235, 815, -773, -669, 636, -471, 136, - 871, -392, 782, 677, -472, 1130, 1029, -1262, 1070, - 2171, 575, 675, 600, 2104, 1077, -182, 2621, -604, - -30, 3302, -1331, 599, 742, 291, 1329, -551, 1043, - 1729, -1754, 1220, 1113, -2174, 1281, 743, -2027, 851, - -205, -1576, 214, -1629, -605, -394, -1508, -254, -63, - -489, -847, -26, -997, -1065, 
-120, -376, -1283, -1393, - 83, -212, -1610, 419, -1120, -590, 395, -1210, -21, - -273, -622, 899, -196, -1059, 1130, 616, -529, -166, - 794, 22, -216, 862, 664, -390, 980, 228, 789, - 182, 402, 2149, -1133, 799, 2637, -799, 176, 1306, - 905, -93, 677, 338, 121, 483, 297, 339, 347, - 249, 731, 40, 66, 112, -889, -128, 582, -1191, - -67, -1364, -233, 488, -1734, -634, 1517, -1657, -1015, - 594, -1422, 1396, -1357, -1617, 1254, -1596, -941, 789, - -1860, -77, 245, -327, 569, -723, 104, 905, -543, - -918, 1387, -42, -440, 619, 68, 45, 1364, -880, - 19, 1491, -561, 1174, 1403, -1411, 1351, 1222, -612, - 864, 877, -658, 382, 864, -552, 1286, 309, -105, - 1083, -170, -289, 1049, -248, -537, 625, -48, 337, - -385, 532, -315, -1398, 588, -628, -1192, 649, -806, - -170, 541, -2267, 1052, 274, -1970, 833, 253, -1345, - -290, -120, -959, -94, -189, -1397, -136, -155, -654, - 207, -706, 617, 415, -1962, 1169, 670, -1132, 319, - 297, -589, 100, 510, -620, 610, -153, -15, 1327, - -99, 229, 281, 169, 1015, -106, 1197, 577, -698, - 577, 931, -964, 1605, 505, -1713, 2369, 115, -1585, - 1839, 664, -1411, 867, 620, 329, 491, -1119, 420, - 266, -1708, 499, -69, -1037, 795, -321, -959, 32, - 235, -1748, 295, -249, -230, 485, -1185, -97, 489, - -2036, 711, 405, -2800, 593, 434, -1038, 536, 347, - -570, 705, -806, -290, 818, -999, 53, 1585, -756, - -657, 1180, 115, -364, 217, -226, 1033, 347, -20, - 611, 658, 590, -128, -451, 1676, -660, -21, 805, - -880, 1481, 412, -1534, 1522, 221, -132, 662, -407, - 613, 1132, -551, -187, 1184, -577, -444, 953, -1034, - -472, 461, -865, -99, 637, -572, 300, 450, -591, - 137, 404, -972, 306, -524, -1167, 433, 124, -1326, - -368, -305, -917, 452, -626, -695, 656, 258, -1401, - 270, 446, -1045, 636, -357, -1072, 913, 512, -1732, - 489, 952, -747, 58, 673, -453, 1125, -488, 46, - 1723, -1244, 417, 1803, -1215, 623, 659, -560, 676, - -9, 92, 701, 1100, -623, 142, 283, -512, 547, - 576, -525, -155, 1143, -1286, -329, 1959, -1302, -459, - 1188, -1199, 1020, 
-118, -1303, 956, -905, -647, 595, - -356, -1354, -74, 750, -791, -335, 56, -862, -36, - 276, -279, 46, -485, -181, 196, -584, -238, 259, - -314, -77, 383, 509, -386, -180, 859, -542, 955, - 372, -362, 1458, 113, -106, 1495, -534, 63, 1295, - -505, 846, 983, -1097, 1764, 320, -185, 1061, -525, - 115, 217, -328, 326, 312, 374, 179, -683, 485, - -1286, 147, -583, -979, 888, -504, -1235, 715, -1050, - -1111, 848, -828, -1043, -115, -327, 22, -451, -1008, - 98, -262, -545, -363, -48, -257, -731, 878, 96, - -1186, 426, 359, -1101, 1074, -267, 521, -375, -166, - 1398, -994, 780, 550, 124, -298, 581, 236, 305, - -111, 396, 741, -10, 662, 155, 271, 563, 65, - -318, 812, -483, 843, 75, -714, 1152, -26, -190, - -97, 533, -111, -564, 724, -24, -820, 835, -473, - -632, 154, -104, -932, 919, -606, -619, 496, -310, - -271, -360, 120, -630, 126, 65, -931, 548, -207, - -455, 410, -282, -931, 944, -354, 69, 412, -661, - 1068, -969, -443, 1894, -1281, -442, 2003, -1640, 713, - 852, -1344, 1338, -457, 243, 498, -697, -129, 993, - -388, -76, 1039, -768, 492, -104, -58, 951, -854, - 181, 1093, -1111, 491, 544, -1061, 118, 586, -477, - -411, 392, 233, 91, -908, 532, 218, -1176, 670, - -74, -674, 696, -801, 194, 592, -1790, 762, -564, - -791, 595, -145, -727, 228, 434, -246, -232, -169, - 281, -324, 289, -120, -270, -49, 282, 250, -56, - -405, 507, 27, -1060, 1329, -203, -204, 1677, -767, - -313, 1272, -968, 717, 183, -1652, 2157, -75, -1906, - 2590, -428, -1614, 2564, -1511, -240, 1421, -1911, 1420, - 396, -1397, 1691, -694, -1500, 1942, -823, -784, 841, - -635, 759, -447, 351, 44, -946, 227, 441, -564, - 155, -719, 182, 509, -320, -300, 205, -662, 726, - 469, -1240, 191, 664, -269, -152, -18, 214, -149, - -257, 347, 76, -79, -384, 874, -387, -269, 892, - -783, 537, 46, 27, 251, -332, 133, 377, -522, - 232, 626, -362, -499, 1112, -342, -522, 362, -187, - 547, -384, -155, 517, -551, 227, 651, -825, -88, - 579, -758, -40, 456, -774, 542, -164, -482, 968, - -1000, -394, 1094, -885, 431, 
74, -348, 403, -959, - 831, -465, -330, 762, -717, -645, 1342, -499, -416, - 944, -417, -438, 737, -368, -42, 740, -1234, 689, - 29, -106, 619, -824, -10, 1047, -824, 146, -59, - 210, 163, -43, 522, -352, 213, 460, -1049, 599, - 308, -843, 632, 223, -504, 296, 530, -931, 751, - -176, -524, 379, 236, -626, 66, 662, -575, 191, - -175, -619, 660, -424, -217, 704, -498, 200, 62, - -543, 280, 91, -378, 54, 168, -554, 670, -215, - -1097, 1805, -1015, -617, 1642, -1560, 727, 61, 7, - -48, -659, 1308, -752, -613, 914, 160, -469, 164, - -167, 274, 326, -667, 497, 333, -757, 1252, -481, - -1257, 2019, -949, -719, 1676, -1078, 250, 323, -1100, - 1550, 145, -1697, 972, 522, -966, 374, -365, 846, - -276, -756, 629, -278, 302, -151, -243, -363, 841, - -7, -1092, 476, 45, 201, -378, -456, 1113, -926, - 97, 178, -240, 326, -597, 472, -10, -190, 394, - -501, -259, 307, 133, 240, -433, -192, 472, -190, - 12, 398, -191, -605, 1295, -576, -154, 474, -661, - 866, -968, 172, 887, -736, 36, 259, -201, 265, - 460, -859, 622, 102, -690, 776, -80, -745, 919, - 140, -750, 224, 134, -236, -196, 456, 409, -1069, - 600, 239, -306, -383, 541, -213, -323, -121, 700, - -735, 179, 222, -613, 653, -711, -81, 592, -694, - 117, 703, -772, -264, 644, -117, -422, 276, 64, - -355, -430, 800, -74, -619, 1207, -1057, 4, 960, - -1219, 977, -78, -1186, 1536, 267, -1388, 1144, -90, - -1052, 1889, -1255, -387, 1815, -1763, 1037, 421, -1003, - 767, -24, -277, -54, 759, -285, -1015, 1422, -581, - -121, 547, -687, 288, 440, -626, -623, 1261, -248, - -1133, 1204, -714, 382, 219, -851, 240, -161, 672, - -261, -855, 1043, -599, 111, -362, 225, 641, -913, - -122, 1075, -1165, 432, 131, -803, 978, 33, -1291, - 992, 224, -1054, 789, -121, -215, 262, -11, 89, - -174, 365, -240, 114, 406, -813, 291, 233, 158, - -377, 194, 216, -477, 635, -228, -512, 599, 23, - -273, 71, 258, 10, -155, -198, 354, 61, -749, - 768, -19, -709, 596, 97, -276, 164, 69, -144, - -20, 529, -897, 188, 480, -703, 836, -874, 259, - 917, -1044, -7, 
566, -97, -439, 256, -466, 998, - -360, -1134, 1619, -762, -752, 1446, -707, -177, 652, - -899, 579, 253, -410, 146, -262, 275, 353, -610, - 52, 671, -862, 419, -140, 273, 247, -1062, 1005, - -175, -497, 772, -431, -101, 450, -598, 266, 428, - -842, 477, -11, -554, 642, 17, -787, 544, 445, - -625, -205, 796, -222, -733, 764, -572, 423, 166, - -994, 931, -228, -303, 362, -214, 104, 448, -1091, - 722, 570, -1311, 773, 259, -648, 477, 193, -682, - 302, 459, -464, -383, 1120, -561, -564, 1083, -372, - -354, 864, -586, -200, 502, -331, 27, 446, -657, - 281, 571, -888, 502, 251, -423, 116, 277, -263, - 118, -170, 168, 367, -723, 202, 438, -793, 451, - -30, -292, 202, 38, -188, -66, 221, -90, -105, - 7, 346, -578, 337, 247, -371, -14, 22, 36, - 151, -322, -244, 692, -556, -5, 550, -560, 200, - 161, -347, 191, 258, -520, 441, -212, -215, 584, - -428, -251, 213, 90, -187, 109, 138, -211, -17, - 191, 111, -259, 161, -141, 232, -175, 0, 154, - -369, 539, -171, -438, 484, 43, -375, -37, 249, - 196, -328, -106, 541, -531, 103, 240, -191, 186, - -363, 40, 585, -573, 258, 170, -593, 515, -261, - -86, 407, -339, 164, -214, -34, 464, -377, -206, - 336, -230, 239, -85, -69, 322, -503, 322, 142, - -748, 867, -160, -753, 836, -249, -362, 750, -374, - -222, 448, -82, -246, 399, 13, -429, 441, -47, - -127, -29, 337, -502, 318, 132, -457, 498, -145, - -91, 98, 208, -179, 54, 62, -260, 237, 96, - -161, 32, -150, 93, 21, -31, 74, 75, -322, - 164, 168, -191, 119, -121, -66, -195, 296, -128, - -251, 381, -56, -338, 281, -29, -472, 664, -301, - -275, 423, -285, -77, 258, -82, -139, 160, -54, - -26, 27, 75, -49, -196, 305, -131, -187, 262, - -37, -206, 65, 269, -240, -144, 261, 54, -338, - 355, 3, -503, 535, -253, -210, 433, -290, -33, - 381, -546, 173, 252, -364, 271, -329, 166, 266, - -564, 507, -32, -648, 861, -400, -357, 819, -519, - -74, 392, -423, 426, -306, -93, 691, -991, 537, - 467, -992, 614, 426, -823, 491, 182, -371, 174, - 84, -64, 98, -96, 23, 182, -69, -211, 226, - 18, -134, 
334, -514, 352, 378, -623, 363, 266, - -592, 493, -46, -369, 594, -440, -10, 295, -368, - 326, -192, -140, 306, -305, 140, 198, -396, 202, - 154, -341, 208, -8, -169, -76, 106, 20, -347, - 233, 30, -193, 117, -9, -165, 182, -4, -195, - 96, 131, -188, -106, 166, -71, -99, 57, 4, - -31, -131, 101, 63, -199, 225, -25, -281, 342, - -247, -170, 516, -289, -263, 422, -158, -148, 363, - -192, -138, 122, 62, -105, 7, 194, -53, -224, - 83, 173, -182, 20, 178, -274, 182, 74, -109, - -5, 319, -303, -72, 428, -371, 50, 271, -204, - 17, 161, -256, 169, 93, -169, 94, -89, 139, - 80, -199, 325, -67, -83, 202, -154, 16, 202, - -325, 162, 61, -93, 201, -278, 236, 108, -477, - 594, -145, -370, 647, -261, -356, 669, -369, -181, - 420, -266, -154, 159, -25, 53, -40, -22, 68, - -203, 144, -2, -173, 88, -3, -62, 2, 75, - 55, -95, -130, 219, -142, -191, 164, -170, 44, - 0, -246, 249, -27, -413, 461, 27, -490, 292, - 19, -145, 13, 99, 91, -466, 209, 295, -773, - 465, 210, -680, 410, 163, -358, 399, -201, 87, - 23, -212, 270, -230, 86, 159, -353, 381, -73, - -456, 726, -353, -357, 754, -367, -344, 657, -59, - -417, 432, 35, -309, 153, 97, -69, 89, -101, - 63, 107, -127, 106, 112, -26, -236, 376, 43, - -479, 544, -57, -407, 447, -148, -103, 195, -198, - 80, 156, -228, 35, 145, -77, -55, 130, -33, - -190, 123, 41, -170, 74, 114, -241, 67, 192, - -195, -76, 186, -136, -133, 213, -105, -110, 144, - -51, -126, 154, -59, -124, 147, -49, -132, 82, - 26, -130, 63, 68, -211, 97, 131, -224, 59, - 184, -250, 59, 205, -225, -67, 163, -135, -24, - 74, -22, -4, -81, 21, 71, -137, 71, 47, - -120, 71, 34, -65, 138, -6, -116, 112, -47, - -39, 20, -75, 64, -7, 2, 35, 52, -61, - -29, 81, -61, -30, 195, -91, -136, 261, -11, - -186, 162, -86, -35, 152, -106, -32, 126, -4, - 49, 33, -9, -11, 46, 111, -132, -3, 204, - -175, -10, 281, -146, -94, 226, -126, -36, 58, - -14, 61, -172, 48, 193, -221, 83, 149, -279, - 195, 130, -357, 226, 102, -260, 191, 16, -223, - 124, 14, -144, 90, -31, -81, -66, 54, 103, - -181, 
29, 174, -281, 92, 81, -226, 139, -133, - -41, 167, -147, 44, 27, -132, 107, -34, -122, - 105, -54, 17, 52, -131, 138, 33, -206, 158, - 43, -80, 24, 10, -27, 33, 43, -71, 15, - 71, -42, 14, 18, 0, -3, -14, -14, 58, - 46, -99, 122, 105, -202, 125, 119, -238, 112, - 133, -242, 113, 129, -301, 52, 161, -177, 82, - 73, -139, 46, 122, -119, 22, 155, -230, 23, - 242, -211, -12, 182, -184, -57, 190, -34, -101, - 58, -20, 6, 103, -61, -78, 12, 18, 12, - 86, -71, -27, 43, -24, 8, 39, -109, 21, - -4, -44, 66, 13, -59, 61, -39, 35, 113, - -179, 19, 171, -158, 14, 112, -133, 26, 9, - -43, -9, 6, 41, -77, 22, 80, -61, -63, - 65, -32, -32, 125, -105, -11, 114, -120, 42, - 42, -92, 45, -56, -25, 131, -83, -24, 97, - -51, -5, 67, -69, 7, 41, -27, 8, 3, - -10, 8, -3, -87, -28, 122, -33, -58, 124, - -53, -50, 67, -115, -17, 111, -112, -30, 101, - -24, -13, 41, 3, 45, -13, -34, 23, 23, - -19, 13, -49, -49, 68, -68, -32, 91, -58, - -18, 73, -19, -27, 17, -33, -35, 99, -38, - -99, 78, -31, -62, 95, -71, -124, 184, -15, - -146, 160, -27, -109, 140, -25, -63, 84, -34, - -18, 58, -68, -16, 22, -87, 86, 23, -130, - 61, 62, -132, 51, 168, -139, 35, 133, -121, - 50, 102, -120, 40, 126, -87, -40, 119, -14, - -59, 78, 11, -68, 41, 24, -25, 55, -2, - 15, 21, -73, 56, 88, -74, -41, 4, -10, - -4, 5, 7, -39, -3, -4, -39, 94, 52, - -135, 42, 90, -86, 12, 21, -55, -70, -37, - 55, -63, -35, 50, -100, 21, 84, -151, 24, - 87, -94, 51, 2, -58, 104, -61, -70, 60, - -25, -42, -31, 55, 35, -129, 47, 69, -65, - 77, 2, -60, 110, -32, -69, 84, -54, -26, - 98, -28, -7, 49, -49, -19, 119, -11, -157, - 20, 106, 29, -8, -38, -30, 72, 30, -3, - 1, -32, -11, -9, 52, 46, -144, -38, 86, - -31, -9, -42, -75, 142, 34, -64, 79, -109, - -55, 195, -69, -80, 48, -49, 62, 25, -111, - -42, 52, 19, -41, 1, -16, -33, 44, 30, - -21, 17, -2, -30, 111, 34, -111, 83, 55, - -119, 66, 62, -89, 63, -39, -143, 168, 21, - -158, 158, 32, -132, 134, -3, -77, 88, -45, - -18, 117, -51, -71, 10, 30, 35, -27, -63, - 13, 34, 23, -23, 19, -4, 
-92, 34, 74, - -69, -15, 20, -36, 56, -36, -96, 69, -34, - -122, 32, 31, -51, -3, -21, 4, 43, -44, - 6, 81, -39, -35, 26, -38, -24, 29, -16, - -47, -6, 19, -7, -9, 41, 32, 13, -2, - -21, 3, 24, 49, -3, -66, 14, 95, -7, - -52, 80, 68, -72, -14, 39, 2, 24, -6, - -53, 86, 21, -78, 67, 28, -34, 16, -23, - -1, 70, -3, -58, 45, 33, -94, -34, 62, - 41, -11, -27, 27, 46, 14, -33, -12, 44, - -16, -59, 6, 45, -3, -42, 2, 13, 19, - -1, -71, 3, 42, -36, 6, 17, 26, 5, - -46, 6, -68, -75, 86, -20, -90, 80, 4, - -86, 5, 2, -33, -15, -2, -8, -18, 15, - -7, -25, 27, -28, -88, 39, -2, -85, 58, - 40, -45, 3, 17, 0, 11, -4, -3, 84, - 22, -113, 8, 94, 10, 9, 28, 6, -3, - 5, -2, 23, 23, -1, -40, 20, 48, -40, - -21, 72, 7, -40, -1, 27, 16, 30, 31, - -16, 11, 9, -71, -7, 62, 21, -61, -19, - 78, -2, -22, 67, -42, -12, 75, -79, 47, - 86, -124, -42, 21, 4, 23, -32, -7, 19, - 1, -13, -46, 2, 32, -43, -7, 86, -16, - -22, 46, -61, -35, 11, -64, -38, 17, -12, - -27, 20, 41, 6, -58, -61, 58, -51, -77, - 36, -25, 19, 93, -76, 1, 72, -92, 15, - 40, -56, 65, 13, -29, 82, -9, -21, 24, - -83, -5, 4, -63, 77, 80, -58, -6, -19, - -43, 100, 5, -36, 63, 33, -26, -48, 26, - -18, -75, 34, 24, -45, -1, 6, -35, -24, - -23, -22, 47, -15, -46, 31, -40, -41, 74, - -32, -73, 59, -51, -26, 143, -29, -42, 93, - -44, -21, 56, -7, 55, 51, -61, 74, 111, - -71, 35, 124, -123, -3, 62, -79, 100, 49, - -122, 143, 79, -137, 72, 30, -82, 75, -10, - -48, 35, -23, -25, 34, 0, -54, -6, 34, - -46, -59, -7, -72, -6, 70, -41, -39, 23, - -33, 11, 104, -44, -30, 54, -69, -20, 62, - -75, 1, 45, -69, 1, 40, -59, -15, 18, - -16, 38, -1, -52, 8, 14, -32, 11, -15, - -58, 18, -22, -44, 69, 40, -50, -21, 1, - -35, -3, -5, -20, 40, 36, -41, -36, -43, - -11, 48, -34, -40, 51, -10, -9, 30, 10, - 12, 51, 51, -8, -16, 32, -6, 31, 24, - -38, 43, 18, -15, 53, -10, -55, 9, 8, - -28, 21, 10, -26, 21, 10, -9, 5, -29, - -13, 38, -1, -11, 49, 0, -41, 10, 23, - -25, -35, -2, -32, -10, 58, -6, -18, 16, - -9, 4, 11, 17, 21, 21, 12, -2, 49, - -16, -128, 
21, 75, -32, 22, 34, -59, 48, - 75, -69, -11, -2, -65, 39, 57, -54, -79, - -11, -20, -13, 38, 4, -9, -22, -22, 33, - -7, -52, 10, -10, -19, 54, 47, -21, -35, - -6, -4, 11, 8, -28, 1, 8, -4, 30, - 1, -22, 26, -7, -24, 56, 25, -45, 13, - 24, -32, 13, 22, -46, -2, 15, -39, 28, - 32, -69, 0, 27, -69, 0, 39, -40, 28, - 55, -27, -13, 0, -14, 37, 25, -25, 34, - -3, -69, 26, 39, -41, -6, 29, -7, 5, - 66, 41, -27, -17, 6, -14, -21, 0, 29, - -9, -26, 32, -5, -34, 60, 15, -60, 20, - 13, 11, 43, -48, -15, 88, -13, -55, 26, - -32, -46, 35, 14, -37, -11, 12, -20, 11, - 9, -64, -16, 17, 5, 38, 7, -30, -9, - -49, -11, 52, -15, -38, -27, -12, 36, 53, - 1, -37, -17, -12, 0, 31, 1, 13, 40, - -15, 2, 47, -15, -17, 28, -2, -4, 25, - -6, -12, 2, -17, -9, 5, -15, 17, 21, - -28, 0, 15, -43, -63, -6, -14, -8, 37, - -34, -40, 30, -12, -14, 37, -13, -16, 26, - -15, -2, 13, -37, -13, 32, 13, -8, -2, - -12, -8, 9, 9, -3, 4, 13, 34, -2, - -22, 40, 19, 29, 25, -48, -17, 23, 17, - 7, 3, 0, 12, 37, -1, -25, 30, 41, - -7, 7, 29, -31, -31, -23, -27, 5, 2, - -18, -2, 22, 9, -6, 5, -7, -24, 9, - 0, -28, 19, 61, -11, -45, 21, -28, -65, - 28, 33, -44, -27, -6, -26, -8, 4, 5, - 9, -10, -46, -20, 20, -7, -7, -33, -26, - 50, 9, -65, -22, -3, -20, 15, 21, 20, - 24, -16, -27, -13, 14, 21, -38, -48, 9, - 35, 28, 21, 3, -31, -8, 57, 32, -35, - -22, 20, 14, 12, 28, 39, 0, -18, 44, - -2, -17, 53, 0, -27, 33, 43, 5, -10, - 25, 47, -3, -4, 36, 15, -12, -3, 29, - 41, 23, 23, -8, -32, 15, 37, 0, 3, - 22, 31, 1, -20, 27, 2, -50, 0, 33, - 16, -16, -17, 18, -26, -34, 31, -27, -84, - -33, 4, -5, -22, -17, -28, -66, -24, 8, - -16, -25, -51, -13, 45, -11, -49, -26, -49, - -38, 21, 10, -52, -58, -19, -4, 9, -31, - -29, 55, 2, -45, 29, 10, -22, 49, 33, - -27, -19, -5, 30, 47, 11, -11, -2, 8, - 5, 17, 8, 3, 57, 63, 28, 24, 11, - 2, 14, 22, 7, 7, 2, 23, 33, -2, - -8, 14, 7, 20, 57, 32, -5, 12, 23, - 10, 17, 26, -18, -72, -6, 74, 61, 13, - -17, -21, -7, 29, 45, 5, -52, -49, 1, - 10, 35, 40, -46, -66, 7, 31, -27, -44, - -12, 
-41, -22, 32, -12, -32, -3, -17, -22, - -22, -31, -30, -23, -13, 3, 0, -21, -19, - -7, -17, -9, 18, -40, -64, 1, 4, -4, - 8, -17, -28, -1, 9, -7, -9, 27, 6, - -63, -32, 52, 25, -46, -23, -6, -11, 35, - 29, -50, -44, 17, -6, -12, 53, 28, -17, - -9, 28, 34, -20, -18, 22, 43, 28, -6, - 8, 14, 19, 28, 14, 27, 26, 12, 76, - 66, -18, -2, 18, -12, -1, -2, -1, 51, - 30, -18, 5, 14, -12, 2, 13, -25, -9, - 32, 7, -5, 15, -12, -33, -18, -13, 6, - 0, -25, -12, 1, -17, 0, 13, -24, -27, - 4, 35, 14, -22, 5, 13, -18, -30, -10, - -7, -7, 31, 23, -27, -26, 9, 47, 6, - -50, -11, 19, 1, 11, 12, -19, -43, -18, - 10, -6, -3, 12, 2, -12, -16, 10, 9, - -25, -21, -10, -13, 0, 8, -1, -9, 10, - 4, -34, 14, 46, 5, 18, 24, -15, -7, - 20, -1, -13, 7, 11, 14, 11, -2, 8, - 27, 10, -1, 13, -2, -7, 48, 44, -15, - -16, -6, 3, 7, -35, -25, 8, -31, -16, - 30, 36, 22, -13, -21, -10, 8, 2, -58, - -37, 32, 25, -1, -25, -21, 3, 3, -6, - -11, -3, 2, 4, 34, 22, -25, -19, 0, - -6, -10, -8, -35, -32, 8, -3, -20, -11, - -6, 3, 8, -8, 3, 25, 23, -7, -35, - -15, 8, -20, -6, 15, -44, -29, 19, -5, - -1, 18, 28, 6, -21, 9, 11, -20, -10, - 18, 22, 6, -2, 12, 6, 23, 34, -20, - -19, 1, -10, 34, 41, 13, 6, 3, 22, - 11, -4, 4, -12, -8, 17, 18, 12, -1, - 5, 9, -6, -2, 4, 1, 3, 2, -6, - -32, -25, 9, 18, 27, -4, -54, -29, 2, - -3, -18, -38, -28, -10, 9, 20, 5, -9, - -15, -3, 2, -14, -15, -6, 5, 10, 6, - 3, -11, -9, -5, -20, -13, 8, 3, -14, - 6, 20, -15, -21, 9, 19, 21, 12, -4, - -21, -17, 16, 27, -4, -28, -2, 26, 9, - -12, -16, -28, -28, -4, 4, -15, -9, 3, - -10, -16, 2, 17, -10, -26, 3, 16, 26, - 17, -12, -9, 2, -2, -5, -11, 5, 28, - 1, -14, 13, 14, 5, 18, 6, -17, -5, - 7, 2, -3, 11, 10, -1, 50, 36, -28, - 21, 39, -9, -6, 2, 10, 36, 20, -2, - -3, -11, -10, -6, -5, -4, -8, 2, 17, - 1, -13, 11, -13, -36, 11, 14, -19, -6, - 3, 0, 20, -5, -24, 12, 7, -11, 2, - -15, -28, -1, 6, -14, -31, -39, -19, 19, - 37, 3, -32, -27, -6, 13, 31, 15, -41, - -41, 25, 35, -3, -16, -25, -19, -10, -3, - 19, 10, -4, 7, -4, -19, -12, -13, -9, - 
6, 2, -12, -6, 12, 6, -1, -5, -19, - -7, 7, 40, 56, -3, -13, 21, 24, 7, - -11, -9, -3, 24, 28, -10, 1, 12, 21, - 24, -16, -15, 4, -7, -2, 19, 13, -11, - -7, -8, 15, 41, 5, -16, -18, -11, 26, - 26, -5, -12, -14, -6, 10, 8, -8, -16, - -16, -3, 10, 1, -3, -3, -2, -15, -18, - 6, -4, -4, 21, 4, -2, 15, 13, 0, - -2, 12, 7, -15, -9, 1, -2, 2, -1, - -9, -15, -17, -14, -10, 1, -4, -16, -17, - -1, 18, 8, 1, 22, 11, -19, -10, 4, - -23, -29, 0, -2, -14, -6, 13, 7, -23, - -13, 10, 9, 11, 10, 4, -4, -4, 1, - 6, 14, 9, 2, 0, 2, 6, 4, -9, - -18, -8, 8, 18, 8, 13, 9, -27, -22, - -10, -24, -9, 17, 11, 2, 9, 3, -13, - -10, -1, -7, -1, 10, -4, 1, 16, 12, - -6, -14, -2, -5, -1, 0, -1, 6, -9, - -3, 12, 4, 1, -2, 2, 17, 24, 22, - 9, 8, 21, 14, -2, -2, 4, -1, -7, - -7, -6, -1, -6, 17, 30, -7, -10, -3, - -19, -18, 2, 21, 4, -20, -6, -1, -18, - -14, -6, -7, -1, 6, 10, 8, -5, 0, - 10, -22, -40, -22, 4, 34, 16, -19, -16, - -12, -17, -16, -17, -29, -28, -4, 10, 16, - 22, 13, 4, -1, -5, 16, 15, -11, -6, - 9, 3, -14, -22, -19, -12, 5, -5, -15, - 3, 9, 27, 17, -4, 8, -2, 1, 16, - 11, 9, 9, 8, -14, -16, 7, -5, -15, - -11, -5, 19, 25, 25, 43, 21, -9, -9, - -19, -10, 14, -11, -19, 8, 3, 1, 11, - -1, -24, -20, -1, 2, 7, 24, 22, 11, - 8, 6, -2, -11, -3, -2, -4, 0, -7, - 0, 6, -1, -16, -35, -8, 8, -11, -6, - 6, 18, 16, 7, 12, 5, -2, -3, -10, - -21, -27, -10, -3, -3, 8, 0, -9, -10, - -3, 0, -5, 6, 9, 19, 23, 8, -5, - -19, -16, -5, -6, -27, -22, 1, 6, 8, - 2, -9, -13, -15, -18, -13, 4, 25, 29, - 26, -2, -22, 1, 8, 1, -6, -6, -7, - -20, 0, 13, -14, -24, -24, -21, 2, 14, - 16, 23, 15, 10, 10, 5, 0, -26, -32, - 3, 19, 5, -8, -7, -8, -3, 17, 27, - -7, -28, 10, 32, 10, 1, 10, 3, -4, - 22, 24, -31, -40, 0, 6, 5, 17, 17, - 1, 10, 30, 8, -12, -6, 9, 6, -12, - -5, 1, -4, 6, 11, 0, -9, -4, -3, - -4, -3, 2, 0, -2, -9, -27, -23, 2, - 13, -6, -9, -3, -12, -2, 10, 6, -7, - -19, -31, -13, 16, 11, -3, -13, -15, 0, - 7, -3, -7, -1, -4, 7, 15, 0, -12, - -8, -1, -7, -12, -21, -17, 5, 30, 25, - -6, -6, 0, -12, -8, 2, 13, 
11, 1, - 5, 4, 4, 10, -1, -20, -12, -4, 3, - 15, 11, -7, -24, -4, 8, -2, -14, -25, - -17, 7, 21, 14, 1, 0, 12, 17, 13, - 6, 1, 6, 14, 11, -10, -21, -12, -4, - 3, -2, -21, -24, -2, 12, 14, 17, 4, - -2, 11, 11, 11, 1, -34, -32, -5, 10, - 7, -11, -12, 6, 7, -4, -10, -15, -5, - 17, 21, 0, -15, -15, -1, 5, -18, -18, - -10, -9, 24, 27, -9, -14, 0, 9, 25, - 22, 1, -7, -2, 16, 13, -14, -10, 7, - 0, 2, 15, 2, -9, 5, 10, -5, -3, - 10, 3, 0, 15, 15, -1, -3, 8, 6, - -7, -7, 2, 0, -4, 5, -8, -37, -28, - -1, 8, 6, 10, -1, -12, 12, 28, 8, - -17, -16, -15, -17, 1, 6, -4, -8, -4, - -15, -15, 6, -9, -15, 10, 9, -13, -8, - 5, -2, -10, 5, 12, -27, -33, 9, 8, - -16, -3, 16, -3, -7, 22, 22, 10, 5, - -11, -16, -4, 9, 12, 6, -3, 2, 2, - -1, 4, -7, -8, 1, 8, 19, -}; diff --git a/examples/micro_speech/yes_1000ms_sample_data.h b/examples/micro_speech/yes_1000ms_sample_data.h deleted file mode 100644 index 5d09866..0000000 --- a/examples/micro_speech/yes_1000ms_sample_data.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was created from the PCM data in a WAV file held in v2 of the -// Speech Commands test dataset, at the path: -// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav -// This should contain all 16,000 samples from the one-second file. 
- -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_ - -#include - -extern const int g_yes_1000ms_sample_data_size; -extern const int16_t g_yes_1000ms_sample_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_1000MS_SAMPLE_DATA_H_ diff --git a/examples/micro_speech/yes_30ms_sample_data.cpp b/examples/micro_speech/yes_30ms_sample_data.cpp deleted file mode 100644 index 56eafa0..0000000 --- a/examples/micro_speech/yes_30ms_sample_data.cpp +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
- -#include "yes_30ms_sample_data.h" - -const int g_yes_30ms_sample_data_size = 480; -const int16_t g_yes_30ms_sample_data[480] = { - -876, -470, 510, 803, 170, -787, -1568, -1893, -1598, -1027, - -992, -1803, -2610, -2484, -1905, -2113, -3113, -3399, -2267, -1261, - -2007, -3637, -3909, -2340, -893, -1158, -2272, -2486, -1639, -915, - -777, -596, -91, 196, 85, 210, 875, 1373, 1247, 1219, - 1958, 2718, 2328, 1196, 1008, 2350, 3677, 3269, 1503, 366, - 922, 2264, 2810, 1996, 608, -168, 75, 680, 811, 395, - -56, -318, -607, -966, -1108, -925, -613, -368, -369, -919, - -1926, -2460, -1685, -300, 155, -611, -1524, -2204, -3227, -3859, - -2037, 1622, 2382, -2583, -8448, -7544, -84, 4814, 915, -6423, - -7558, -1746, 2515, -59, -4587, -3858, 1260, 3625, 187, -4148, - -3500, 1542, 5467, 4780, 1256, -1127, -403, 2481, 5332, 6346, - 5014, 2536, 1216, 2467, 5039, 6238, 5070, 3381, 3269, 4173, - 3905, 2248, 1586, 3299, 5240, 4362, 1004, -1382, -489, 2113, - 3168, 1620, -742, -1824, -1435, -897, -1058, -1500, -1545, -1398, - -1965, -3266, -4136, -3756, -2609, -1804, -1986, -3087, -4599, -5296, - -4051, -1731, -781, -2228, -4092, -3977, -2325, -1353, -1568, -1490, - -428, 178, -672, -1650, -1058, 749, 2039, 2079, 1540, 897, - 310, 572, 2266, 4265, 4265, 1869, -231, 559, 3332, 4752, - 3229, 768, 101, 1364, 2463, 1984, 819, 411, 723, 675, - -162, -923, -743, -32, 185, -516, -1653, -2359, -2103, -986, - 42, -205, -1702, -2870, -2337, -809, -221, -982, -1544, -946, - -598, -2117, -4291, -4100, -857, 1948, 338, -4799, -7972, -5403, - 173, 2371, -1063, -5533, -5578, -1777, 605, -985, -3249, -2213, - 1184, 2691, 560, -2356, -2288, 1233, 5244, 6441, 4004, 370, - -663, 2555, 7404, 9282, 6573, 2612, 1836, 4662, 7467, 7393, - 5421, 4262, 4741, 5362, 4705, 3163, 2397, 3337, 4887, 4810, - 2254, -749, -1316, 772, 2706, 2016, -573, -2552, -2746, -2012, - -1647, -1978, -2579, -3105, -3473, -3911, -4484, -4891, -4795, -4163, - -3543, -3538, -4275, -5356, -5743, -4637, -2614, -1301, -1825, -3341, 
- -4011, -2937, -751, 1007, 1245, 235, -639, -61, 1626, 2864, - 2967, 2734, 3013, 3329, 2914, 2312, 2666, 3839, 4308, 3162, - 1453, 768, 1255, 1887, 2006, 1715, 1031, -297, -1660, -1690, - -277, 813, -30, -2137, -3370, -2854, -1553, -593, -413, -1146, - -2567, -3440, -2369, -205, 379, -1258, -2315, -812, 262, -3205, - -8576, -7894, 738, 7492, 1951, -11595, -17098, -6934, 7139, 8065, - -4575, -14199, -8946, 3606, 7504, -547, -8242, -5113, 4406, 8113, - 2134, -5040, -4089, 4157, 10934, 10158, 4167, -565, -192, 4428, - 9765, 12201, 9861, 4512, 1225, 3451, 8483, 10133, 6497, 2574, - 3333, 6806, 6986, 2487, -1214, 623, 5416, 6647, 2204, -3289, - -4556, -1565, 1544, 1525, -1236, -4293, -5695, -5174, -3995, -3403, - -3449, -3750, -4505, -6014, -7296, -6523, -3849, -2096, -3288, -5722, - -6004, -3581, -1497, -1960, -3330, -2800, -434, 964, -111, -1739, - -1136, 1736, 4151, 3736, 1274, -451, 469, 3386, 5833, 5898, - 3646, 1085, 272, 1743, 4061, 5108, 3837, 1490, 246, 967, - 1866, 859, -1069, -974, 1542, 2835, 47, -4285, -5068, -1567, - 1781, 1223, -1997, -4227, -3747, -1720, 41, 245, -1228, -2972, - -2673, 22, 1980, -930, -7721, -11271, -5725, 4974, 8484, -2007, - -16979, -19255, -4670, 11057, 9690, -6417, -17537, -10841, 4262, 9292, -}; diff --git a/examples/micro_speech/yes_30ms_sample_data.h b/examples/micro_speech/yes_30ms_sample_data.h deleted file mode 100644 index cfe201a..0000000 --- a/examples/micro_speech/yes_30ms_sample_data.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This data was created from the PCM data in a WAV file held in v2 of the -// Speech Commands test dataset, at the path: -// speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav -// The data was extracted starting at an offset of 8,000, which corresponds to -// the 26th spectrogram slice. It's designed to be used to test the -// preprocessing pipeline, to ensure that the expected spectrogram slice is -// produced given this input. - -#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_ -#define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_ - -#include - -extern const int g_yes_30ms_sample_data_size; -extern const int16_t g_yes_30ms_sample_data[]; - -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_ diff --git a/examples/person_detection/CMakeLists.txt b/examples/person_detection/CMakeLists.txt index 0ee8c72..5b5fc35 100644 --- a/examples/person_detection/CMakeLists.txt +++ b/examples/person_detection/CMakeLists.txt @@ -6,15 +6,15 @@ set(CMAKE_C_STANDARD 11) set(CMAKE_CXX_STANDARD 11) -add_executable(detection_responder_test_int8 "") +add_executable(detection_responder_test "") -target_include_directories(detection_responder_test_int8 +target_include_directories(detection_responder_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/. 
) set_target_properties( - detection_responder_test_int8 + detection_responder_test PROPERTIES COMPILE_FLAGS -fno-rtti COMPILE_FLAGS -fno-exceptions @@ -22,7 +22,7 @@ set_target_properties( COMPILE_FLAGS -nostdlib ) -target_sources(detection_responder_test_int8 +target_sources(detection_responder_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/detection_responder.cpp ${CMAKE_CURRENT_LIST_DIR}/detection_responder_test.cpp @@ -30,12 +30,15 @@ target_sources(detection_responder_test_int8 ) target_link_libraries( - detection_responder_test_int8 + detection_responder_test pico-tflmicro hardware_pwm ) -pico_add_extra_outputs(detection_responder_test_int8) +pico_enable_stdio_usb(detection_responder_test 1) +pico_enable_stdio_uart(detection_responder_test 0) + +pico_add_extra_outputs(detection_responder_test) add_executable(person_detection_benchmark "") @@ -57,9 +60,9 @@ set_target_properties( target_sources(person_detection_benchmark PRIVATE ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/no_person_image_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_image_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/no_person_image_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/person_detect_model_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/person_image_data.cpp ${CMAKE_CURRENT_LIST_DIR}/person_detect_model_data.h ) @@ -69,18 +72,21 @@ target_link_libraries( hardware_pwm ) +pico_enable_stdio_usb(person_detection_benchmark 1) +pico_enable_stdio_uart(person_detection_benchmark 0) + pico_add_extra_outputs(person_detection_benchmark) 
-add_executable(person_detection_test_int8 "") +add_executable(person_detection_test "") -target_include_directories(person_detection_test_int8 +target_include_directories(person_detection_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/. ) set_target_properties( - person_detection_test_int8 + person_detection_test PROPERTIES COMPILE_FLAGS -fno-rtti COMPILE_FLAGS -fno-exceptions @@ -88,13 +94,13 @@ set_target_properties( COMPILE_FLAGS -nostdlib ) -target_sources(person_detection_test_int8 +target_sources(person_detection_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/model_settings.cpp ${CMAKE_CURRENT_LIST_DIR}/person_detection_test.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/no_person_image_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_image_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/no_person_image_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/person_detect_model_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/person_image_data.cpp ${CMAKE_CURRENT_LIST_DIR}/model_settings.h ${CMAKE_CURRENT_LIST_DIR}/no_person_image_data.h ${CMAKE_CURRENT_LIST_DIR}/person_detect_model_data.h @@ -102,23 +108,26 @@ target_sources(person_detection_test_int8 ) target_link_libraries( - person_detection_test_int8 + person_detection_test pico-tflmicro hardware_pwm ) -pico_add_extra_outputs(person_detection_test_int8) +pico_enable_stdio_usb(person_detection_test 1) +pico_enable_stdio_uart(person_detection_test 0) + +pico_add_extra_outputs(person_detection_test) -add_executable(person_detection_int8 "") +add_executable(person_detection "") -target_include_directories(person_detection_int8 +target_include_directories(person_detection PRIVATE 
${CMAKE_CURRENT_LIST_DIR}/. ) set_target_properties( - person_detection_int8 + person_detection PROPERTIES COMPILE_FLAGS -fno-rtti COMPILE_FLAGS -fno-exceptions @@ -126,14 +135,14 @@ set_target_properties( COMPILE_FLAGS -nostdlib ) -target_sources(person_detection_int8 +target_sources(person_detection PRIVATE ${CMAKE_CURRENT_LIST_DIR}/detection_responder.cpp ${CMAKE_CURRENT_LIST_DIR}/image_provider.cpp ${CMAKE_CURRENT_LIST_DIR}/main.cpp ${CMAKE_CURRENT_LIST_DIR}/main_functions.cpp ${CMAKE_CURRENT_LIST_DIR}/model_settings.cpp - ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp + ${CMAKE_CURRENT_LIST_DIR}/tensorflow/lite/micro/tools/make/downloads/person_model/person_detect_model_data.cpp ${CMAKE_CURRENT_LIST_DIR}/detection_responder.h ${CMAKE_CURRENT_LIST_DIR}/image_provider.h ${CMAKE_CURRENT_LIST_DIR}/main_functions.h @@ -142,23 +151,26 @@ target_sources(person_detection_int8 ) target_link_libraries( - person_detection_int8 + person_detection pico-tflmicro hardware_pwm ) -pico_add_extra_outputs(person_detection_int8) +pico_enable_stdio_usb(person_detection 1) +pico_enable_stdio_uart(person_detection 0) +pico_add_extra_outputs(person_detection) -add_executable(image_provider_test_int8 "") -target_include_directories(image_provider_test_int8 +add_executable(image_provider_test "") + +target_include_directories(image_provider_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/. 
) set_target_properties( - image_provider_test_int8 + image_provider_test PROPERTIES COMPILE_FLAGS -fno-rtti COMPILE_FLAGS -fno-exceptions @@ -166,7 +178,7 @@ set_target_properties( COMPILE_FLAGS -nostdlib ) -target_sources(image_provider_test_int8 +target_sources(image_provider_test PRIVATE ${CMAKE_CURRENT_LIST_DIR}/image_provider.cpp ${CMAKE_CURRENT_LIST_DIR}/image_provider_test.cpp @@ -176,10 +188,13 @@ target_sources(image_provider_test_int8 ) target_link_libraries( - image_provider_test_int8 + image_provider_test pico-tflmicro hardware_pwm ) -pico_add_extra_outputs(image_provider_test_int8) +pico_enable_stdio_usb(image_provider_test 1) +pico_enable_stdio_uart(image_provider_test 0) + +pico_add_extra_outputs(image_provider_test) diff --git a/examples/person_detection/detection_responder.h b/examples/person_detection/detection_responder.h index 8887c58..a9b9f1e 100644 --- a/examples/person_detection/detection_responder.h +++ b/examples/person_detection/detection_responder.h @@ -20,7 +20,7 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_DETECTION_RESPONDER_H_ #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" // Called every time the results of a person detection run are available. The // `person_score` has the numerical confidence that the captured image contains diff --git a/examples/person_detection/image_provider.h b/examples/person_detection/image_provider.h index 15a3c12..587b01e 100644 --- a/examples/person_detection/image_provider.h +++ b/examples/person_detection/image_provider.h @@ -17,7 +17,7 @@ limitations under the License. 
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_ #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" // This is an abstraction around an image source like a camera, and is // expected to return 8-bit sample data. The assumption is that this will be diff --git a/examples/person_detection/image_provider_test.cpp b/examples/person_detection/image_provider_test.cpp index d8eabee..c1db32d 100644 --- a/examples/person_detection/image_provider_test.cpp +++ b/examples/person_detection/image_provider_test.cpp @@ -19,7 +19,7 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "model_settings.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" #include "tensorflow/lite/micro/testing/micro_test.h" TF_LITE_MICRO_TESTS_BEGIN @@ -31,7 +31,7 @@ TF_LITE_MICRO_TEST(TestImageProvider) { TfLiteStatus get_status = GetImage(µ_error_reporter, kNumCols, kNumRows, kNumChannels, image_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); - TF_LITE_MICRO_EXPECT_NE(image_data, nullptr); + TF_LITE_MICRO_EXPECT(image_data != nullptr); // Make sure we can read all of the returned memory locations. uint32_t total = 0; diff --git a/examples/person_detection/main_functions.cpp b/examples/person_detection/main_functions.cpp index 3d07fa5..e29eb28 100644 --- a/examples/person_detection/main_functions.cpp +++ b/examples/person_detection/main_functions.cpp @@ -19,11 +19,10 @@ limitations under the License. 
#include "image_provider.h" #include "model_settings.h" #include "person_detect_model_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_interpreter.h" #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" // Globals, used for compatibility with Arduino-style sketches. namespace { @@ -81,7 +80,7 @@ void setup() { // Build an interpreter to run the model with. // NOLINTNEXTLINE(runtime-global-variables) static tflite::MicroInterpreter static_interpreter( - model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter); + model, micro_op_resolver, tensor_arena, kTensorArenaSize); interpreter = &static_interpreter; // Allocate memory from the tensor_arena for the model's tensors. diff --git a/examples/person_detection/no_person_image_data.h b/examples/person_detection/no_person_image_data.h index 4e026af..4265f87 100644 --- a/examples/person_detection/no_person_image_data.h +++ b/examples/person_detection/no_person_image_data.h @@ -24,7 +24,7 @@ limitations under the License. #include -extern const int g_no_person_data_size; -extern const uint8_t g_no_person_data[]; +extern const unsigned int g_no_person_image_data_size; +extern const uint8_t g_no_person_image_data[]; -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_NO_PERSON_IMAGE_DATA_H_ +#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_NO_PERSON_IMAGE_DATA_H_ diff --git a/examples/person_detection/person_detection_test.cpp b/examples/person_detection/person_detection_test.cpp index f67b863..a55e277 100644 --- a/examples/person_detection/person_detection_test.cpp +++ b/examples/person_detection/person_detection_test.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,33 +16,33 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "model_settings.h" #include "no_person_image_data.h" -#include "person_detect_model_data.h" #include "person_image_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "person_detect_model_data.h" #include "tensorflow/lite/micro/testing/micro_test.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" // Create an area of memory to use for input, output, and intermediate arrays. +#if defined(XTENSA) && defined(VISION_P6) +constexpr int tensor_arena_size = 352 * 1024; +#else constexpr int tensor_arena_size = 136 * 1024; +#endif // defined(XTENSA) && defined(VISION_P6) uint8_t tensor_arena[tensor_arena_size]; TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInvoke) { - // Set up logging. - tflite::MicroErrorReporter micro_error_reporter; - // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(µ_error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.\n", - model->version(), TFLITE_SCHEMA_VERSION); + MicroPrintf( + "Model provided is schema version %d not equal " + "to supported version %d.\n", + model->version(), TFLITE_SCHEMA_VERSION); } // Pull in only the operation implementations we need. @@ -51,23 +51,23 @@ TF_LITE_MICRO_TEST(TestInvoke) { // incur some penalty in code space for op implementations that are not // needed by this graph. 
tflite::MicroMutableOpResolver<5> micro_op_resolver; - micro_op_resolver.AddAveragePool2D(); - micro_op_resolver.AddConv2D(); - micro_op_resolver.AddDepthwiseConv2D(); + micro_op_resolver.AddAveragePool2D(tflite::Register_AVERAGE_POOL_2D_INT8()); + micro_op_resolver.AddConv2D(tflite::Register_CONV_2D_INT8()); + micro_op_resolver.AddDepthwiseConv2D( + tflite::Register_DEPTHWISE_CONV_2D_INT8()); micro_op_resolver.AddReshape(); - micro_op_resolver.AddSoftmax(); + micro_op_resolver.AddSoftmax(tflite::Register_SOFTMAX_INT8()); // Build an interpreter to run the model with. tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, - µ_error_reporter); + tensor_arena_size); interpreter.AllocateTensors(); // Get information about the memory area to use for the model's input. TfLiteTensor* input = interpreter.input(0); // Make sure the input has the properties we expect. - TF_LITE_MICRO_EXPECT_NE(nullptr, input); + TF_LITE_MICRO_EXPECT(input != nullptr); TF_LITE_MICRO_EXPECT_EQ(4, input->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(kNumRows, input->dims->data[1]); @@ -76,13 +76,13 @@ TF_LITE_MICRO_TEST(TestInvoke) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type); // Copy an image with a person into the memory area used for the input. - TFLITE_DCHECK_EQ(input->bytes, static_cast(g_person_data_size)); - memcpy(input->data.int8, g_person_data, input->bytes); + TFLITE_DCHECK_EQ(input->bytes, static_cast(g_person_image_data_size)); + memcpy(input->data.int8, g_person_image_data, input->bytes); // Run the model on this input and make sure it succeeds. 
TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); + MicroPrintf("Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -97,18 +97,16 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Make sure that the expected "Person" score is higher than the other class. int8_t person_score = output->data.int8[kPersonIndex]; int8_t no_person_score = output->data.int8[kNotAPersonIndex]; - TF_LITE_REPORT_ERROR(µ_error_reporter, - "person data. person score: %d, no person score: %d\n", - person_score, no_person_score); + MicroPrintf("person data. person score: %d, no person score: %d\n", + person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score); - // TODO(b/161461076): Update model to make this work on real negative inputs. - memset(input->data.int8, 0, input->bytes); + memcpy(input->data.int8, g_no_person_image_data, input->bytes); // Run the model on this "No Person" input. invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); + MicroPrintf("Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -123,13 +121,11 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Make sure that the expected "No Person" score is higher. person_score = output->data.int8[kPersonIndex]; no_person_score = output->data.int8[kNotAPersonIndex]; - TF_LITE_REPORT_ERROR( - µ_error_reporter, - "no person data. person score: %d, no person score: %d\n", person_score, - no_person_score); + MicroPrintf("no person data. 
person score: %d, no person score: %d\n", + person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score); - TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); + MicroPrintf("Ran successfully\n"); } TF_LITE_MICRO_TESTS_END diff --git a/examples/person_detection/person_image_data.h b/examples/person_detection/person_image_data.h index 1e677ed..9223d54 100644 --- a/examples/person_detection/person_image_data.h +++ b/examples/person_detection/person_image_data.h @@ -24,7 +24,7 @@ limitations under the License. #include -extern const int g_person_data_size; -extern const uint8_t g_person_data[]; +extern const unsigned int g_person_image_data_size; +extern const uint8_t g_person_image_data[]; -#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_IMAGE_DATA_H_ +#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_IMAGE_DATA_H_ diff --git a/examples/person_detection/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cpp b/examples/person_detection/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cpp index 4b637aa..6ed6461 100644 --- a/examples/person_detection/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cpp +++ b/examples/person_detection/tensorflow/lite/micro/benchmarks/person_detection_benchmark.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,18 +13,21 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/all_ops_resolver.h" -#include "tensorflow/lite/micro/benchmarks/micro_benchmark.h" #include "model_settings.h" #include "no_person_image_data.h" #include "person_detect_model_data.h" #include "person_image_data.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/benchmarks/micro_benchmark.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" #include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "tensorflow/lite/micro/micro_profiler.h" #include "tensorflow/lite/micro/micro_utils.h" +#include "tensorflow/lite/micro/system_setup.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" /* * Person Detection benchmark. Evaluates runtime performance of the visual @@ -32,9 +35,9 @@ limitations under the License. * exmaples/person_detection. */ -namespace { +namespace tflite { -using PersonDetectionOpResolver = tflite::AllOpsResolver; +using PersonDetectionOpResolver = MicroMutableOpResolver<6>; using PersonDetectionBenchmarkRunner = MicroBenchmarkRunner; // Create an area of memory to use for input, output, and intermediate arrays. @@ -44,48 +47,76 @@ alignas(16) uint8_t tensor_arena[kTensorArenaSize]; uint8_t op_resolver_buffer[sizeof(PersonDetectionOpResolver)]; uint8_t benchmark_runner_buffer[sizeof(PersonDetectionBenchmarkRunner)]; -PersonDetectionBenchmarkRunner* benchmark_runner = nullptr; // Initialize benchmark runner instance explicitly to avoid global init order // issues on Sparkfun. Use new since static variables within a method // are automatically surrounded by locking, which breaks bluepill and stm32f4. 
-void CreateBenchmarkRunner() { +PersonDetectionBenchmarkRunner *CreateBenchmarkRunner(MicroProfiler *profiler) { // We allocate PersonDetectionOpResolver from a global buffer // because the object's lifetime must exceed that of the // PersonDetectionBenchmarkRunner object. - benchmark_runner = new (benchmark_runner_buffer) - PersonDetectionBenchmarkRunner(g_person_detect_model_data, - new (op_resolver_buffer) - PersonDetectionOpResolver(), - tensor_arena, kTensorArenaSize); -} - -void InitializeBenchmarkRunner() { - CreateBenchmarkRunner(); - benchmark_runner->SetInput(reinterpret_cast(g_person_data)); + PersonDetectionOpResolver *op_resolver = + new (op_resolver_buffer) PersonDetectionOpResolver(); + op_resolver->AddFullyConnected(tflite::Register_FULLY_CONNECTED_INT8()); + op_resolver->AddConv2D(tflite::Register_CONV_2D_INT8REF()); + op_resolver->AddDepthwiseConv2D(); + op_resolver->AddSoftmax(); + op_resolver->AddAveragePool2D(); + op_resolver->AddReshape(); + return new (benchmark_runner_buffer) + PersonDetectionBenchmarkRunner(g_person_detect_model_data, op_resolver, + tensor_arena, kTensorArenaSize, profiler); } -void PersonDetectionTenIerationsWithPerson() { - benchmark_runner->SetInput(reinterpret_cast(g_person_data)); - for (int i = 0; i < 10; i++) { - benchmark_runner->RunSingleIteration(); +void PersonDetectionNIerations(const int8_t *input, int iterations, + const char *tag, + PersonDetectionBenchmarkRunner &benchmark_runner, + MicroProfiler &profiler) { + benchmark_runner.SetInput(input); + uint32_t ticks = 0; + for (int i = 0; i < iterations; ++i) { + profiler.ClearEvents(); + benchmark_runner.RunSingleIteration(); + ticks += profiler.GetTotalTicks(); } + MicroPrintf("%s took %u ticks (%u ms)", tag, ticks, TicksToMs(ticks)); } -void PersonDetectionTenIerationsWithoutPerson() { - benchmark_runner->SetInput(reinterpret_cast(g_no_person_data)); - for (int i = 0; i < 10; i++) { - benchmark_runner->RunSingleIteration(); +} // namespace tflite + +int 
main(int argc, char **argv) { + tflite::InitializeTarget(); + + while (true) { + tflite::MicroProfiler profiler; + + uint32_t event_handle = profiler.BeginEvent("InitializeBenchmarkRunner"); + tflite::PersonDetectionBenchmarkRunner *benchmark_runner = + CreateBenchmarkRunner(&profiler); + profiler.EndEvent(event_handle); + profiler.Log(); + MicroPrintf(""); // null MicroPrintf serves as a newline. + + tflite::PersonDetectionNIerations( + reinterpret_cast(g_person_image_data), 1, + "WithPersonDataIterations(1)", *benchmark_runner, profiler); + profiler.Log(); + MicroPrintf(""); // null MicroPrintf serves as a newline. + + tflite::PersonDetectionNIerations( + reinterpret_cast(g_no_person_image_data), 1, + "NoPersonDataIterations(1)", *benchmark_runner, profiler); + profiler.Log(); + MicroPrintf(""); // null MicroPrintf serves as a newline. + + tflite::PersonDetectionNIerations( + reinterpret_cast(g_person_image_data), 10, + "WithPersonDataIterations(10)", *benchmark_runner, profiler); + MicroPrintf(""); // null MicroPrintf serves as a newline. + + tflite::PersonDetectionNIerations( + reinterpret_cast(g_no_person_image_data), 10, + "NoPersonDataIterations(10)", *benchmark_runner, profiler); + MicroPrintf(""); // null MicroPrintf serves as a newline. 
} } - -} // namespace - -TF_LITE_MICRO_BENCHMARKS_BEGIN - -TF_LITE_MICRO_BENCHMARK(InitializeBenchmarkRunner()); -TF_LITE_MICRO_BENCHMARK(benchmark_runner->RunSingleIteration()); -TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithPerson()); -TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithoutPerson()); - -TF_LITE_MICRO_BENCHMARKS_END diff --git a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/no_person_image_data.cpp b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/no_person_image_data.cpp new file mode 100644 index 0000000..2effdf5 --- /dev/null +++ b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/no_person_image_data.cpp @@ -0,0 +1,775 @@ +#include + +#include "no_person_image_data.h" + +const unsigned int g_no_person_image_data_size = 9216; +alignas(16) const unsigned char g_no_person_image_data[] = { + 0xa, 0xa, 0xb, 0xb, 0x9, 0x8, 0x8, 0x7, 0x6, 0x4, 0x5, 0x5, + 0x4, 0xd, 0xf, 0x1, 0x6, 0x11, 0xc, 0x3, 0x9, 0xf, 0xa, 0x6, + 0x15, 0x10, 0x19, 0x11, 0x3e, 0x14, 0x16, 0x41, 0x29, 0x6, 0xd, 0x38, + 0x60, 0x57, 0x66, 0x3e, 0xf, 0x8, 0x15, 0x27, 0x36, 0x15, 0x25, 0x49, + 0x9c, 0xab, 0x88, 0x76, 0x90, 0xa1, 0x7e, 0x97, 0x63, 0x47, 0x4b, 0x7e, + 0xc8, 0xd8, 0xa7, 0x23, 0x25, 0x5d, 0x3e, 0x1d, 0x66, 0x24, 0x3, 0x3, + 0x4, 0x24, 0x49, 0x10, 0x21, 0x70, 0xbc, 0xe6, 0xe4, 0xe4, 0xe4, 0xe4, + 0xe4, 0xe2, 0xe2, 0xe2, 0xe4, 0xe3, 0xe0, 0xdf, 0xe0, 0xdb, 0xca, 0xb8, + 0x9, 0xa, 0xb, 0xa, 0xa, 0xa, 0x9, 0x8, 0xd, 0x8, 0xa, 0xa, + 0x6, 0xd, 0x15, 0x10, 0x4, 0xd, 0x11, 0xa, 0x8, 0xb, 0xd, 0x9, + 0x12, 0x22, 0x33, 0x1a, 0x38, 0x18, 0x36, 0x6f, 0x3c, 0xa, 0x21, 0x51, + 0x30, 0x2a, 0x3e, 0x27, 0x14, 0x16, 0x2, 0x26, 0x3c, 0x1a, 0x18, 0x43, + 0x84, 0xa3, 0x8b, 0x7a, 0x91, 0xa6, 0x94, 0x8e, 0x5c, 0x3a, 0x4c, 0x7f, + 0xcf, 0xeb, 0xac, 0x28, 0x8e, 0xa4, 0x5a, 0x3d, 0x68, 0xe, 0x13, 0x21, + 0x24, 0xf, 0x28, 0x11, 0x3a, 0xa7, 0xda, 0xe1, 0xe4, 0xe3, 0xe4, 0xe3, + 0xe3, 
0xe2, 0xe2, 0xe2, 0xe0, 0xe0, 0xdf, 0xdd, 0xe1, 0xe0, 0xd7, 0xcc, + 0x7, 0x8, 0x9, 0xa, 0xa, 0xb, 0xa, 0x9, 0xc, 0x6, 0x9, 0xd, + 0x6, 0xb, 0x19, 0x20, 0xb, 0x8, 0xe, 0x10, 0xb, 0xe, 0x11, 0x9, + 0x2, 0x1a, 0x2c, 0x12, 0x2d, 0x19, 0x4a, 0x82, 0x3b, 0xb, 0x21, 0x52, + 0x12, 0x25, 0x40, 0x14, 0x27, 0x48, 0x11, 0x3b, 0x5b, 0x33, 0x31, 0x6b, + 0x88, 0xa2, 0x98, 0x7c, 0x86, 0x9e, 0x9d, 0x7a, 0x4b, 0x32, 0x63, 0x7a, + 0xac, 0xd7, 0xc4, 0x8d, 0xc2, 0xcd, 0x7f, 0x79, 0x9a, 0x55, 0x87, 0x95, + 0x70, 0x14, 0x18, 0x1c, 0x42, 0xc6, 0xe7, 0xe0, 0xe3, 0xe2, 0xe3, 0xe2, + 0xe3, 0xe3, 0xe3, 0xe3, 0xdf, 0xe1, 0xdf, 0xde, 0xe0, 0xe4, 0xe2, 0xdd, + 0x6, 0x7, 0x8, 0xa, 0xa, 0xc, 0xc, 0xc, 0x9, 0x5, 0xb, 0x11, + 0x9, 0xc, 0x1e, 0x28, 0x13, 0x4, 0x6, 0x10, 0x11, 0x13, 0x13, 0x8, + 0x2, 0x8, 0x10, 0xa, 0x2f, 0x15, 0x3a, 0x5b, 0x1b, 0x1c, 0x1d, 0x3f, + 0x13, 0x2b, 0x5b, 0xe, 0x3e, 0x75, 0x2f, 0x45, 0x87, 0x62, 0x65, 0x8c, + 0x8c, 0x92, 0xae, 0x91, 0x87, 0xa0, 0x8e, 0x66, 0x6b, 0x2b, 0x57, 0x68, + 0x9d, 0xd7, 0xd6, 0xd2, 0xd4, 0xe5, 0xbc, 0xb9, 0xd1, 0xb5, 0xe0, 0xd6, + 0xb0, 0x32, 0x3d, 0x5c, 0x62, 0xce, 0xe1, 0xe3, 0xe3, 0xe2, 0xe2, 0xe3, + 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe6, 0xe5, 0xe0, 0xdf, 0xe1, 0xe1, 0xdf, + 0x5, 0x6, 0x8, 0x9, 0xa, 0xc, 0xc, 0xc, 0xd, 0xa, 0x10, 0x14, + 0xc, 0xf, 0x1c, 0x22, 0xd, 0x3, 0x7, 0x14, 0x13, 0xa, 0xa, 0xd, + 0x14, 0x5, 0x8, 0x1c, 0x46, 0x17, 0x2c, 0x3a, 0x13, 0x43, 0x27, 0x38, + 0x2b, 0x11, 0x55, 0xf, 0x3c, 0x7f, 0x4b, 0x40, 0xa2, 0x7b, 0x80, 0x7a, + 0x7e, 0x71, 0xbb, 0xb1, 0x99, 0xb0, 0x81, 0x65, 0x7d, 0x2d, 0x60, 0x75, + 0x97, 0xb5, 0xb5, 0xd6, 0xe1, 0xe3, 0xe1, 0xd8, 0xe3, 0xde, 0xe4, 0xdb, + 0xc7, 0x55, 0x7c, 0xb9, 0xab, 0xe1, 0xe1, 0xe7, 0xe4, 0xe3, 0xe3, 0xe4, + 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe9, 0xe9, 0xe2, 0xde, 0xdc, 0xdc, 0xd9, + 0x6, 0x7, 0x8, 0x9, 0xb, 0xc, 0xc, 0xc, 0xd, 0xb, 0xf, 0xf, + 0x8, 0xf, 0x16, 0xf, 0x4, 0xa, 0x11, 0x1e, 0x20, 0x8, 0x1, 0x15, + 0x17, 0x6, 0xb, 0x2d, 0x57, 0x23, 0x42, 0x51, 0x45, 0x5f, 0x20, 0x3e, + 0x5e, 0x1f, 0x47, 
0x11, 0x23, 0x83, 0x89, 0x5c, 0x99, 0x6a, 0x7d, 0x6f, + 0x80, 0x6c, 0xaa, 0xb6, 0x8e, 0xab, 0x7d, 0x78, 0x78, 0x31, 0x6a, 0x7d, + 0x82, 0x85, 0x91, 0xc7, 0xe1, 0xd5, 0xe1, 0xdf, 0xe3, 0xe2, 0xdd, 0xeb, + 0xdd, 0x8a, 0xaa, 0xe4, 0xda, 0xe8, 0xe6, 0xe8, 0xe7, 0xe7, 0xe5, 0xe6, + 0xe5, 0xe4, 0xe4, 0xe3, 0xe1, 0xe5, 0xe6, 0xe1, 0xdd, 0xdc, 0xda, 0xd6, + 0x7, 0x7, 0x8, 0x9, 0xb, 0xb, 0xb, 0xb, 0xb, 0xa, 0xa, 0x6, + 0x7, 0x17, 0x1b, 0xa, 0x10, 0x16, 0x13, 0x29, 0x45, 0x23, 0x2, 0x13, + 0x20, 0xe, 0x9, 0x26, 0x52, 0x31, 0x67, 0x75, 0x7a, 0x63, 0x1a, 0x43, + 0x77, 0x54, 0x49, 0x1a, 0x27, 0x8c, 0xb9, 0x83, 0x83, 0x5a, 0x75, 0x76, + 0x74, 0x75, 0x87, 0xad, 0x78, 0x94, 0x81, 0x79, 0x87, 0x34, 0x53, 0x6b, + 0x83, 0x89, 0x8a, 0x9c, 0xd0, 0xe0, 0xe6, 0xe4, 0xdd, 0xda, 0xde, 0xe4, + 0xec, 0xc6, 0xce, 0xe3, 0xe5, 0xdc, 0xea, 0xe4, 0xe9, 0xea, 0xe8, 0xe8, + 0xe6, 0xe4, 0xe4, 0xe3, 0xe1, 0xe3, 0xe2, 0xdc, 0xdb, 0xdd, 0xdb, 0xd7, + 0x7, 0x8, 0x9, 0xa, 0xa, 0xb, 0xa, 0xa, 0xd, 0xd, 0xb, 0x5, + 0xe, 0x28, 0x2d, 0x15, 0x28, 0x21, 0xc, 0x2f, 0x6b, 0x49, 0x8, 0xa, + 0x3b, 0x21, 0xa, 0x17, 0x46, 0x36, 0x79, 0x7d, 0x82, 0x60, 0x2a, 0x48, + 0x64, 0x71, 0x4d, 0x26, 0x49, 0x8a, 0xaf, 0x88, 0x78, 0x68, 0x73, 0x6f, + 0x48, 0x70, 0x6c, 0xb2, 0x7a, 0x89, 0x83, 0x65, 0x74, 0x41, 0x69, 0x7d, + 0x83, 0x7c, 0x81, 0x89, 0x9a, 0xde, 0xe1, 0xde, 0xd7, 0xdc, 0xef, 0xdb, + 0xde, 0xe9, 0xee, 0xea, 0xf1, 0xda, 0xef, 0xe1, 0xec, 0xeb, 0xea, 0xe8, + 0xe7, 0xe4, 0xe4, 0xe3, 0xe5, 0xe4, 0xe0, 0xda, 0xda, 0xdc, 0xdb, 0xd6, + 0x0, 0x8, 0x9, 0xb, 0xe, 0x9, 0x4, 0xb, 0x18, 0x3, 0x12, 0x10, + 0x18, 0x20, 0x42, 0x34, 0x5c, 0x3d, 0xd, 0x2e, 0x83, 0x66, 0x1a, 0x10, + 0x2e, 0x2c, 0x9, 0x24, 0x59, 0x50, 0x86, 0x7e, 0x8b, 0x6b, 0x35, 0x57, + 0x71, 0x6d, 0x52, 0x51, 0x4b, 0x77, 0xb5, 0x51, 0x51, 0x71, 0x78, 0x7c, + 0x51, 0x6c, 0x77, 0xb0, 0x94, 0x98, 0x71, 0x5e, 0x57, 0x3e, 0x6c, 0x88, + 0x8c, 0x67, 0x65, 0x7b, 0x8a, 0xce, 0xe7, 0xdb, 0xe1, 0xe0, 0xd8, 0xe1, + 0xe4, 0xea, 0xea, 0xe7, 0xe8, 0xe8, 0xe7, 0xea, 0xea, 0xe9, 
0xe9, 0xe9, + 0xea, 0xe7, 0xe6, 0xe4, 0xe0, 0xdf, 0xdd, 0xdc, 0xdc, 0xda, 0xd7, 0xd4, + 0xa, 0xd, 0x2, 0x1, 0x4, 0x9, 0x8, 0xb, 0xd, 0x12, 0x17, 0x11, + 0x15, 0x31, 0x35, 0x34, 0x68, 0x53, 0x25, 0x4f, 0x75, 0x59, 0x11, 0x19, + 0x38, 0x2b, 0x29, 0x36, 0x6d, 0x75, 0x7d, 0x73, 0x73, 0x6f, 0x48, 0x4b, + 0x62, 0x7c, 0x7e, 0x6d, 0x56, 0x66, 0xa1, 0x54, 0x53, 0x69, 0x75, 0x81, + 0x73, 0x81, 0x82, 0xa9, 0x8b, 0x8b, 0x69, 0x5a, 0x41, 0x3a, 0x74, 0x7d, + 0x85, 0x72, 0x72, 0x65, 0x7a, 0xb9, 0xd9, 0xda, 0xe5, 0xe4, 0xde, 0xe8, + 0xe0, 0xed, 0xe6, 0xe2, 0xf0, 0xea, 0xdd, 0xe5, 0xe7, 0xe9, 0xe7, 0xe3, + 0xe1, 0xdf, 0xde, 0xdb, 0xe1, 0xdf, 0xdd, 0xdc, 0xdc, 0xdc, 0xdb, 0xda, + 0x7, 0xb, 0xf, 0x14, 0x14, 0x7, 0x6, 0x18, 0x25, 0x27, 0x27, 0x2e, + 0x2d, 0x4a, 0x31, 0x47, 0x6b, 0x83, 0x5f, 0x65, 0x5f, 0x76, 0x44, 0x3b, + 0x5c, 0x37, 0x4d, 0x52, 0x7e, 0x87, 0x60, 0x68, 0x67, 0x69, 0x59, 0x45, + 0x4c, 0x61, 0x79, 0x5e, 0x74, 0x66, 0x9b, 0x6e, 0x62, 0x69, 0x76, 0x86, + 0x7c, 0x80, 0x83, 0x9a, 0x7c, 0x75, 0x5a, 0x51, 0x3b, 0x38, 0x7d, 0x7b, + 0x85, 0x75, 0x7a, 0x5b, 0x69, 0x9d, 0xc1, 0xd0, 0xe0, 0xdd, 0xd7, 0xe3, + 0xdd, 0xe9, 0xe0, 0xdf, 0xef, 0xe9, 0xda, 0xe3, 0xe2, 0xe8, 0xe7, 0xe0, + 0xdf, 0xe3, 0xe4, 0xe1, 0xe0, 0xdc, 0xdb, 0xda, 0xdb, 0xdd, 0xde, 0xde, + 0xf, 0x7, 0xe, 0x28, 0x2a, 0xb, 0x7, 0x27, 0x59, 0x32, 0x3f, 0x5c, + 0x5f, 0x65, 0x4d, 0x69, 0x73, 0x95, 0x80, 0x72, 0x65, 0x94, 0x85, 0x73, + 0x83, 0x57, 0x6a, 0x72, 0x85, 0x7a, 0x50, 0x6b, 0x69, 0x5d, 0x66, 0x66, + 0x6c, 0x64, 0x87, 0x7a, 0x78, 0x68, 0x9f, 0x85, 0x6f, 0x70, 0x7c, 0x84, + 0x68, 0x6e, 0x85, 0x90, 0x78, 0x6d, 0x57, 0x53, 0x45, 0x38, 0x7e, 0x7c, + 0x80, 0x66, 0x75, 0x62, 0x76, 0x9a, 0xba, 0xd1, 0xe3, 0xe2, 0xdd, 0xe5, + 0xe2, 0xdb, 0xdd, 0xdf, 0xdb, 0xdc, 0xe1, 0xe0, 0xda, 0xe0, 0xe0, 0xd9, + 0xd9, 0xdd, 0xde, 0xdb, 0xdd, 0xdb, 0xda, 0xdb, 0xdb, 0xdc, 0xdc, 0xda, + 0x1b, 0x12, 0xc, 0x16, 0x1e, 0x15, 0x15, 0x28, 0x6f, 0x36, 0x53, 0x70, + 0x7f, 0x7e, 0x7d, 0x77, 0x83, 0x7c, 0x77, 0x7e, 0x80, 0x86, 0x87, 0x87, + 0x77, 
0x7a, 0x7f, 0x84, 0x81, 0x76, 0x66, 0x74, 0x73, 0x63, 0x6e, 0x7a, + 0x7f, 0x6e, 0x8c, 0x84, 0x64, 0x6a, 0xa2, 0x8e, 0x74, 0x79, 0x80, 0x7a, + 0x68, 0x71, 0x99, 0x90, 0x81, 0x76, 0x69, 0x67, 0x44, 0x42, 0x7d, 0x71, + 0x6d, 0x5e, 0x72, 0x6a, 0x7c, 0x8e, 0xa6, 0xbf, 0xd7, 0xe0, 0xdf, 0xe5, + 0xeb, 0xc8, 0xd5, 0xdb, 0xb7, 0xc1, 0xde, 0xce, 0xda, 0xdc, 0xdd, 0xdd, + 0xe0, 0xe0, 0xde, 0xda, 0xde, 0xdc, 0xdc, 0xdd, 0xdf, 0xde, 0xd9, 0xd6, + 0xb, 0x30, 0x38, 0x1f, 0xf, 0x14, 0x1e, 0x29, 0x53, 0x3f, 0x55, 0x61, + 0x7f, 0x89, 0x93, 0x65, 0x78, 0x74, 0x79, 0x7d, 0x88, 0x7c, 0x6e, 0x5f, + 0x44, 0x80, 0x82, 0x81, 0x77, 0x81, 0x82, 0x70, 0x6c, 0x6d, 0x68, 0x66, + 0x69, 0x6a, 0x7d, 0x6d, 0x6b, 0x86, 0xb1, 0x98, 0x7b, 0x81, 0x7e, 0x6c, + 0x7e, 0x82, 0xae, 0x89, 0x81, 0x7d, 0x77, 0x6e, 0x3e, 0x5a, 0x84, 0x6b, + 0x65, 0x72, 0x7a, 0x71, 0x7a, 0x7c, 0x89, 0xa5, 0xc4, 0xdb, 0xe1, 0xde, + 0xe3, 0xb5, 0xc0, 0xc3, 0x97, 0xa0, 0xbf, 0xac, 0xb8, 0xb6, 0xbe, 0xd2, + 0xe2, 0xe3, 0xe1, 0xe3, 0xe0, 0xde, 0xde, 0xde, 0xe0, 0xdf, 0xdc, 0xd8, + 0x1a, 0x48, 0x58, 0x38, 0x1b, 0x12, 0x1b, 0x29, 0x2e, 0x55, 0x47, 0x54, + 0x7c, 0x7f, 0x7a, 0x50, 0x5a, 0x77, 0x79, 0x7c, 0x7e, 0x84, 0x62, 0x49, + 0x36, 0x75, 0x7c, 0x80, 0x76, 0x87, 0x80, 0x6c, 0x5e, 0x74, 0x63, 0x64, + 0x6d, 0x82, 0x8b, 0x81, 0x7a, 0x9c, 0xb0, 0x95, 0x7f, 0x84, 0x76, 0x65, + 0x8a, 0x86, 0xb1, 0x77, 0x7c, 0x7e, 0x75, 0x5c, 0x42, 0x6d, 0x84, 0x7a, + 0x77, 0x88, 0x73, 0x75, 0x87, 0x7e, 0x82, 0x98, 0xbb, 0xdd, 0xe4, 0xd3, + 0xc3, 0xa2, 0x9b, 0x9b, 0x88, 0x86, 0x90, 0x87, 0x86, 0x7e, 0x8f, 0xb9, + 0xd7, 0xd8, 0xd7, 0xde, 0xe0, 0xdd, 0xda, 0xd8, 0xd9, 0xdc, 0xdd, 0xdd, + 0x57, 0x4e, 0x3e, 0x35, 0x2c, 0x1b, 0x16, 0x25, 0x1e, 0x68, 0x38, 0x57, + 0x81, 0x70, 0x52, 0x48, 0x4a, 0x64, 0x60, 0x87, 0x7f, 0x82, 0x57, 0x60, + 0x52, 0x70, 0x79, 0x8b, 0x81, 0x86, 0x6f, 0x70, 0x70, 0x8a, 0x6f, 0x77, + 0x7b, 0x85, 0x7d, 0x7e, 0x6e, 0x8f, 0x97, 0x80, 0x76, 0x7f, 0x70, 0x64, + 0x86, 0x80, 0xac, 0x6b, 0x7a, 0x80, 0x70, 0x4a, 0x4a, 0x6d, 0x7a, 0x8b, + 0x8c, 
0x8a, 0x5e, 0x72, 0x84, 0x74, 0x71, 0x80, 0xa4, 0xc8, 0xcc, 0xb2, + 0xa3, 0x93, 0x7d, 0x7c, 0x87, 0x7d, 0x6d, 0x73, 0x8b, 0x7e, 0x90, 0xc3, + 0xe4, 0xe2, 0xde, 0xe5, 0xdc, 0xd7, 0xd1, 0xcf, 0xd2, 0xd7, 0xdc, 0xdf, + 0x71, 0x57, 0x61, 0x45, 0x41, 0x3e, 0x42, 0x2c, 0x33, 0x61, 0x4f, 0x57, + 0x4d, 0x50, 0x56, 0x5e, 0x65, 0x5a, 0x67, 0x86, 0x8b, 0x70, 0x5a, 0x59, + 0x55, 0x78, 0xa4, 0x87, 0x6d, 0x72, 0x5e, 0x5c, 0x57, 0x79, 0x76, 0x61, + 0x6f, 0x82, 0x7d, 0x77, 0x70, 0x86, 0x8d, 0x72, 0x7c, 0x80, 0x64, 0x75, + 0x7a, 0x71, 0x95, 0x7a, 0x76, 0x8a, 0x83, 0x3e, 0x4b, 0x6e, 0x80, 0xc0, + 0xc9, 0x98, 0x61, 0x63, 0x66, 0x74, 0x79, 0x74, 0x7d, 0x8f, 0x94, 0x8b, + 0x78, 0x7e, 0x7e, 0x76, 0x6c, 0x6b, 0x72, 0x78, 0x85, 0x79, 0x9d, 0xbe, + 0xbd, 0xd5, 0xec, 0xd7, 0xd7, 0xca, 0xbd, 0xb4, 0xbb, 0xd8, 0xe5, 0xd9, + 0x61, 0x6c, 0x91, 0x62, 0x4d, 0x54, 0x51, 0x3a, 0x5e, 0x7b, 0x76, 0x8b, + 0x76, 0x6a, 0x77, 0x8c, 0x82, 0x6b, 0x5d, 0x66, 0x6c, 0x66, 0x64, 0x6a, + 0x55, 0x5d, 0x6c, 0x60, 0x57, 0x63, 0x56, 0x42, 0x4c, 0x62, 0x63, 0x5b, + 0x6a, 0x75, 0x73, 0x78, 0x74, 0x85, 0x91, 0x82, 0x87, 0x80, 0x61, 0x70, + 0x6b, 0x65, 0x7f, 0x6d, 0x7f, 0x9c, 0x8e, 0x50, 0x51, 0x6f, 0x90, 0xa9, + 0xb6, 0x91, 0x8a, 0x7b, 0x75, 0x75, 0x6e, 0x65, 0x69, 0x75, 0x79, 0x73, + 0x6c, 0x6b, 0x69, 0x67, 0x65, 0x67, 0x6e, 0x76, 0x7f, 0x69, 0x7b, 0x90, + 0x8f, 0xb3, 0xdb, 0xd6, 0xca, 0xb3, 0x9a, 0x8c, 0x99, 0xc2, 0xe1, 0xe0, + 0x75, 0x5c, 0x85, 0x71, 0x51, 0x3b, 0x3c, 0x68, 0x77, 0x74, 0x66, 0x7a, + 0x76, 0x80, 0x8f, 0x90, 0x7f, 0x6b, 0x57, 0x55, 0x61, 0x70, 0x7a, 0x80, + 0x6e, 0x64, 0x5d, 0x61, 0x59, 0x5c, 0x5e, 0x3e, 0x3f, 0x54, 0x63, 0x6c, + 0x74, 0x6c, 0x63, 0x68, 0x75, 0x86, 0x99, 0x94, 0x8d, 0x7d, 0x62, 0x69, + 0x5f, 0x60, 0x74, 0x73, 0x9e, 0xba, 0x93, 0x56, 0x4e, 0x79, 0xa5, 0x93, + 0xbc, 0xb9, 0xc8, 0x8a, 0x75, 0x6f, 0x68, 0x65, 0x67, 0x6b, 0x6d, 0x6e, + 0x67, 0x5d, 0x58, 0x5d, 0x62, 0x63, 0x68, 0x6f, 0x68, 0x58, 0x69, 0x7a, + 0x79, 0x95, 0xb8, 0xb3, 0xa1, 0x8e, 0x80, 0x7d, 0x8b, 0xaf, 0xc9, 0xc9, + 0x5b, 
0x2a, 0x5d, 0x7d, 0x6c, 0x39, 0x1c, 0x5c, 0x7c, 0x6f, 0x5f, 0x59, + 0x5c, 0x85, 0x9b, 0x87, 0x7a, 0x71, 0x5f, 0x51, 0x59, 0x6f, 0x7c, 0x7c, + 0x6b, 0x67, 0x61, 0x6c, 0x54, 0x42, 0x55, 0x3d, 0x2f, 0x46, 0x5e, 0x6c, + 0x6c, 0x61, 0x57, 0x57, 0x6e, 0x87, 0xa0, 0x97, 0x80, 0x74, 0x6c, 0x6b, + 0x6e, 0x71, 0x75, 0x79, 0xb2, 0xcb, 0x93, 0x5d, 0x52, 0x71, 0x86, 0x88, + 0xd3, 0xc6, 0xaf, 0x71, 0x69, 0x69, 0x6d, 0x75, 0x75, 0x6d, 0x6d, 0x72, + 0x6e, 0x5e, 0x57, 0x5f, 0x67, 0x64, 0x62, 0x65, 0x5a, 0x54, 0x68, 0x78, + 0x72, 0x7e, 0x8e, 0x85, 0x78, 0x71, 0x73, 0x7b, 0x82, 0x97, 0xa8, 0xa8, + 0x44, 0x30, 0x44, 0x4b, 0x56, 0x5c, 0x32, 0x3b, 0x55, 0x57, 0x66, 0x5b, + 0x52, 0x69, 0x80, 0x7a, 0x88, 0x86, 0x71, 0x54, 0x4e, 0x5f, 0x67, 0x63, + 0x6e, 0x6f, 0x66, 0x6d, 0x51, 0x38, 0x49, 0x40, 0x39, 0x54, 0x6e, 0x74, + 0x6e, 0x6e, 0x70, 0x6b, 0x5f, 0x80, 0x9c, 0x8d, 0x66, 0x65, 0x7a, 0x75, + 0x81, 0x81, 0x75, 0x7b, 0xb1, 0xcd, 0x92, 0x68, 0x81, 0x84, 0x67, 0x93, + 0xd9, 0xad, 0x6c, 0x65, 0x65, 0x68, 0x72, 0x7a, 0x73, 0x63, 0x63, 0x6e, + 0x6e, 0x61, 0x5c, 0x64, 0x6a, 0x65, 0x5d, 0x5a, 0x60, 0x5d, 0x69, 0x6e, + 0x64, 0x6a, 0x75, 0x73, 0x6c, 0x65, 0x6a, 0x6d, 0x6b, 0x7a, 0x93, 0xa0, + 0x58, 0x56, 0x35, 0x1b, 0x2f, 0x61, 0x4a, 0x2e, 0x25, 0x22, 0x4e, 0x65, + 0x6c, 0x5c, 0x58, 0x63, 0x76, 0x82, 0x7f, 0x6e, 0x69, 0x6f, 0x66, 0x55, + 0x8b, 0x83, 0x6d, 0x67, 0x5c, 0x47, 0x41, 0x3f, 0x42, 0x60, 0x87, 0x8f, + 0x80, 0x7a, 0x78, 0x6d, 0x5c, 0x78, 0x93, 0x86, 0x52, 0x54, 0x7f, 0x79, + 0x76, 0x80, 0x7e, 0x8f, 0xb9, 0xcd, 0x8a, 0x62, 0x8a, 0xa6, 0x6c, 0x7c, + 0xa3, 0xa5, 0x6d, 0x6a, 0x64, 0x66, 0x6c, 0x6e, 0x67, 0x5f, 0x66, 0x74, + 0x63, 0x60, 0x5e, 0x62, 0x67, 0x66, 0x5d, 0x56, 0x61, 0x64, 0x6e, 0x71, + 0x67, 0x66, 0x6d, 0x6d, 0x69, 0x62, 0x6a, 0x70, 0x6a, 0x71, 0x88, 0x99, + 0x3e, 0x5b, 0x34, 0x28, 0x38, 0x5d, 0x4f, 0x2c, 0x29, 0x20, 0x50, 0x6f, + 0x8c, 0x6b, 0x51, 0x5d, 0x64, 0x74, 0x81, 0x81, 0x86, 0x83, 0x6a, 0x4a, + 0x72, 0x6c, 0x59, 0x4d, 0x5b, 0x52, 0x31, 0x35, 0x35, 0x48, 0x73, 0x87, + 0x73, 
0x64, 0x67, 0x64, 0x76, 0x7c, 0x91, 0x92, 0x54, 0x46, 0x77, 0x74, + 0x78, 0x87, 0x92, 0xaa, 0xbc, 0xc9, 0x87, 0x60, 0x5e, 0x89, 0x5c, 0x50, + 0x5e, 0x9a, 0x88, 0x60, 0x64, 0x64, 0x64, 0x63, 0x62, 0x66, 0x6e, 0x76, + 0x5a, 0x62, 0x64, 0x61, 0x63, 0x68, 0x65, 0x5b, 0x57, 0x62, 0x72, 0x76, + 0x6e, 0x67, 0x67, 0x68, 0x67, 0x60, 0x6a, 0x76, 0x70, 0x6e, 0x79, 0x83, + 0x1a, 0x5c, 0x2f, 0x1f, 0x2c, 0x62, 0x73, 0x53, 0x3b, 0x44, 0x71, 0x6e, + 0x7f, 0x5f, 0x49, 0x5c, 0x7a, 0x7f, 0x7d, 0x78, 0x7b, 0x78, 0x5a, 0x38, + 0x3b, 0x43, 0x45, 0x3d, 0x5e, 0x60, 0x33, 0x42, 0x3b, 0x39, 0x57, 0x70, + 0x5f, 0x5b, 0x76, 0x8d, 0x96, 0x86, 0x95, 0xa3, 0x60, 0x40, 0x6d, 0x6b, + 0x98, 0x9d, 0x9f, 0xac, 0xad, 0xbf, 0x8c, 0x72, 0x58, 0x57, 0x43, 0x54, + 0x59, 0x8e, 0x94, 0x64, 0x68, 0x66, 0x61, 0x5e, 0x61, 0x67, 0x69, 0x67, + 0x5b, 0x69, 0x6d, 0x64, 0x62, 0x6b, 0x6c, 0x62, 0x56, 0x5f, 0x68, 0x67, + 0x5e, 0x5c, 0x60, 0x68, 0x6d, 0x5d, 0x5e, 0x66, 0x62, 0x61, 0x6b, 0x72, + 0x21, 0x25, 0x1e, 0x25, 0x30, 0x63, 0x98, 0x79, 0x6a, 0x71, 0x6c, 0x80, + 0x6e, 0x71, 0x60, 0x66, 0x8c, 0x80, 0x82, 0x75, 0x79, 0x73, 0x47, 0x44, + 0x35, 0x35, 0x34, 0x35, 0x41, 0x4e, 0x4e, 0x45, 0x3d, 0x35, 0x47, 0x64, + 0x62, 0x5d, 0x6f, 0x87, 0x7b, 0x8d, 0xa0, 0x9a, 0x84, 0x7e, 0x82, 0x7e, + 0xab, 0xa8, 0xa8, 0x97, 0x99, 0xb0, 0xa0, 0x98, 0x56, 0x3d, 0x35, 0x5e, + 0x5e, 0x94, 0xb0, 0x6f, 0x5c, 0x6c, 0x69, 0x5d, 0x66, 0x6e, 0x69, 0x62, + 0x63, 0x68, 0x6a, 0x67, 0x62, 0x63, 0x68, 0x6e, 0x6c, 0x6c, 0x66, 0x68, + 0x6e, 0x65, 0x5c, 0x61, 0x60, 0x62, 0x6f, 0x61, 0x69, 0x5f, 0x71, 0x75, + 0x2a, 0x2b, 0x21, 0x24, 0x21, 0x3f, 0x78, 0x7e, 0x7f, 0x71, 0x71, 0x5f, + 0x6b, 0x5c, 0x5e, 0x55, 0x62, 0x49, 0x5a, 0x71, 0x73, 0x64, 0x4e, 0x48, + 0x39, 0x31, 0x31, 0x3b, 0x4a, 0x57, 0x6b, 0x7d, 0x5c, 0x43, 0x46, 0x60, + 0x62, 0x58, 0x66, 0x7c, 0x7d, 0x7c, 0x86, 0x99, 0xa0, 0x95, 0x8f, 0x93, + 0xab, 0xa2, 0x97, 0x8c, 0x8f, 0x9c, 0x9b, 0x98, 0x49, 0x44, 0x44, 0x6c, + 0x77, 0xae, 0xb6, 0x6e, 0x65, 0x67, 0x62, 0x5f, 0x66, 0x68, 0x67, 0x69, + 0x6b, 
0x64, 0x5f, 0x61, 0x67, 0x6c, 0x66, 0x60, 0x6e, 0x6b, 0x62, 0x63, + 0x6b, 0x65, 0x5b, 0x5e, 0x6d, 0x6a, 0x6b, 0x5f, 0x5f, 0x5b, 0x64, 0x68, + 0x34, 0x2e, 0x21, 0x27, 0x29, 0x36, 0x6b, 0x93, 0x6c, 0x5b, 0x59, 0x4f, + 0x64, 0x56, 0x54, 0x44, 0x55, 0x4c, 0x4b, 0x6e, 0x73, 0x45, 0x36, 0x49, + 0x38, 0x38, 0x53, 0x80, 0x90, 0x7f, 0x71, 0x74, 0x65, 0x4b, 0x44, 0x4f, + 0x4b, 0x41, 0x53, 0x74, 0x77, 0x77, 0x7f, 0x99, 0xa4, 0x88, 0x7e, 0x9e, + 0xa2, 0x90, 0x7f, 0x80, 0x88, 0x7f, 0x82, 0x7c, 0x47, 0x50, 0x48, 0x60, + 0x73, 0xb0, 0xb5, 0x7e, 0x5f, 0x5c, 0x5f, 0x65, 0x66, 0x5f, 0x60, 0x68, + 0x69, 0x62, 0x5c, 0x5f, 0x66, 0x67, 0x62, 0x5c, 0x54, 0x68, 0x76, 0x7c, + 0x7a, 0x67, 0x5d, 0x65, 0x64, 0x6b, 0x69, 0x66, 0x55, 0x57, 0x5a, 0x65, + 0x25, 0x21, 0x1f, 0x2e, 0x37, 0x2e, 0x37, 0x4e, 0x45, 0x41, 0x3d, 0x61, + 0x5f, 0x5d, 0x40, 0x3d, 0x48, 0x5e, 0x40, 0x5d, 0x81, 0x55, 0x51, 0x80, + 0x7b, 0x61, 0x58, 0x6b, 0x7c, 0x7b, 0x79, 0x7f, 0x61, 0x5c, 0x52, 0x47, + 0x3a, 0x38, 0x58, 0x86, 0x89, 0x88, 0x82, 0x93, 0xa1, 0x81, 0x6f, 0x8f, + 0x92, 0x7e, 0x70, 0x7e, 0x93, 0x7d, 0x7f, 0x73, 0x64, 0x71, 0x63, 0x65, + 0x6e, 0x9f, 0xa9, 0x99, 0x57, 0x5f, 0x6f, 0x74, 0x69, 0x5f, 0x61, 0x63, + 0x5f, 0x62, 0x63, 0x60, 0x59, 0x55, 0x5d, 0x67, 0x64, 0x6f, 0x75, 0x74, + 0x70, 0x64, 0x61, 0x6d, 0x54, 0x66, 0x69, 0x70, 0x56, 0x5c, 0x5d, 0x6e, + 0x14, 0x21, 0x3a, 0x52, 0x5e, 0x4c, 0x32, 0x2f, 0x35, 0x33, 0x38, 0x62, + 0x5a, 0x51, 0x3b, 0x47, 0x42, 0x42, 0x22, 0x3a, 0x62, 0x64, 0x76, 0x7e, + 0x46, 0x4f, 0x60, 0x76, 0x84, 0x7f, 0x6a, 0x57, 0x59, 0x63, 0x51, 0x32, + 0x2d, 0x3b, 0x5e, 0x85, 0x88, 0x7b, 0x61, 0x6e, 0x95, 0x8c, 0x6e, 0x72, + 0x7f, 0x63, 0x5a, 0x60, 0x82, 0x79, 0x84, 0x7d, 0x6e, 0x7b, 0x73, 0x70, + 0x66, 0x81, 0x8e, 0xa4, 0x64, 0x76, 0x8b, 0x81, 0x66, 0x63, 0x6b, 0x65, + 0x5d, 0x5e, 0x60, 0x5d, 0x53, 0x4f, 0x5b, 0x6c, 0x67, 0x6a, 0x69, 0x6f, + 0x75, 0x68, 0x55, 0x52, 0x6a, 0x6f, 0x64, 0x6c, 0x5c, 0x69, 0x65, 0x6f, + 0x21, 0x21, 0x36, 0x3c, 0x3d, 0x3b, 0x2c, 0x2a, 0x32, 0x2d, 0x41, 0x44, + 0x52, 
0x3e, 0x4e, 0x59, 0x6a, 0x40, 0x35, 0x49, 0x54, 0x82, 0x94, 0x59, + 0x1e, 0x3f, 0x5f, 0x6c, 0x77, 0x83, 0x80, 0x72, 0x67, 0x68, 0x46, 0x26, + 0x33, 0x49, 0x5f, 0x79, 0x6d, 0x67, 0x59, 0x66, 0x8b, 0x91, 0x80, 0x7d, + 0x8a, 0x64, 0x60, 0x42, 0x62, 0x6d, 0x80, 0x87, 0x68, 0x6a, 0x68, 0x66, + 0x57, 0x78, 0x93, 0xc3, 0x7b, 0x86, 0x92, 0x7c, 0x57, 0x5c, 0x6d, 0x63, + 0x64, 0x56, 0x4e, 0x53, 0x58, 0x5b, 0x5e, 0x64, 0x53, 0x62, 0x6b, 0x71, + 0x72, 0x67, 0x61, 0x6c, 0x8f, 0x7a, 0x5f, 0x5b, 0x5f, 0x6d, 0x67, 0x63, + 0x79, 0x4a, 0x42, 0x35, 0x2b, 0x34, 0x2f, 0x2e, 0x2f, 0x40, 0x55, 0x3f, + 0x49, 0x48, 0x5c, 0x57, 0x3f, 0x33, 0x3e, 0x40, 0x50, 0x77, 0x68, 0x35, + 0x2e, 0x49, 0x61, 0x6a, 0x77, 0x89, 0x89, 0x7b, 0x81, 0x76, 0x4d, 0x37, + 0x4a, 0x56, 0x63, 0x83, 0x71, 0x70, 0x75, 0x81, 0x8c, 0x8a, 0x88, 0x8a, + 0x8a, 0x6e, 0x83, 0x40, 0x52, 0x69, 0x74, 0x81, 0x6c, 0x64, 0x66, 0x64, + 0x58, 0x8d, 0xac, 0xd2, 0x85, 0x7d, 0x82, 0x73, 0x52, 0x56, 0x68, 0x5e, + 0x63, 0x4e, 0x43, 0x4f, 0x61, 0x65, 0x60, 0x5d, 0x61, 0x65, 0x62, 0x62, + 0x6e, 0x76, 0x86, 0x9e, 0x83, 0x6f, 0x5e, 0x50, 0x5b, 0x5f, 0x67, 0x64, + 0x7f, 0x34, 0x28, 0x30, 0x36, 0x49, 0x3c, 0x31, 0x36, 0x68, 0x6f, 0x5e, + 0x46, 0x64, 0x58, 0x44, 0x31, 0x6c, 0x76, 0x58, 0x7b, 0x79, 0x38, 0x3e, + 0x5b, 0x5c, 0x5b, 0x61, 0x73, 0x81, 0x76, 0x60, 0x7a, 0x6b, 0x46, 0x39, + 0x46, 0x42, 0x50, 0x81, 0x80, 0x6a, 0x69, 0x7a, 0x7d, 0x72, 0x65, 0x5b, + 0x55, 0x4c, 0x81, 0x33, 0x3e, 0x56, 0x52, 0x60, 0x5c, 0x56, 0x64, 0x61, + 0x57, 0x8c, 0x95, 0x9e, 0x86, 0x6f, 0x74, 0x75, 0x5b, 0x5a, 0x67, 0x5e, + 0x5d, 0x4b, 0x44, 0x53, 0x63, 0x64, 0x60, 0x5e, 0x61, 0x58, 0x57, 0x78, + 0xa6, 0xaa, 0x86, 0x6b, 0x55, 0x56, 0x61, 0x50, 0x55, 0x4d, 0x66, 0x70, + 0x5b, 0x26, 0x1b, 0x2f, 0x2e, 0x33, 0x3e, 0x34, 0x47, 0x5a, 0x5d, 0x54, + 0x55, 0x6f, 0x3e, 0x40, 0x43, 0x41, 0x6d, 0x6e, 0x7f, 0x71, 0x5d, 0x59, + 0x70, 0x64, 0x60, 0x79, 0x6e, 0x74, 0x69, 0x66, 0x6b, 0x4e, 0x48, 0x4a, + 0x45, 0x51, 0x63, 0x61, 0x6d, 0x84, 0x6b, 0x68, 0x8c, 0x74, 0x44, 0x4d, + 0x5f, 
0x73, 0x67, 0x3c, 0x63, 0x45, 0x48, 0x47, 0x5f, 0x63, 0x69, 0x5c, + 0x58, 0x84, 0x7b, 0x9f, 0x95, 0x5e, 0x79, 0x71, 0x51, 0x75, 0x4b, 0x55, + 0x5a, 0x54, 0x4f, 0x50, 0x58, 0x66, 0x74, 0x7c, 0x78, 0x88, 0x9d, 0xa4, + 0x96, 0x7c, 0x65, 0x59, 0x5d, 0x5e, 0x52, 0x4a, 0x53, 0x5b, 0x5c, 0x61, + 0x77, 0x5d, 0x56, 0x4e, 0x36, 0x2c, 0x2f, 0x2c, 0x38, 0x68, 0x59, 0x48, + 0x40, 0x51, 0x3d, 0x2e, 0x2c, 0x45, 0x7a, 0x7c, 0x79, 0x74, 0x73, 0x84, + 0x74, 0x6e, 0x70, 0x7d, 0x63, 0x69, 0x58, 0x43, 0x49, 0x59, 0x5a, 0x3e, + 0x32, 0x55, 0x66, 0x4c, 0x5e, 0x5b, 0x5d, 0x71, 0x83, 0x76, 0x68, 0x6d, + 0x8b, 0x92, 0x7b, 0x46, 0x63, 0x47, 0x51, 0x5e, 0x72, 0x83, 0x94, 0x7e, + 0x5c, 0x6b, 0x8a, 0xbb, 0xa7, 0x67, 0x96, 0x80, 0x3f, 0x4d, 0x5c, 0x60, + 0x76, 0x74, 0x74, 0x7b, 0x86, 0x94, 0x9b, 0x9f, 0x95, 0x8b, 0x7e, 0x71, + 0x64, 0x59, 0x5a, 0x61, 0x55, 0x57, 0x4a, 0x43, 0x4d, 0x58, 0x5f, 0x67, + 0x7b, 0x85, 0x84, 0x6e, 0x53, 0x41, 0x35, 0x2a, 0x48, 0x75, 0x60, 0x6f, + 0x7f, 0x7d, 0x66, 0x3e, 0x39, 0x4c, 0x71, 0x7a, 0x5b, 0x4a, 0x4b, 0x73, + 0x92, 0x87, 0x7e, 0x72, 0x47, 0x55, 0x55, 0x3e, 0x33, 0x42, 0x4a, 0x3c, + 0x31, 0x49, 0x63, 0x63, 0x8d, 0x62, 0x60, 0x6d, 0x5e, 0x5d, 0x79, 0x89, + 0x97, 0x92, 0x78, 0x41, 0x54, 0x44, 0x5a, 0x78, 0x83, 0x8a, 0x94, 0x8b, + 0x72, 0x6e, 0x9b, 0xa5, 0xc8, 0x81, 0x83, 0x68, 0x5f, 0x6e, 0x9f, 0xa9, + 0x9d, 0x92, 0x87, 0x7e, 0x79, 0x6f, 0x61, 0x57, 0x57, 0x54, 0x5c, 0x6a, + 0x6b, 0x60, 0x5c, 0x61, 0x55, 0x56, 0x49, 0x42, 0x4b, 0x53, 0x54, 0x59, + 0x85, 0x95, 0x8c, 0x74, 0x72, 0x77, 0x6d, 0x5e, 0x78, 0x82, 0x72, 0x8e, + 0x9f, 0x7b, 0x4e, 0x34, 0x36, 0x30, 0x39, 0x5a, 0x3e, 0x2b, 0x2f, 0x66, + 0x80, 0x83, 0x8e, 0x8e, 0x52, 0x4b, 0x3f, 0x28, 0x35, 0x36, 0x3d, 0x3d, + 0x39, 0x48, 0x6a, 0x84, 0x70, 0x5a, 0x67, 0x78, 0x65, 0x61, 0x79, 0x87, + 0x8d, 0x84, 0x71, 0x41, 0x4a, 0x41, 0x54, 0x79, 0x81, 0x84, 0x85, 0x83, + 0x7f, 0x73, 0x9e, 0x71, 0x99, 0x8d, 0x70, 0x77, 0xab, 0x8e, 0x6c, 0x66, + 0x5d, 0x55, 0x51, 0x52, 0x58, 0x58, 0x50, 0x47, 0x51, 0x4b, 0x50, 0x5f, + 0x60, 
0x58, 0x5b, 0x67, 0x60, 0x5c, 0x4c, 0x45, 0x50, 0x52, 0x45, 0x3e, + 0x73, 0x7a, 0x64, 0x49, 0x53, 0x70, 0x7d, 0x7b, 0x81, 0x7b, 0x81, 0x78, + 0x78, 0x57, 0x2e, 0x3a, 0x33, 0x2e, 0x24, 0x3e, 0x2a, 0x29, 0x3b, 0x67, + 0x87, 0x7d, 0x78, 0x71, 0x33, 0x2b, 0x34, 0x3d, 0x3b, 0x47, 0x45, 0x39, + 0x3f, 0x57, 0x72, 0x82, 0x58, 0x65, 0x69, 0x67, 0x62, 0x5b, 0x64, 0x7b, + 0x8e, 0x87, 0x81, 0x55, 0x50, 0x44, 0x40, 0x58, 0x70, 0x89, 0x85, 0x74, + 0x63, 0x61, 0x9c, 0x6e, 0xab, 0xa6, 0x57, 0x4a, 0x63, 0x5a, 0x4b, 0x6a, + 0x63, 0x59, 0x50, 0x4d, 0x50, 0x4f, 0x46, 0x3d, 0x5a, 0x52, 0x51, 0x59, + 0x57, 0x55, 0x61, 0x74, 0x67, 0x5c, 0x45, 0x41, 0x54, 0x5c, 0x4b, 0x3d, + 0x7a, 0x82, 0x7d, 0x6a, 0x66, 0x78, 0x88, 0x8a, 0x79, 0x76, 0x80, 0x60, + 0x6f, 0x69, 0x41, 0x3f, 0x41, 0x50, 0x3e, 0x33, 0x21, 0x2e, 0x4f, 0x61, + 0x63, 0x69, 0x6b, 0x6f, 0x43, 0x2c, 0x20, 0x2e, 0x3d, 0x3c, 0x37, 0x3e, + 0x4c, 0x51, 0x60, 0x7c, 0x71, 0x87, 0x74, 0x5b, 0x59, 0x4b, 0x4a, 0x6b, + 0x80, 0x7e, 0x86, 0x61, 0x52, 0x4b, 0x2d, 0x31, 0x57, 0x78, 0x6c, 0x53, + 0x44, 0x59, 0xa2, 0x94, 0xaf, 0x9e, 0x67, 0x59, 0x42, 0x50, 0x58, 0x65, + 0x5f, 0x59, 0x54, 0x56, 0x5b, 0x5c, 0x56, 0x50, 0x40, 0x46, 0x53, 0x61, + 0x68, 0x6a, 0x6d, 0x73, 0x65, 0x55, 0x3b, 0x3a, 0x57, 0x68, 0x5d, 0x53, + 0x78, 0x79, 0x8a, 0x8a, 0x78, 0x76, 0x7e, 0x78, 0x86, 0x7e, 0x6f, 0x56, + 0x79, 0x73, 0x56, 0x3b, 0x4a, 0x4f, 0x3d, 0x2e, 0x2f, 0x34, 0x60, 0x69, + 0x66, 0x6a, 0x4f, 0x44, 0x31, 0x33, 0x2e, 0x46, 0x44, 0x37, 0x2d, 0x3d, + 0x4e, 0x45, 0x4e, 0x78, 0x5f, 0x78, 0x7e, 0x7f, 0x7a, 0x59, 0x45, 0x57, + 0x6c, 0x6d, 0x81, 0x63, 0x56, 0x5b, 0x2d, 0x21, 0x38, 0x56, 0x49, 0x47, + 0x3f, 0x60, 0x9b, 0xad, 0xad, 0xa1, 0x88, 0x66, 0x4e, 0x51, 0x4b, 0x49, + 0x5e, 0x5d, 0x5d, 0x5f, 0x5f, 0x59, 0x4f, 0x47, 0x55, 0x55, 0x55, 0x57, + 0x61, 0x6c, 0x6b, 0x65, 0x60, 0x57, 0x42, 0x42, 0x5c, 0x6a, 0x60, 0x58, + 0x8c, 0x77, 0x82, 0x8e, 0x7c, 0x7c, 0x88, 0x7e, 0x85, 0x7c, 0x5a, 0x58, + 0x7f, 0x6e, 0x82, 0x7e, 0x6a, 0x4a, 0x2e, 0x33, 0x43, 0x2b, 0x51, 0x5f, + 0x51, 
0x62, 0x40, 0x31, 0x35, 0x43, 0x35, 0x43, 0x4e, 0x5b, 0x46, 0x2f, + 0x3b, 0x4c, 0x56, 0x66, 0x6e, 0x6f, 0x7c, 0x8a, 0x79, 0x53, 0x44, 0x51, + 0x6b, 0x6f, 0x86, 0x69, 0x60, 0x6d, 0x39, 0x24, 0x21, 0x48, 0x4c, 0x5a, + 0x49, 0x5c, 0x82, 0xb1, 0x8f, 0xaf, 0xa8, 0x66, 0x6e, 0x59, 0x46, 0x5f, + 0x57, 0x5c, 0x65, 0x6b, 0x6b, 0x63, 0x56, 0x4c, 0x4f, 0x53, 0x56, 0x5b, + 0x6d, 0x79, 0x6c, 0x57, 0x60, 0x5e, 0x52, 0x51, 0x62, 0x65, 0x54, 0x49, + 0x6d, 0x57, 0x64, 0x7c, 0x8b, 0x83, 0x6c, 0x73, 0x72, 0x5a, 0x58, 0x6d, + 0x6d, 0x5c, 0x68, 0x8a, 0x65, 0x32, 0x26, 0x2d, 0x3d, 0x38, 0x25, 0x45, + 0x47, 0x3a, 0x2d, 0x3f, 0x38, 0x1c, 0x29, 0x3e, 0x4c, 0x6b, 0x51, 0x37, + 0x3f, 0x3e, 0x4f, 0x62, 0x71, 0x75, 0x7d, 0x83, 0x82, 0x7b, 0x76, 0x76, + 0x67, 0x90, 0x85, 0x6a, 0x56, 0x7b, 0x62, 0x36, 0x4e, 0x58, 0x70, 0x53, + 0x4f, 0x43, 0x8e, 0x91, 0x78, 0xbc, 0x9e, 0x99, 0x78, 0x6b, 0x43, 0x54, + 0x60, 0x52, 0x76, 0x73, 0x57, 0x57, 0x4e, 0x4c, 0x5b, 0x5f, 0x60, 0x62, + 0x69, 0x6e, 0x6c, 0x66, 0x63, 0x68, 0x6c, 0x60, 0x53, 0x5a, 0x5d, 0x4f, + 0x6d, 0x4e, 0x54, 0x6c, 0x7c, 0x70, 0x53, 0x56, 0x31, 0x3e, 0x55, 0x60, + 0x58, 0x50, 0x63, 0x81, 0x52, 0x2c, 0x29, 0x37, 0x47, 0x3b, 0x2a, 0x4b, + 0x65, 0x65, 0x52, 0x4f, 0x41, 0x23, 0x21, 0x27, 0x43, 0x65, 0x55, 0x4e, + 0x3c, 0x2b, 0x3e, 0x6d, 0x70, 0x6d, 0x62, 0x59, 0x61, 0x74, 0x80, 0x81, + 0x7d, 0x7b, 0x6d, 0x6b, 0x5a, 0x6e, 0x6b, 0x65, 0x7a, 0x71, 0x61, 0x63, + 0x73, 0x4f, 0x90, 0x8d, 0x8a, 0xb5, 0xa7, 0xb0, 0x9f, 0x9a, 0x8b, 0x64, + 0x53, 0x4f, 0x60, 0x61, 0x68, 0x67, 0x4f, 0x55, 0x67, 0x68, 0x67, 0x68, + 0x6c, 0x72, 0x70, 0x6b, 0x5c, 0x5d, 0x62, 0x5d, 0x56, 0x61, 0x6d, 0x68, + 0x62, 0x4a, 0x5c, 0x80, 0x8d, 0x70, 0x3b, 0x2d, 0x34, 0x40, 0x48, 0x45, + 0x4b, 0x60, 0x74, 0x7c, 0x5c, 0x37, 0x2b, 0x3c, 0x4f, 0x4d, 0x50, 0x7a, + 0x68, 0x70, 0x5d, 0x60, 0x5c, 0x35, 0x24, 0x28, 0x2c, 0x63, 0x4e, 0x42, + 0x30, 0x46, 0x4f, 0x5e, 0x54, 0x4f, 0x4d, 0x55, 0x62, 0x69, 0x69, 0x65, + 0x56, 0x48, 0x4f, 0x6b, 0x5f, 0x68, 0x6f, 0x73, 0x7a, 0x79, 0x62, 0x75, + 0x7b, 
0x3a, 0x81, 0xa0, 0x94, 0x97, 0xc3, 0xb5, 0xaf, 0xba, 0xac, 0x64, + 0x4b, 0x55, 0x57, 0x54, 0x6a, 0x64, 0x49, 0x5c, 0x62, 0x63, 0x62, 0x60, + 0x62, 0x66, 0x66, 0x62, 0x5d, 0x58, 0x5d, 0x60, 0x5c, 0x62, 0x6b, 0x6a, + 0x57, 0x47, 0x64, 0x8b, 0x97, 0x74, 0x39, 0x27, 0x3e, 0x62, 0x76, 0x61, + 0x4d, 0x5e, 0x86, 0xa1, 0x63, 0x38, 0x1c, 0x2a, 0x44, 0x49, 0x55, 0x79, + 0x81, 0x85, 0x77, 0x8b, 0x90, 0x5c, 0x3a, 0x39, 0x4c, 0x77, 0x50, 0x4d, + 0x4d, 0x83, 0x70, 0x50, 0x5a, 0x56, 0x64, 0x7d, 0x81, 0x67, 0x4c, 0x42, + 0x30, 0x34, 0x4c, 0x5f, 0x5b, 0x6b, 0x71, 0x60, 0x43, 0x49, 0x49, 0x56, + 0x57, 0x21, 0x58, 0x9a, 0x88, 0x73, 0xbd, 0x80, 0x8a, 0xb7, 0xb7, 0x99, + 0x5f, 0x68, 0x65, 0x5f, 0x60, 0x55, 0x4e, 0x61, 0x65, 0x68, 0x69, 0x68, + 0x68, 0x6a, 0x68, 0x64, 0x5b, 0x53, 0x59, 0x66, 0x66, 0x65, 0x64, 0x5c, + 0x51, 0x42, 0x55, 0x6d, 0x77, 0x6b, 0x4e, 0x4f, 0x4c, 0x58, 0x62, 0x64, + 0x6b, 0x75, 0x75, 0x6c, 0x6b, 0x45, 0x23, 0x2f, 0x4c, 0x4d, 0x4d, 0x57, + 0x66, 0x75, 0x74, 0x8a, 0x92, 0x6e, 0x4a, 0x33, 0x47, 0x58, 0x41, 0x6c, + 0x74, 0x91, 0x77, 0x64, 0x68, 0x6e, 0x77, 0x7b, 0x74, 0x5f, 0x46, 0x33, + 0x46, 0x4a, 0x58, 0x54, 0x54, 0x66, 0x71, 0x5d, 0x3b, 0x27, 0x38, 0x3b, + 0x53, 0x4c, 0x51, 0x8c, 0x70, 0x83, 0xac, 0x7e, 0x99, 0xa5, 0xb0, 0x96, + 0x85, 0x74, 0x6d, 0x69, 0x55, 0x53, 0x61, 0x5e, 0x5e, 0x64, 0x68, 0x68, + 0x68, 0x68, 0x64, 0x5f, 0x58, 0x4d, 0x53, 0x63, 0x6a, 0x6d, 0x6a, 0x5e, + 0x43, 0x3e, 0x4e, 0x59, 0x65, 0x6d, 0x67, 0x71, 0x5d, 0x78, 0x8b, 0x81, + 0x6b, 0x61, 0x68, 0x71, 0x79, 0x62, 0x3a, 0x41, 0x5c, 0x63, 0x69, 0x65, + 0x71, 0x8b, 0x91, 0x95, 0x8f, 0x83, 0x6d, 0x42, 0x2f, 0x4f, 0x53, 0x76, + 0x69, 0x6e, 0x61, 0x55, 0x66, 0x80, 0x83, 0x6e, 0x6a, 0x77, 0x6a, 0x49, + 0x4c, 0x49, 0x4f, 0x4f, 0x61, 0x5e, 0x63, 0x60, 0x51, 0x2f, 0x50, 0x4a, + 0x63, 0x7a, 0x62, 0x8a, 0x67, 0xa8, 0x8d, 0x9e, 0xbc, 0x9a, 0xb9, 0x8b, + 0x9e, 0x75, 0x5d, 0x60, 0x55, 0x5c, 0x6d, 0x54, 0x54, 0x5b, 0x60, 0x5f, + 0x5e, 0x5d, 0x59, 0x54, 0x64, 0x59, 0x59, 0x5f, 0x62, 0x6a, 0x6d, 0x61, + 0x2f, 
0x37, 0x4f, 0x54, 0x60, 0x70, 0x67, 0x65, 0x62, 0x66, 0x5c, 0x47, + 0x3e, 0x49, 0x51, 0x4d, 0x6c, 0x70, 0x50, 0x4c, 0x5c, 0x67, 0x79, 0x73, + 0x82, 0x86, 0x8c, 0x8d, 0x6b, 0x57, 0x5b, 0x40, 0x63, 0x76, 0x74, 0x78, + 0x62, 0x58, 0x50, 0x39, 0x46, 0x53, 0x52, 0x45, 0x47, 0x55, 0x49, 0x2e, + 0x34, 0x3b, 0x43, 0x4b, 0x74, 0x63, 0x59, 0x5e, 0x50, 0x37, 0x5d, 0x5b, + 0x5b, 0x6d, 0x5e, 0x78, 0x7a, 0xaa, 0x76, 0xa2, 0xac, 0xa3, 0xd0, 0xc4, + 0xa9, 0x89, 0x5b, 0x5a, 0x61, 0x62, 0x6a, 0x58, 0x5f, 0x64, 0x66, 0x61, + 0x5e, 0x5c, 0x5c, 0x59, 0x6a, 0x6a, 0x6b, 0x63, 0x58, 0x5e, 0x63, 0x59, + 0x24, 0x2f, 0x43, 0x40, 0x4a, 0x5d, 0x52, 0x49, 0x49, 0x5e, 0x5b, 0x3b, + 0x2c, 0x42, 0x5d, 0x63, 0x5f, 0x80, 0x73, 0x6c, 0x6d, 0x6b, 0x74, 0x68, + 0x6f, 0x53, 0x66, 0x88, 0x62, 0x45, 0x6b, 0x7c, 0x81, 0x56, 0x45, 0x67, + 0x72, 0x5b, 0x5f, 0x5c, 0x74, 0x5d, 0x4c, 0x4e, 0x4e, 0x42, 0x34, 0x30, + 0x34, 0x49, 0x49, 0x41, 0x77, 0x6d, 0x5e, 0x60, 0x5c, 0x45, 0x5e, 0x6c, + 0x61, 0x67, 0x61, 0x69, 0x8f, 0xa4, 0xa5, 0xcc, 0xaa, 0xb2, 0xa8, 0xb6, + 0xb0, 0xa9, 0x6e, 0x62, 0x71, 0x64, 0x65, 0x67, 0x5d, 0x60, 0x5e, 0x56, + 0x51, 0x52, 0x52, 0x52, 0x5e, 0x6c, 0x76, 0x6a, 0x55, 0x57, 0x5d, 0x52, + 0x1e, 0x40, 0x38, 0x3b, 0x44, 0x41, 0x45, 0x33, 0x4f, 0x66, 0x49, 0x31, + 0x27, 0x2d, 0x57, 0x66, 0x80, 0x7f, 0x74, 0x73, 0x54, 0x60, 0x67, 0x74, + 0x8a, 0x60, 0x45, 0x55, 0x3b, 0x30, 0x69, 0x7e, 0x69, 0x4c, 0x65, 0x72, + 0x6a, 0x44, 0x5a, 0x6f, 0x8e, 0x99, 0x8c, 0x78, 0x73, 0x59, 0x38, 0x2c, + 0x2c, 0x4e, 0x4d, 0x5f, 0x77, 0x6a, 0x63, 0x6b, 0x64, 0x5d, 0x65, 0x70, + 0x69, 0x63, 0x62, 0x5e, 0x7c, 0x82, 0x91, 0xb6, 0xbc, 0xbe, 0x9e, 0xb1, + 0xbd, 0xaa, 0xaa, 0x78, 0x67, 0x5f, 0x56, 0x5c, 0x61, 0x5f, 0x5d, 0x5f, + 0x5f, 0x57, 0x49, 0x3e, 0x4d, 0x69, 0x6a, 0x5e, 0x5d, 0x4f, 0x3c, 0x3d, + 0x18, 0x30, 0x28, 0x2b, 0x34, 0x38, 0x42, 0x33, 0x55, 0x58, 0x34, 0x29, + 0x30, 0x33, 0x42, 0x3d, 0x6f, 0x5b, 0x54, 0x4d, 0x45, 0x47, 0x5b, 0x67, + 0x60, 0x50, 0x41, 0x33, 0x24, 0x3f, 0x6b, 0x63, 0x55, 0x63, 0x6b, 0x60, + 0x63, 
0x30, 0x3e, 0x80, 0x75, 0x73, 0x66, 0x6d, 0x65, 0x4c, 0x34, 0x45, + 0x42, 0x58, 0x52, 0x5e, 0x6f, 0x67, 0x69, 0x6e, 0x69, 0x60, 0x64, 0x6f, + 0x73, 0x72, 0x69, 0x5b, 0x6d, 0x7f, 0x96, 0x9a, 0xb1, 0xb1, 0x9e, 0xa5, + 0xba, 0xaa, 0xb7, 0x88, 0x74, 0x56, 0x57, 0x66, 0x69, 0x63, 0x5f, 0x61, + 0x65, 0x62, 0x56, 0x4c, 0x5b, 0x6c, 0x67, 0x5f, 0x60, 0x54, 0x44, 0x47, + 0x1a, 0x29, 0x23, 0x23, 0x29, 0x32, 0x3f, 0x37, 0x3a, 0x41, 0x2f, 0x2b, + 0x30, 0x2b, 0x35, 0x3b, 0x48, 0x2d, 0x33, 0x2f, 0x40, 0x3c, 0x52, 0x50, + 0x43, 0x2c, 0x32, 0x32, 0x38, 0x60, 0x7b, 0x7f, 0x62, 0x50, 0x3c, 0x4b, + 0x7a, 0x58, 0x38, 0x43, 0x53, 0x6e, 0x5f, 0x58, 0x45, 0x56, 0x43, 0x45, + 0x56, 0x63, 0x62, 0x6e, 0x73, 0x6d, 0x75, 0x71, 0x60, 0x5f, 0x60, 0x65, + 0x6b, 0x6d, 0x66, 0x5b, 0x67, 0x8b, 0xa8, 0x91, 0xbb, 0xbe, 0xb6, 0xb9, + 0xa2, 0xa2, 0xb2, 0xaf, 0x91, 0x74, 0x56, 0x5e, 0x6f, 0x69, 0x62, 0x63, + 0x68, 0x69, 0x61, 0x59, 0x65, 0x69, 0x65, 0x64, 0x67, 0x5c, 0x4f, 0x53, + 0x1e, 0x25, 0x28, 0x27, 0x23, 0x2b, 0x36, 0x32, 0x1f, 0x28, 0x24, 0x20, + 0x1e, 0x18, 0x24, 0x38, 0x3d, 0x29, 0x27, 0x22, 0x35, 0x42, 0x4e, 0x3f, + 0x34, 0x1d, 0x2f, 0x36, 0x4c, 0x7d, 0x87, 0x8d, 0x41, 0x2a, 0x43, 0x71, + 0x80, 0x6b, 0x51, 0x34, 0x43, 0x72, 0x65, 0x56, 0x34, 0x59, 0x45, 0x36, + 0x4b, 0x5e, 0x6b, 0x7b, 0x73, 0x69, 0x6f, 0x61, 0x55, 0x5f, 0x60, 0x5b, + 0x5c, 0x5e, 0x5e, 0x61, 0x6d, 0x96, 0xac, 0x95, 0xb6, 0xc1, 0xb4, 0xbc, + 0x9f, 0xa8, 0xa0, 0xc1, 0xa6, 0xa8, 0x6d, 0x5c, 0x6d, 0x6a, 0x66, 0x65, + 0x66, 0x66, 0x64, 0x61, 0x65, 0x62, 0x61, 0x65, 0x64, 0x58, 0x52, 0x59, + 0x1a, 0x1b, 0x2d, 0x31, 0x25, 0x27, 0x2b, 0x27, 0x39, 0x31, 0x25, 0x1c, + 0x20, 0x21, 0x22, 0x32, 0x48, 0x41, 0x2c, 0x25, 0x23, 0x49, 0x4a, 0x34, + 0x23, 0x29, 0x36, 0x1f, 0x35, 0x72, 0x6c, 0x53, 0x1d, 0x20, 0x48, 0x77, + 0x78, 0x6c, 0x5b, 0x4a, 0x3b, 0x57, 0x54, 0x60, 0x38, 0x48, 0x37, 0x39, + 0x3c, 0x56, 0x69, 0x75, 0x67, 0x59, 0x5d, 0x4c, 0x52, 0x61, 0x60, 0x5c, + 0x65, 0x63, 0x5c, 0x60, 0x8f, 0xac, 0xb0, 0xac, 0xb2, 0xbd, 0xa3, 0xb0, + 0xbc, 
0xbd, 0xab, 0xa9, 0xad, 0xb4, 0x97, 0x6f, 0x60, 0x64, 0x66, 0x65, + 0x63, 0x63, 0x67, 0x6a, 0x67, 0x5e, 0x5b, 0x5a, 0x4f, 0x46, 0x4d, 0x58, + 0x23, 0x1b, 0x3a, 0x46, 0x39, 0x36, 0x2d, 0x21, 0x37, 0x32, 0x2e, 0x20, + 0x23, 0x2c, 0x29, 0x3a, 0x2f, 0x3b, 0x32, 0x37, 0x2d, 0x50, 0x42, 0x28, + 0x28, 0x2a, 0x28, 0x1d, 0x2c, 0x46, 0x3c, 0x2b, 0x23, 0x2e, 0x1c, 0x34, + 0x5b, 0x5b, 0x34, 0x3a, 0x29, 0x48, 0x4f, 0x51, 0x33, 0x4f, 0x4b, 0x43, + 0x4c, 0x65, 0x69, 0x6c, 0x66, 0x5b, 0x5d, 0x52, 0x55, 0x5c, 0x55, 0x5d, + 0x74, 0x6d, 0x5c, 0x60, 0xa7, 0xad, 0xaa, 0xb2, 0xa8, 0xba, 0x9e, 0xa6, + 0xbf, 0xbf, 0xca, 0x94, 0xbc, 0x9c, 0xad, 0x73, 0x5a, 0x5d, 0x61, 0x61, + 0x61, 0x63, 0x6a, 0x70, 0x6c, 0x5f, 0x5a, 0x4f, 0x3b, 0x3b, 0x4d, 0x56, + 0x3c, 0x29, 0x4c, 0x5f, 0x55, 0x51, 0x36, 0x1d, 0x1d, 0x1e, 0x25, 0x1b, + 0x29, 0x3b, 0x2e, 0x36, 0x18, 0x21, 0x30, 0x3a, 0x38, 0x40, 0x3a, 0x29, + 0x2d, 0x22, 0x16, 0x29, 0x35, 0x25, 0x23, 0x2a, 0x1a, 0x2f, 0x17, 0x1b, + 0x2a, 0x32, 0x1d, 0x38, 0x32, 0x50, 0x57, 0x3e, 0x2a, 0x56, 0x69, 0x52, + 0x5a, 0x70, 0x61, 0x60, 0x6d, 0x6a, 0x69, 0x63, 0x54, 0x5a, 0x51, 0x59, + 0x6f, 0x65, 0x5f, 0x79, 0xa0, 0x8f, 0x94, 0x92, 0x8f, 0xa4, 0x9a, 0x8e, + 0xaf, 0xb5, 0xc8, 0x9d, 0xc6, 0x9a, 0xb3, 0x7d, 0x5e, 0x5b, 0x57, 0x56, + 0x5a, 0x60, 0x66, 0x69, 0x68, 0x5f, 0x60, 0x56, 0x45, 0x4a, 0x59, 0x54, + 0x53, 0x33, 0x56, 0x6d, 0x67, 0x61, 0x39, 0x16, 0x34, 0x25, 0x26, 0x28, + 0x54, 0x72, 0x4a, 0x31, 0x2b, 0x1c, 0x29, 0x26, 0x2b, 0x1e, 0x31, 0x34, + 0x1d, 0x2d, 0x18, 0x20, 0x2a, 0x21, 0x27, 0x1a, 0x1e, 0x19, 0x18, 0x2d, + 0x20, 0x3c, 0x40, 0x3e, 0x56, 0x4f, 0x50, 0x40, 0x31, 0x44, 0x64, 0x66, + 0x51, 0x66, 0x4c, 0x4d, 0x6c, 0x70, 0x68, 0x63, 0x55, 0x61, 0x5a, 0x58, + 0x60, 0x55, 0x67, 0xa1, 0xab, 0x8b, 0x98, 0x80, 0x86, 0x9a, 0xa1, 0x7e, + 0xb3, 0xb7, 0xac, 0xaa, 0xbd, 0xb3, 0xbe, 0x9e, 0x67, 0x5c, 0x4f, 0x4c, + 0x53, 0x5b, 0x5e, 0x5d, 0x5d, 0x5b, 0x67, 0x68, 0x5e, 0x65, 0x69, 0x54, + 0x4f, 0x62, 0x64, 0x5e, 0x65, 0x3b, 0x3d, 0x2c, 0x1d, 0x34, 0x17, 0x2b, + 0x74, 
0x6c, 0x32, 0x21, 0x18, 0x2a, 0x2f, 0x24, 0x4c, 0x3c, 0x2a, 0x22, + 0x23, 0x22, 0x2c, 0x29, 0x16, 0x1c, 0x28, 0x1e, 0x1f, 0x1f, 0x1b, 0x1b, + 0x27, 0x37, 0x3f, 0x3d, 0x51, 0x47, 0x62, 0x72, 0x52, 0x70, 0x6f, 0x75, + 0x43, 0x4d, 0x5c, 0x56, 0x6e, 0x63, 0x66, 0x60, 0x49, 0x5b, 0x62, 0x55, + 0x4c, 0x58, 0x72, 0x84, 0x96, 0xaa, 0x8e, 0x78, 0x92, 0x71, 0x8b, 0xb8, + 0xc6, 0xcc, 0xc0, 0xbc, 0xbd, 0xc0, 0xa0, 0xb7, 0x79, 0x5c, 0x4a, 0x40, + 0x4c, 0x54, 0x49, 0x58, 0x5f, 0x6c, 0x6f, 0x6a, 0x62, 0x55, 0x54, 0x64, + 0x50, 0x65, 0x65, 0x5a, 0x62, 0x48, 0x49, 0x2c, 0x1c, 0x34, 0x28, 0x3b, + 0x6d, 0x5e, 0x2b, 0x1e, 0x24, 0x37, 0x47, 0x46, 0x64, 0x44, 0x2f, 0x2d, + 0x28, 0x29, 0x2b, 0x26, 0x1b, 0x1d, 0x22, 0x1c, 0x1e, 0x1e, 0x1b, 0x1a, + 0x22, 0x2e, 0x34, 0x34, 0x41, 0x57, 0x89, 0x8a, 0x4b, 0x51, 0x5e, 0x7a, + 0x52, 0x59, 0x63, 0x56, 0x69, 0x5f, 0x62, 0x63, 0x59, 0x5d, 0x5a, 0x53, + 0x53, 0x5c, 0x65, 0x68, 0x87, 0xb0, 0xbe, 0x8e, 0x80, 0x76, 0x88, 0x8c, + 0xc9, 0xbb, 0xbf, 0xbd, 0x9f, 0xbc, 0xbc, 0xb8, 0x81, 0x59, 0x46, 0x42, + 0x4a, 0x4e, 0x45, 0x54, 0x55, 0x6b, 0x6b, 0x58, 0x50, 0x4c, 0x46, 0x45, + 0x57, 0x6c, 0x70, 0x67, 0x6e, 0x61, 0x54, 0x2d, 0x30, 0x2d, 0x24, 0x3d, + 0x6e, 0x6d, 0x40, 0x29, 0x3f, 0x3c, 0x41, 0x54, 0x7a, 0x5d, 0x38, 0x2a, + 0x1f, 0x26, 0x23, 0x1e, 0x21, 0x23, 0x1f, 0x21, 0x1d, 0x1e, 0x1d, 0x1a, + 0x1e, 0x24, 0x29, 0x29, 0x30, 0x40, 0x65, 0x64, 0x32, 0x33, 0x3c, 0x4d, + 0x5b, 0x66, 0x6c, 0x5f, 0x67, 0x5d, 0x60, 0x61, 0x5a, 0x5a, 0x55, 0x50, + 0x50, 0x57, 0x5e, 0x62, 0x79, 0x9a, 0xc6, 0x83, 0x52, 0x65, 0x82, 0x8c, + 0xb6, 0xbc, 0xcd, 0xcb, 0xaf, 0xd1, 0xca, 0xb4, 0x8d, 0x53, 0x42, 0x4a, + 0x50, 0x4f, 0x43, 0x4b, 0x52, 0x51, 0x56, 0x63, 0x61, 0x45, 0x3a, 0x4a, + 0x57, 0x62, 0x6f, 0x73, 0x74, 0x69, 0x52, 0x2c, 0x2c, 0x1e, 0x26, 0x44, + 0x69, 0x6a, 0x3f, 0x25, 0x2b, 0x27, 0x25, 0x3b, 0x60, 0x59, 0x38, 0x21, + 0x22, 0x2a, 0x20, 0x1c, 0x2a, 0x27, 0x1f, 0x26, 0x1e, 0x1f, 0x1f, 0x1d, + 0x1d, 0x1f, 0x22, 0x24, 0x31, 0x2b, 0x32, 0x35, 0x27, 0x30, 0x35, 0x35, + 0x56, 
0x66, 0x6b, 0x63, 0x65, 0x5e, 0x5c, 0x5f, 0x55, 0x5c, 0x5e, 0x54, + 0x44, 0x41, 0x52, 0x65, 0x84, 0x8a, 0xb9, 0x7a, 0x40, 0x4f, 0x69, 0xa6, + 0xbc, 0xb9, 0xbd, 0xda, 0xdf, 0xca, 0x9b, 0xbf, 0x9e, 0x55, 0x3d, 0x4b, + 0x57, 0x55, 0x41, 0x3f, 0x62, 0x54, 0x51, 0x65, 0x6b, 0x53, 0x4c, 0x63, + 0x4f, 0x4b, 0x5d, 0x70, 0x69, 0x61, 0x4d, 0x3b, 0x22, 0x22, 0x4a, 0x60, + 0x62, 0x4f, 0x28, 0x1f, 0x22, 0x37, 0x42, 0x47, 0x4e, 0x53, 0x43, 0x35, + 0x30, 0x35, 0x28, 0x24, 0x31, 0x2b, 0x20, 0x27, 0x22, 0x20, 0x21, 0x20, + 0x20, 0x1f, 0x21, 0x24, 0x26, 0x23, 0x24, 0x24, 0x24, 0x27, 0x37, 0x37, + 0x48, 0x58, 0x58, 0x55, 0x55, 0x58, 0x58, 0x62, 0x5a, 0x63, 0x69, 0x5f, + 0x46, 0x35, 0x3e, 0x52, 0x84, 0x88, 0xaf, 0x7e, 0x4d, 0x4a, 0x47, 0x9e, + 0xb7, 0xcf, 0xc8, 0xc2, 0xca, 0xd1, 0x9a, 0x8f, 0xaf, 0x66, 0x40, 0x3e, + 0x4c, 0x4e, 0x38, 0x33, 0x53, 0x5b, 0x4b, 0x37, 0x40, 0x51, 0x4e, 0x44, + 0x50, 0x43, 0x5a, 0x70, 0x5b, 0x62, 0x5c, 0x61, 0x53, 0x47, 0x64, 0x6e, + 0x6d, 0x60, 0x3f, 0x49, 0x54, 0x6b, 0x6d, 0x68, 0x59, 0x60, 0x4a, 0x32, + 0x2e, 0x2c, 0x27, 0x29, 0x30, 0x2e, 0x28, 0x2c, 0x25, 0x22, 0x20, 0x22, + 0x22, 0x21, 0x22, 0x25, 0x1d, 0x1f, 0x1e, 0x22, 0x30, 0x28, 0x34, 0x2c, + 0x3b, 0x47, 0x3f, 0x41, 0x40, 0x50, 0x54, 0x65, 0x5b, 0x5c, 0x63, 0x68, + 0x5a, 0x43, 0x39, 0x3c, 0x56, 0x65, 0x85, 0x61, 0x3f, 0x4a, 0x46, 0x98, + 0xbb, 0xd8, 0xe1, 0xc6, 0xb0, 0xe3, 0xbc, 0x65, 0xac, 0x83, 0x57, 0x38, + 0x3d, 0x41, 0x2f, 0x37, 0x3a, 0x41, 0x39, 0x37, 0x48, 0x50, 0x40, 0x30, + 0x46, 0x43, 0x63, 0x71, 0x4f, 0x63, 0x64, 0x71, 0x79, 0x5c, 0x67, 0x69, + 0x79, 0x7a, 0x5b, 0x72, 0x74, 0x76, 0x6a, 0x68, 0x55, 0x5d, 0x45, 0x2e, + 0x32, 0x29, 0x2b, 0x31, 0x2d, 0x2c, 0x2f, 0x2d, 0x2a, 0x21, 0x1c, 0x1f, + 0x21, 0x1f, 0x20, 0x23, 0x27, 0x29, 0x21, 0x28, 0x45, 0x34, 0x3b, 0x26, + 0x2f, 0x3d, 0x32, 0x3a, 0x3b, 0x51, 0x50, 0x5f, 0x5e, 0x58, 0x5e, 0x69, + 0x63, 0x4a, 0x3a, 0x3a, 0x46, 0x45, 0x65, 0x5b, 0x32, 0x4d, 0x60, 0xa1, + 0xd6, 0xbf, 0xd0, 0xe6, 0xd9, 0xdc, 0xb7, 0x99, 0x8d, 0xa0, 0x87, 0x50, + 0x45, 
0x40, 0x31, 0x4a, 0x48, 0x37, 0x31, 0x4d, 0x69, 0x60, 0x51, 0x57, + 0x2d, 0x39, 0x63, 0x6a, 0x3e, 0x58, 0x57, 0x5e, 0x5a, 0x4e, 0x67, 0x68, + 0x72, 0x69, 0x45, 0x69, 0x6e, 0x70, 0x68, 0x6d, 0x4f, 0x59, 0x5a, 0x60, + 0x4b, 0x3a, 0x3d, 0x40, 0x2e, 0x2a, 0x2e, 0x26, 0x2c, 0x21, 0x1a, 0x1d, + 0x21, 0x1f, 0x1c, 0x1e, 0x1c, 0x27, 0x21, 0x21, 0x39, 0x21, 0x32, 0x25, + 0x24, 0x38, 0x33, 0x43, 0x44, 0x57, 0x4c, 0x55, 0x6b, 0x63, 0x64, 0x69, + 0x5b, 0x40, 0x36, 0x3e, 0x6c, 0x51, 0x7a, 0x8c, 0x4a, 0x56, 0x72, 0xa3, + 0xbf, 0xd5, 0xc7, 0xbc, 0xcd, 0xe7, 0xc4, 0xc1, 0x6b, 0xb0, 0xb3, 0x72, + 0x5b, 0x4c, 0x39, 0x5d, 0x52, 0x3c, 0x21, 0x22, 0x41, 0x57, 0x5f, 0x67, + 0x37, 0x3d, 0x59, 0x4f, 0x4f, 0x66, 0x5c, 0x53, 0x60, 0x56, 0x57, 0x4c, + 0x5a, 0x66, 0x58, 0x70, 0x58, 0x5e, 0x69, 0x65, 0x55, 0x53, 0x58, 0x55, + 0x52, 0x38, 0x50, 0x58, 0x52, 0x2e, 0x30, 0x27, 0x18, 0x1e, 0x26, 0x2a, + 0x29, 0x25, 0x1f, 0x1a, 0x20, 0x19, 0x25, 0x2c, 0x26, 0x31, 0x38, 0x27, + 0x42, 0x45, 0x46, 0x45, 0x48, 0x4e, 0x51, 0x52, 0x6a, 0x65, 0x7e, 0x8b, + 0x5d, 0x37, 0x3d, 0x4d, 0x78, 0xbc, 0xa1, 0xa3, 0x4a, 0x4f, 0x61, 0x96, + 0xac, 0xda, 0xdf, 0xe6, 0xc5, 0xd5, 0xbd, 0xe5, 0xa4, 0x88, 0x89, 0x8a, + 0x98, 0x7d, 0x71, 0x53, 0x4d, 0x5c, 0x4d, 0x3c, 0x4c, 0x5a, 0x53, 0x4f, + 0x2d, 0x44, 0x67, 0x60, 0x61, 0x6e, 0x68, 0x71, 0x6d, 0x52, 0x58, 0x51, + 0x4e, 0x62, 0x66, 0x6d, 0x5a, 0x58, 0x5a, 0x53, 0x47, 0x4b, 0x53, 0x4f, + 0x42, 0x30, 0x3f, 0x46, 0x49, 0x2b, 0x2e, 0x2e, 0x3a, 0x2e, 0x2c, 0x31, + 0x2a, 0x1e, 0x24, 0x36, 0x3a, 0x27, 0x2a, 0x32, 0x2a, 0x29, 0x2c, 0x26, + 0x31, 0x3d, 0x48, 0x4b, 0x4a, 0x4a, 0x4c, 0x4d, 0x67, 0x6d, 0x85, 0x93, + 0x65, 0x47, 0x3a, 0x37, 0x7a, 0xaf, 0x96, 0x7a, 0x39, 0x46, 0x6b, 0x92, + 0xa3, 0xca, 0xda, 0xe0, 0xc6, 0xcc, 0xbe, 0xe9, 0xc1, 0x76, 0x66, 0x56, + 0x6c, 0x76, 0x80, 0x92, 0x6c, 0x67, 0x5a, 0x57, 0x5d, 0x58, 0x4b, 0x45, + 0x2c, 0x4a, 0x67, 0x67, 0x70, 0x73, 0x66, 0x78, 0x5d, 0x4e, 0x69, 0x5c, + 0x33, 0x44, 0x6a, 0x74, 0x54, 0x50, 0x50, 0x4a, 0x42, 0x48, 0x4d, 0x46, + 0x43, 
0x39, 0x3a, 0x3a, 0x46, 0x33, 0x35, 0x43, 0x51, 0x39, 0x34, 0x3e, + 0x36, 0x22, 0x30, 0x54, 0x54, 0x39, 0x31, 0x38, 0x2e, 0x22, 0x22, 0x24, + 0x25, 0x3a, 0x4b, 0x49, 0x40, 0x3e, 0x46, 0x4c, 0x4e, 0x68, 0x87, 0x9c, + 0x6b, 0x5e, 0x4d, 0x46, 0x6f, 0xa1, 0x9c, 0x6e, 0x3d, 0x3c, 0x59, 0x63, + 0x95, 0xb6, 0xd7, 0xdb, 0xd2, 0xc4, 0xc4, 0xf1, 0xe4, 0x81, 0x6c, 0x56, + 0x66, 0x76, 0x70, 0x91, 0x6b, 0x57, 0x56, 0x67, 0x67, 0x5b, 0x50, 0x4a, + 0x2b, 0x49, 0x57, 0x60, 0x78, 0x6f, 0x5e, 0x6d, 0x6a, 0x4c, 0x49, 0x48, + 0x46, 0x5d, 0x66, 0x4f, 0x4e, 0x52, 0x5b, 0x5d, 0x53, 0x52, 0x52, 0x47, + 0x44, 0x3c, 0x2a, 0x22, 0x2e, 0x23, 0x25, 0x3a, 0x48, 0x34, 0x36, 0x46, + 0x3f, 0x2d, 0x38, 0x58, 0x5a, 0x47, 0x3a, 0x35, 0x2d, 0x23, 0x1d, 0x1d, + 0x28, 0x3d, 0x4a, 0x40, 0x30, 0x33, 0x45, 0x55, 0x54, 0x70, 0x90, 0xa2, + 0x64, 0x51, 0x44, 0x44, 0x66, 0x94, 0xa1, 0x7e, 0x42, 0x37, 0x3f, 0x40, + 0x68, 0x84, 0xbb, 0xc5, 0xc9, 0xa6, 0xae, 0xe6, 0xe7, 0x9b, 0x80, 0x7b, + 0x82, 0x7c, 0x63, 0x64, 0x55, 0x44, 0x51, 0x69, 0x64, 0x5d, 0x5d, 0x54, + 0x30, 0x53, 0x59, 0x63, 0x75, 0x69, 0x5e, 0x6d, 0x71, 0x67, 0x4f, 0x4e, + 0x65, 0x71, 0x60, 0x45, 0x4e, 0x58, 0x6a, 0x6d, 0x5f, 0x5b, 0x5a, 0x52, + 0x54, 0x4b, 0x2b, 0x1c, 0x1e, 0x1c, 0x1b, 0x30, 0x3f, 0x39, 0x3c, 0x42, + 0x3c, 0x31, 0x37, 0x46, 0x4f, 0x4f, 0x41, 0x2f, 0x28, 0x25, 0x1b, 0x14, + 0x29, 0x3f, 0x48, 0x3a, 0x2d, 0x35, 0x4d, 0x5f, 0x57, 0x62, 0x81, 0x9c, + 0x6e, 0x52, 0x42, 0x3f, 0x65, 0x88, 0x98, 0x86, 0x38, 0x31, 0x34, 0x46, + 0x47, 0x61, 0xab, 0xc3, 0xcb, 0x92, 0x9b, 0xdb, 0xd9, 0xaa, 0x72, 0x7e, + 0x84, 0x7d, 0x7a, 0x5f, 0x4b, 0x4b, 0x5b, 0x60, 0x53, 0x57, 0x5e, 0x53, + 0x4d, 0x70, 0x70, 0x6d, 0x6b, 0x5b, 0x5e, 0x66, 0x66, 0x86, 0x7a, 0x6d, + 0x6f, 0x68, 0x62, 0x5b, 0x4d, 0x56, 0x66, 0x65, 0x55, 0x52, 0x56, 0x51, + 0x64, 0x5b, 0x3b, 0x2f, 0x24, 0x2b, 0x27, 0x31, 0x4f, 0x52, 0x50, 0x45, + 0x37, 0x31, 0x37, 0x40, 0x42, 0x4f, 0x43, 0x2a, 0x25, 0x25, 0x1c, 0x15, + 0x29, 0x3e, 0x48, 0x3d, 0x38, 0x46, 0x59, 0x63, 0x57, 0x50, 0x70, 0x90, + 0x86, 
0x67, 0x52, 0x3d, 0x5d, 0x7c, 0x9a, 0x8f, 0x3a, 0x32, 0x2d, 0x41, + 0x3c, 0x59, 0xac, 0xd6, 0xd9, 0x95, 0x98, 0xe0, 0xe2, 0xbc, 0x6b, 0x7a, + 0x7a, 0x77, 0x84, 0x63, 0x44, 0x54, 0x5f, 0x52, 0x46, 0x4e, 0x56, 0x4e, + 0x67, 0x7a, 0x6c, 0x65, 0x5c, 0x55, 0x5e, 0x50, 0x69, 0x78, 0x6c, 0x6a, + 0x6a, 0x6e, 0x6f, 0x4e, 0x52, 0x56, 0x5f, 0x5d, 0x4f, 0x4c, 0x4f, 0x47, + 0x5b, 0x52, 0x3e, 0x3f, 0x2c, 0x3a, 0x35, 0x34, 0x58, 0x60, 0x61, 0x50, + 0x37, 0x2c, 0x38, 0x48, 0x35, 0x45, 0x3c, 0x2a, 0x29, 0x25, 0x20, 0x29, + 0x36, 0x46, 0x4a, 0x41, 0x43, 0x55, 0x62, 0x64, 0x6a, 0x5e, 0x7d, 0x80, + 0x7d, 0x56, 0x49, 0x23, 0x4b, 0x6d, 0xaa, 0x95, 0x50, 0x3f, 0x36, 0x36, + 0x32, 0x4e, 0xa4, 0xdd, 0xd8, 0x8e, 0x8a, 0xdd, 0xf2, 0xbf, 0x74, 0x80, + 0x77, 0x74, 0x73, 0x62, 0x45, 0x5a, 0x5c, 0x4b, 0x46, 0x4d, 0x4f, 0x4f, + 0x68, 0x66, 0x4c, 0x4b, 0x53, 0x5d, 0x65, 0x3d, 0x4c, 0x5d, 0x67, 0x68, + 0x49, 0x51, 0x73, 0x50, 0x5c, 0x5d, 0x66, 0x65, 0x58, 0x54, 0x50, 0x42, + 0x5a, 0x52, 0x4a, 0x58, 0x42, 0x57, 0x51, 0x47, 0x4c, 0x58, 0x61, 0x56, + 0x37, 0x24, 0x33, 0x4e, 0x2e, 0x3a, 0x33, 0x2b, 0x2e, 0x25, 0x26, 0x3f, + 0x49, 0x51, 0x4c, 0x40, 0x45, 0x5a, 0x66, 0x64, 0x58, 0x58, 0x7f, 0x65, + 0x62, 0x3e, 0x47, 0x25, 0x3c, 0x60, 0xb0, 0x89, 0x5c, 0x4c, 0x53, 0x41, + 0x37, 0x53, 0xa7, 0xeb, 0xdf, 0x94, 0x8c, 0xe7, 0xf2, 0xaa, 0x6f, 0x7b, + 0x72, 0x77, 0x6a, 0x73, 0x55, 0x67, 0x60, 0x50, 0x53, 0x51, 0x4b, 0x52, + 0x7b, 0x73, 0x48, 0x40, 0x4d, 0x59, 0x5c, 0x36, 0x38, 0x4b, 0x42, 0x45, + 0x4b, 0x68, 0x61, 0x5d, 0x4b, 0x3d, 0x52, 0x4b, 0x5b, 0x4e, 0x44, 0x40, + 0x38, 0x39, 0x42, 0x46, 0x53, 0x56, 0x45, 0x4d, 0x41, 0x4a, 0x53, 0x57, + 0x43, 0x25, 0x30, 0x5a, 0x3f, 0x33, 0x2f, 0x32, 0x2e, 0x29, 0x3c, 0x58, + 0x67, 0x48, 0x45, 0x3c, 0x47, 0x5c, 0x7c, 0x60, 0x6b, 0x65, 0x89, 0x68, + 0x62, 0x46, 0x22, 0x2e, 0x3d, 0x66, 0xb7, 0x9b, 0x5e, 0x7e, 0x67, 0x5e, + 0x4a, 0x38, 0x7f, 0xcf, 0xd7, 0xa4, 0x63, 0xe0, 0xf9, 0x9a, 0x5f, 0x68, + 0x5b, 0x76, 0x7a, 0x86, 0x6d, 0x6c, 0x64, 0x59, 0x56, 0x5e, 0x67, 0x69, + 0x68, 
0x5f, 0x42, 0x4e, 0x69, 0x71, 0x62, 0x34, 0x33, 0x44, 0x3f, 0x4a, + 0x54, 0x70, 0x6a, 0x69, 0x52, 0x4c, 0x53, 0x33, 0x42, 0x52, 0x58, 0x50, + 0x45, 0x36, 0x3d, 0x4f, 0x62, 0x5a, 0x3d, 0x45, 0x3a, 0x4c, 0x5f, 0x6a, + 0x5c, 0x3a, 0x30, 0x45, 0x38, 0x30, 0x2e, 0x35, 0x33, 0x30, 0x44, 0x61, + 0x59, 0x4f, 0x54, 0x3d, 0x36, 0x4c, 0x7f, 0x6e, 0x81, 0x7b, 0x92, 0x71, + 0x5e, 0x45, 0x1f, 0x28, 0x48, 0x67, 0xbf, 0xb4, 0x81, 0xb8, 0x88, 0x6b, + 0x49, 0x31, 0x5f, 0xcb, 0xd6, 0xb9, 0x6b, 0xbf, 0xf3, 0xa9, 0x69, 0x73, + 0x67, 0x6d, 0x6d, 0x74, 0x7b, 0x64, 0x59, 0x60, 0x60, 0x5b, 0x6a, 0x85, + 0x6d, 0x5c, 0x44, 0x57, 0x79, 0x81, 0x6e, 0x45, 0x36, 0x3d, 0x38, 0x47, + 0x48, 0x5e, 0x5b, 0x5d, 0x52, 0x49, 0x44, 0x1f, 0x2b, 0x47, 0x53, 0x51, + 0x3b, 0x45, 0x5c, 0x65, 0x6c, 0x64, 0x4d, 0x56, 0x4b, 0x5a, 0x67, 0x6d, + 0x66, 0x4c, 0x3c, 0x43, 0x35, 0x2b, 0x2b, 0x34, 0x36, 0x34, 0x45, 0x5f, + 0x56, 0x4e, 0x4c, 0x38, 0x36, 0x4b, 0x7b, 0x5d, 0x4a, 0x5a, 0x80, 0x7f, + 0x66, 0x59, 0x2d, 0x2e, 0x50, 0x69, 0xca, 0xc2, 0x89, 0xdc, 0x91, 0x6a, + 0x4f, 0x2f, 0x39, 0xa4, 0xb6, 0xbc, 0x86, 0xa8, 0xf0, 0xc0, 0x6e, 0x74, + 0x7d, 0x7c, 0x82, 0x81, 0x76, 0x5e, 0x56, 0x65, 0x67, 0x5e, 0x6d, 0x8b, + 0x67, 0x57, 0x48, 0x58, 0x6e, 0x6f, 0x59, 0x3d, 0x3a, 0x35, 0x2d, 0x39, + 0x32, 0x44, 0x47, 0x4c, 0x4d, 0x37, 0x32, 0x2a, 0x3b, 0x4e, 0x51, 0x58, + 0x38, 0x4c, 0x56, 0x3e, 0x45, 0x5c, 0x54, 0x52, 0x54, 0x62, 0x68, 0x66, + 0x63, 0x51, 0x43, 0x43, 0x3c, 0x2c, 0x29, 0x37, 0x3e, 0x3c, 0x45, 0x56, + 0x56, 0x4c, 0x47, 0x43, 0x43, 0x4a, 0x72, 0x4d, 0x2e, 0x52, 0x80, 0x8e, + 0x66, 0x5a, 0x2d, 0x27, 0x4c, 0x69, 0xd2, 0xc3, 0x7b, 0xde, 0x88, 0x67, + 0x53, 0x37, 0x33, 0x7a, 0x9a, 0xb4, 0xa3, 0x8f, 0xe8, 0xdc, 0x86, 0x7f, + 0x9c, 0x9b, 0x9e, 0x87, 0x74, 0x6b, 0x66, 0x6a, 0x72, 0x7d, 0x90, 0xa3, + 0x5d, 0x5d, 0x66, 0x6e, 0x6f, 0x62, 0x46, 0x35, 0x3d, 0x32, 0x29, 0x34, + 0x29, 0x43, 0x50, 0x55, 0x50, 0x3f, 0x3e, 0x45, 0x5b, 0x75, 0x67, 0x63, + 0x5b, 0x4a, 0x30, 0x16, 0x2e, 0x54, 0x4c, 0x46, 0x4e, 0x5e, 0x64, 0x64, + 0x63, 
0x54, 0x42, 0x3e, 0x46, 0x2f, 0x2a, 0x3d, 0x4f, 0x4f, 0x4f, 0x56, + 0x4d, 0x4e, 0x51, 0x53, 0x45, 0x39, 0x71, 0x5d, 0x38, 0x5f, 0x89, 0x91, + 0x5f, 0x57, 0x3d, 0x36, 0x4a, 0x6f, 0xd8, 0xcb, 0x89, 0xeb, 0x8e, 0x6b, + 0x4d, 0x35, 0x3c, 0x5c, 0x9e, 0xba, 0xbe, 0x75, 0xd2, 0xef, 0xb3, 0x95, + 0xa6, 0x9b, 0x98, 0x75, 0x73, 0x6f, 0x6e, 0x75, 0x7f, 0x8d, 0xa1, 0xb1, + 0x63, 0x68, 0x76, 0x6b, 0x5e, 0x59, 0x48, 0x45, 0x4e, 0x42, 0x3c, 0x44, + 0x32, 0x51, 0x61, 0x60, 0x5a, 0x5d, 0x57, 0x4e, 0x5c, 0x86, 0x6c, 0x51, + 0x4e, 0x32, 0x2a, 0x32, 0x45, 0x43, 0x37, 0x4b, 0x5c, 0x5f, 0x59, 0x56, + 0x5b, 0x56, 0x4e, 0x51, 0x48, 0x31, 0x2a, 0x41, 0x57, 0x58, 0x54, 0x55, + 0x4b, 0x4e, 0x47, 0x4a, 0x3e, 0x34, 0x76, 0x61, 0x2e, 0x56, 0x84, 0x8c, + 0x62, 0x5a, 0x50, 0x3d, 0x50, 0x7e, 0xe0, 0xd6, 0xab, 0xf4, 0x92, 0x67, + 0x4c, 0x2a, 0x33, 0x39, 0x93, 0xb2, 0xc7, 0x7e, 0xb8, 0xe2, 0xbf, 0x8b, + 0x8e, 0x84, 0x8f, 0x81, 0x82, 0x7c, 0x89, 0xa0, 0xa3, 0x94, 0x92, 0x9e, + 0x5d, 0x5f, 0x66, 0x4a, 0x3e, 0x49, 0x43, 0x49, 0x55, 0x4f, 0x50, 0x55, + 0x3c, 0x5a, 0x65, 0x57, 0x5b, 0x64, 0x5e, 0x54, 0x56, 0x7e, 0x60, 0x47, + 0x2b, 0x22, 0x39, 0x4f, 0x48, 0x28, 0x1c, 0x49, 0x66, 0x57, 0x44, 0x45, + 0x54, 0x57, 0x55, 0x5c, 0x50, 0x3c, 0x35, 0x48, 0x55, 0x50, 0x48, 0x48, + 0x46, 0x58, 0x4c, 0x43, 0x36, 0x37, 0x7b, 0x55, 0x3c, 0x5a, 0x88, 0x87, + 0x67, 0x56, 0x51, 0x29, 0x47, 0x8a, 0xe3, 0xd6, 0xbc, 0xea, 0x96, 0x74, + 0x63, 0x3c, 0x42, 0x39, 0x77, 0x88, 0xa2, 0x87, 0x9e, 0xb0, 0xa6, 0x80, + 0x8d, 0x88, 0x92, 0x94, 0x99, 0x96, 0x9e, 0xaa, 0xa8, 0x99, 0x93, 0x97, + 0x5c, 0x5e, 0x6a, 0x4e, 0x46, 0x58, 0x4a, 0x48, 0x47, 0x4a, 0x52, 0x58, + 0x3e, 0x5d, 0x63, 0x4c, 0x52, 0x55, 0x58, 0x65, 0x63, 0x7c, 0x61, 0x5b, + 0x4d, 0x42, 0x4a, 0x4c, 0x41, 0x28, 0x21, 0x49, 0x51, 0x40, 0x32, 0x3f, + 0x58, 0x56, 0x48, 0x47, 0x63, 0x50, 0x48, 0x53, 0x55, 0x46, 0x3a, 0x3b, + 0x37, 0x6d, 0x72, 0x55, 0x30, 0x2f, 0x7e, 0x58, 0x54, 0x62, 0x85, 0x7a, + 0x6b, 0x60, 0x6e, 0x40, 0x31, 0x88, 0xe3, 0xd0, 0xbb, 0xd9, 0xa2, 0x96, + 0x82, 
0x64, 0x72, 0x63, 0x6e, 0x5e, 0x64, 0x76, 0x89, 0x81, 0x91, 0x8f, + 0xb2, 0xa5, 0x93, 0x8b, 0x8f, 0x90, 0x7e, 0x65, 0x65, 0x81, 0x97, 0x9b, + 0x4f, 0x6a, 0x68, 0x5e, 0x68, 0x67, 0x5e, 0x61, 0x59, 0x35, 0x2c, 0x3b, + 0x48, 0x71, 0x51, 0x46, 0x47, 0x3e, 0x3f, 0x62, 0x6b, 0x6d, 0x4c, 0x5c, + 0x69, 0x51, 0x30, 0x22, 0x35, 0x39, 0x39, 0x62, 0x64, 0x48, 0x36, 0x61, + 0x62, 0x7f, 0x57, 0x4f, 0x6d, 0x63, 0x5a, 0x5e, 0x5f, 0x46, 0x30, 0x33, + 0x31, 0x46, 0x7a, 0x7f, 0x3b, 0x2e, 0x85, 0x64, 0x62, 0x6e, 0x71, 0x6e, + 0x6e, 0x6f, 0x83, 0xa2, 0x85, 0x8e, 0x98, 0x97, 0x8e, 0x86, 0x87, 0x8d, + 0x93, 0x96, 0x8e, 0x84, 0x75, 0x61, 0x5c, 0x6c, 0x79, 0x85, 0x8b, 0x96, + 0xa5, 0xa1, 0x8c, 0x82, 0x8a, 0x6f, 0x8d, 0x6c, 0x4f, 0x7f, 0x8e, 0x8d, + 0x45, 0x61, 0x69, 0x61, 0x62, 0x5e, 0x5e, 0x6c, 0x4f, 0x39, 0x39, 0x4b, + 0x59, 0x69, 0x4a, 0x2e, 0x42, 0x43, 0x4d, 0x5d, 0x69, 0x80, 0x5c, 0x62, + 0x7b, 0x62, 0x3c, 0x25, 0x25, 0x39, 0x49, 0x40, 0x6f, 0x5c, 0x2c, 0x62, + 0x74, 0x8d, 0x77, 0x65, 0x6e, 0x5c, 0x4b, 0x54, 0x62, 0x4c, 0x2c, 0x21, + 0x16, 0x46, 0x86, 0x95, 0x66, 0x47, 0x85, 0x76, 0x6e, 0x88, 0x8e, 0x82, + 0x83, 0x95, 0xae, 0xc4, 0xa6, 0x97, 0x8a, 0x89, 0x8c, 0x8b, 0x8a, 0x8b, + 0x9d, 0x93, 0x7f, 0x72, 0x6b, 0x5d, 0x57, 0x63, 0x4c, 0x55, 0x5f, 0x73, + 0x8c, 0x8b, 0x74, 0x66, 0x61, 0x51, 0x6e, 0x57, 0x40, 0x58, 0x51, 0x4d, + 0x3e, 0x59, 0x65, 0x5d, 0x50, 0x48, 0x51, 0x68, 0x5f, 0x64, 0x6b, 0x69, + 0x60, 0x46, 0x3a, 0x1a, 0x10, 0x2f, 0x5f, 0x65, 0x73, 0x95, 0x67, 0x61, + 0x7a, 0x5f, 0x38, 0x2d, 0x20, 0x35, 0x5d, 0x3d, 0x5b, 0x6a, 0x2e, 0x55, + 0x5f, 0x69, 0x77, 0x7c, 0x79, 0x66, 0x56, 0x63, 0x75, 0x64, 0x42, 0x34, + 0x50, 0x7c, 0x96, 0x95, 0x97, 0x92, 0xb4, 0x97, 0x91, 0xa2, 0xa0, 0x8f, + 0x85, 0x87, 0x99, 0xb3, 0x9f, 0x8e, 0x88, 0x91, 0x97, 0x8c, 0x81, 0x7e, + 0x84, 0x7c, 0x6e, 0x6e, 0x70, 0x5b, 0x41, 0x3c, 0x35, 0x3a, 0x40, 0x55, + 0x6e, 0x6d, 0x56, 0x49, 0x31, 0x29, 0x3a, 0x2c, 0x27, 0x32, 0x24, 0x2b, + 0x59, 0x6a, 0x72, 0x66, 0x4f, 0x43, 0x4e, 0x65, 0x62, 0x6f, 0x70, 0x63, + 0x59, 
0x38, 0x46, 0x24, 0x25, 0x2f, 0x56, 0x56, 0x7a, 0xac, 0x82, 0x7b, + 0x84, 0x75, 0x47, 0x41, 0x2a, 0x1d, 0x4a, 0x42, 0x35, 0x62, 0x48, 0x60, + 0x63, 0x5d, 0x68, 0x7f, 0x75, 0x70, 0x6d, 0x81, 0x9a, 0x99, 0x8e, 0x8f, + 0x90, 0x85, 0x86, 0x8f, 0x99, 0x92, 0xb3, 0xa5, 0x9a, 0x92, 0x8a, 0x86, + 0x6f, 0x4e, 0x53, 0x7d, 0x82, 0x7e, 0x84, 0x8e, 0x86, 0x71, 0x6a, 0x71, + 0x65, 0x64, 0x63, 0x6c, 0x72, 0x58, 0x34, 0x27, 0x26, 0x25, 0x24, 0x30, + 0x43, 0x3e, 0x2a, 0x23, 0x29, 0x29, 0x32, 0x2d, 0x37, 0x36, 0x20, 0x2f, + 0x6e, 0x72, 0x72, 0x62, 0x4b, 0x42, 0x4b, 0x55, 0x68, 0x6b, 0x5e, 0x4b, + 0x48, 0x34, 0x43, 0x1d, 0x37, 0x28, 0x43, 0x59, 0x94, 0xae, 0x74, 0x5c, + 0x77, 0x8f, 0x61, 0x4b, 0x33, 0x16, 0x3b, 0x4f, 0x30, 0x5f, 0x74, 0x88, + 0x92, 0x8a, 0x6b, 0x76, 0xae, 0xa1, 0x8f, 0x91, 0x9e, 0x99, 0x8b, 0x8b, + 0x91, 0x6a, 0x90, 0xb2, 0x88, 0x50, 0x7d, 0x9b, 0x89, 0x7f, 0x7a, 0x7a, + 0x65, 0x3e, 0x3e, 0x62, 0x6d, 0x64, 0x60, 0x5c, 0x51, 0x49, 0x55, 0x6a, + 0x4d, 0x49, 0x42, 0x45, 0x4a, 0x39, 0x27, 0x27, 0x24, 0x29, 0x2c, 0x39, + 0x47, 0x40, 0x33, 0x33, 0x51, 0x5b, 0x61, 0x63, 0x6d, 0x58, 0x35, 0x3a, + 0x73, 0x6f, 0x6d, 0x5e, 0x4c, 0x4e, 0x53, 0x49, 0x5f, 0x6e, 0x6f, 0x58, + 0x47, 0x42, 0x4d, 0x38, 0x28, 0x2f, 0x5f, 0x87, 0xa9, 0x8a, 0x5b, 0x4a, + 0x5f, 0x93, 0x72, 0x43, 0x30, 0x26, 0x3a, 0x45, 0x50, 0x76, 0x96, 0x93, + 0x8f, 0x88, 0x6f, 0x7c, 0x8f, 0x7c, 0x69, 0x72, 0x92, 0x9c, 0x90, 0x8c, + 0x8f, 0x86, 0xba, 0xb3, 0x73, 0x4b, 0x6e, 0x70, 0x71, 0x79, 0x71, 0x61, + 0x4e, 0x39, 0x31, 0x3d, 0x4c, 0x40, 0x34, 0x33, 0x39, 0x44, 0x55, 0x63, + 0x4b, 0x4a, 0x42, 0x42, 0x44, 0x37, 0x2f, 0x37, 0x40, 0x48, 0x55, 0x6a, + 0x7c, 0x79, 0x71, 0x77, 0x80, 0x8a, 0x84, 0x82, 0x85, 0x72, 0x5a, 0x5c, + 0x75, 0x73, 0x75, 0x69, 0x5d, 0x68, 0x69, 0x4d, 0x38, 0x4c, 0x62, 0x62, + 0x62, 0x89, 0x8d, 0x85, 0x68, 0x78, 0x8e, 0x9c, 0x98, 0x5e, 0x66, 0x76, + 0x60, 0x7a, 0x6e, 0x4a, 0x3d, 0x49, 0x4e, 0x42, 0x78, 0x93, 0x98, 0x89, + 0x77, 0x72, 0x80, 0x85, 0x8a, 0x7c, 0x6a, 0x71, 0x89, 0x8f, 0x81, 0x79, + 0x93, 
0x9f, 0xb2, 0x86, 0x71, 0x74, 0x7e, 0x5f, 0x6d, 0x75, 0x6c, 0x5b, + 0x4b, 0x3c, 0x33, 0x38, 0x53, 0x51, 0x4d, 0x4e, 0x5a, 0x6a, 0x72, 0x72, + 0x73, 0x78, 0x7a, 0x7d, 0x7c, 0x69, 0x5b, 0x61, 0x5b, 0x5d, 0x61, 0x74, + 0x8a, 0x8a, 0x87, 0x90, 0x90, 0x96, 0x89, 0x81, 0x81, 0x7e, 0x7d, 0x7f, + 0x62, 0x64, 0x6c, 0x62, 0x58, 0x69, 0x65, 0x3f, 0x3b, 0x30, 0x39, 0x51, + 0x7c, 0xcc, 0xb1, 0x95, 0xa7, 0xa9, 0x9a, 0x99, 0x8f, 0x49, 0x57, 0x5b, + 0x47, 0x37, 0x49, 0x57, 0x61, 0x86, 0x91, 0x81, 0x91, 0x9e, 0x86, 0x8d, + 0x8e, 0x89, 0xa5, 0x84, 0x90, 0x91, 0x8b, 0x8d, 0x98, 0x94, 0x8a, 0x8a, + 0x93, 0x8f, 0x91, 0x82, 0x9a, 0x92, 0x8b, 0x90, 0x92, 0x8c, 0x87, 0x8a, + 0x87, 0x74, 0x71, 0x85, 0x8b, 0x91, 0x8d, 0x82, 0x81, 0x8d, 0x95, 0x93, + 0x91, 0x94, 0x94, 0x97, 0x96, 0x82, 0x73, 0x77, 0x76, 0x6a, 0x5d, 0x66, + 0x79, 0x7b, 0x7c, 0x86, 0x88, 0x92, 0x8e, 0x8a, 0x88, 0x86, 0x89, 0x82, + 0x60, 0x62, 0x6a, 0x69, 0x60, 0x5d, 0x4c, 0x2f, 0x2b, 0x18, 0x27, 0x72, + 0xb2, 0xb0, 0x9d, 0xa2, 0x86, 0x9f, 0xa9, 0x93, 0x7c, 0x78, 0x78, 0x76, + 0x7d, 0x7d, 0x7d, 0x82, 0x88, 0x90, 0x93, 0x93, 0x96, 0x94, 0x90, 0x90, + 0x92, 0x95, 0x96, 0x96, 0x98, 0x93, 0x94, 0x95, 0x92, 0x98, 0x9b, 0x90, + 0x96, 0x9b, 0x9c, 0x97, 0x92, 0x94, 0x98, 0x99, 0x99, 0x98, 0x97, 0x93, + 0x90, 0x90, 0x92, 0x94, 0x9e, 0x94, 0x94, 0x9e, 0xa2, 0x9b, 0x95, 0x97, + 0x99, 0x90, 0x8f, 0x97, 0x96, 0x89, 0x80, 0x81, 0x7f, 0x71, 0x60, 0x5a, + 0x5f, 0x69, 0x70, 0x73, 0x8a, 0x96, 0x91, 0x86, 0x8c, 0x90, 0x8e, 0x8e, + 0x5b, 0x67, 0x72, 0x68, 0x56, 0x52, 0x47, 0x2e, 0x20, 0x4a, 0x77, 0x9f, + 0xc5, 0xce, 0xba, 0xa7, 0x8f, 0x9d, 0xa2, 0x96, 0x89, 0x87, 0x88, 0x87, + 0x88, 0x85, 0x88, 0x8d, 0x92, 0x92, 0x8c, 0x87, 0x88, 0x85, 0x81, 0x83, + 0x88, 0x8b, 0x8a, 0x87, 0x83, 0x7d, 0x7e, 0x7d, 0x7a, 0x83, 0x88, 0x7f, + 0x95, 0x98, 0x94, 0x8f, 0x8e, 0x8f, 0x8c, 0x88, 0x8d, 0x8d, 0x8d, 0x8d, + 0x8d, 0x8f, 0x94, 0x98, 0xa1, 0x98, 0x95, 0x9b, 0x9d, 0x98, 0x99, 0x9f, + 0x96, 0x97, 0x96, 0x95, 0x96, 0x95, 0x8e, 0x87, 0x81, 0x7c, 0x6f, 0x5f, + 0x54, 
0x57, 0x66, 0x73, 0x90, 0x99, 0x95, 0x91, 0x98, 0x97, 0x8f, 0x8f, + 0x4d, 0x62, 0x6c, 0x51, 0x2f, 0x2c, 0x2e, 0x23, 0x4f, 0x92, 0xbc, 0xc1, + 0xcc, 0xd4, 0xbc, 0x9b, 0x90, 0x91, 0x90, 0x90, 0x8d, 0x8a, 0x8a, 0x89, + 0x88, 0x87, 0x85, 0x88, 0x8c, 0x8a, 0x83, 0x7c, 0x8e, 0x8a, 0x86, 0x89, + 0x8d, 0x90, 0x8c, 0x89, 0x7f, 0x79, 0x7a, 0x78, 0x75, 0x7e, 0x86, 0x7f, + 0x90, 0x8c, 0x84, 0x7e, 0x81, 0x87, 0x87, 0x82, 0x86, 0x87, 0x87, 0x88, + 0x8a, 0x8f, 0x95, 0x9a, 0x8f, 0x8a, 0x8a, 0x8d, 0x8b, 0x85, 0x86, 0x8c, + 0x8b, 0x90, 0x8f, 0x86, 0x87, 0x90, 0x8b, 0x7e, 0x6f, 0x67, 0x59, 0x4a, + 0x42, 0x45, 0x52, 0x5e, 0x79, 0x7e, 0x7f, 0x84, 0x8b, 0x84, 0x7a, 0x7d, + 0x38, 0x52, 0x5e, 0x3b, 0x16, 0x1c, 0x31, 0x33, 0x99, 0xb3, 0xba, 0xb4, + 0xba, 0xb9, 0xa2, 0x8d, 0x88, 0x82, 0x80, 0x87, 0x85, 0x7e, 0x7b, 0x7f, + 0x81, 0x81, 0x7f, 0x7d, 0x7e, 0x80, 0x83, 0x85, 0x92, 0x93, 0x94, 0x94, + 0x92, 0x92, 0x90, 0x91, 0x8a, 0x86, 0x89, 0x88, 0x84, 0x8c, 0x93, 0x8c, + 0x87, 0x82, 0x78, 0x70, 0x74, 0x81, 0x8b, 0x8e, 0x8b, 0x8b, 0x8b, 0x8a, + 0x8b, 0x8e, 0x93, 0x97, 0x86, 0x88, 0x8e, 0x93, 0x8c, 0x80, 0x7c, 0x80, + 0x7b, 0x7c, 0x78, 0x73, 0x75, 0x7b, 0x78, 0x70, 0x71, 0x5e, 0x49, 0x44, + 0x4e, 0x59, 0x5a, 0x56, 0x68, 0x6d, 0x72, 0x79, 0x7c, 0x6f, 0x68, 0x74, + 0x34, 0x51, 0x67, 0x52, 0x39, 0x48, 0x65, 0x6d, 0xa7, 0xa7, 0x9c, 0x98, + 0xa2, 0x9e, 0x8e, 0x85, 0x80, 0x7a, 0x7b, 0x82, 0x7e, 0x73, 0x71, 0x78, + 0x7a, 0x7e, 0x82, 0x80, 0x7d, 0x7f, 0x88, 0x90, 0x81, 0x87, 0x8c, 0x8b, + 0x86, 0x84, 0x88, 0x8d, 0x8b, 0x8c, 0x91, 0x93, 0x8d, 0x94, 0x98, 0x91, + 0x85, 0x86, 0x81, 0x78, 0x77, 0x80, 0x8a, 0x8f, 0x8b, 0x8c, 0x8c, 0x8b, + 0x8a, 0x8c, 0x8e, 0x91, 0x7b, 0x81, 0x89, 0x8b, 0x84, 0x7b, 0x79, 0x7b, + 0x77, 0x6e, 0x6a, 0x6f, 0x72, 0x70, 0x70, 0x73, 0x7e, 0x67, 0x52, 0x50, + 0x5d, 0x64, 0x5b, 0x4f, 0x68, 0x6f, 0x71, 0x6f, 0x6a, 0x59, 0x56, 0x68, + 0x54, 0x6c, 0x85, 0x82, 0x75, 0x80, 0x92, 0x92, 0x8b, 0x9a, 0x98, 0x8d, + 0x8f, 0x8d, 0x81, 0x7a, 0x78, 0x75, 0x76, 0x7a, 0x76, 0x6d, 0x6e, 0x75, + 0x72, 
0x7b, 0x84, 0x86, 0x82, 0x80, 0x86, 0x8c, 0x74, 0x7b, 0x82, 0x82, + 0x7d, 0x7d, 0x84, 0x8c, 0x86, 0x88, 0x8f, 0x91, 0x8b, 0x8f, 0x93, 0x8b, + 0x8b, 0x91, 0x92, 0x89, 0x83, 0x81, 0x81, 0x81, 0x83, 0x85, 0x86, 0x87, + 0x86, 0x88, 0x88, 0x8a, 0x75, 0x78, 0x7b, 0x7a, 0x77, 0x76, 0x7a, 0x7f, + 0x7d, 0x72, 0x6f, 0x79, 0x7e, 0x7a, 0x7c, 0x84, 0x8a, 0x7f, 0x71, 0x66, + 0x5f, 0x58, 0x51, 0x4d, 0x72, 0x7a, 0x76, 0x6b, 0x5f, 0x52, 0x51, 0x61, + 0x80, 0x84, 0x92, 0x96, 0x91, 0x93, 0x95, 0x8e, 0x87, 0x97, 0x9a, 0x8f, + 0x88, 0x7e, 0x73, 0x75, 0x7b, 0x77, 0x73, 0x70, 0x6d, 0x6d, 0x70, 0x73, + 0x74, 0x78, 0x7d, 0x7f, 0x80, 0x81, 0x84, 0x84, 0x7b, 0x7c, 0x7d, 0x7e, + 0x7e, 0x81, 0x85, 0x88, 0x85, 0x86, 0x8c, 0x8c, 0x84, 0x88, 0x8e, 0x88, + 0x91, 0x93, 0x90, 0x89, 0x83, 0x80, 0x7b, 0x77, 0x7b, 0x7f, 0x82, 0x83, + 0x82, 0x82, 0x80, 0x80, 0x83, 0x84, 0x85, 0x82, 0x80, 0x82, 0x86, 0x89, + 0x83, 0x7e, 0x7b, 0x7e, 0x82, 0x82, 0x82, 0x84, 0x92, 0x8e, 0x86, 0x77, + 0x66, 0x5c, 0x5a, 0x5c, 0x77, 0x81, 0x7b, 0x70, 0x6d, 0x6b, 0x6c, 0x74, + 0x94, 0x88, 0x88, 0x8f, 0x8f, 0x90, 0x90, 0x86, 0x97, 0x92, 0x8b, 0x8c, + 0x88, 0x74, 0x6e, 0x7f, 0x88, 0x81, 0x76, 0x6d, 0x6d, 0x72, 0x75, 0x75, + 0x7e, 0x79, 0x74, 0x77, 0x7e, 0x85, 0x88, 0x89, 0x83, 0x7e, 0x79, 0x79, + 0x7d, 0x80, 0x7f, 0x7d, 0x85, 0x84, 0x8a, 0x88, 0x7f, 0x84, 0x8b, 0x86, + 0x92, 0x8d, 0x84, 0x7b, 0x7a, 0x7e, 0x7e, 0x7a, 0x7a, 0x7d, 0x82, 0x82, + 0x80, 0x7d, 0x7a, 0x78, 0x80, 0x84, 0x86, 0x84, 0x82, 0x7f, 0x7c, 0x7b, + 0x83, 0x86, 0x80, 0x78, 0x7a, 0x81, 0x7e, 0x75, 0x80, 0x7c, 0x71, 0x66, + 0x5e, 0x5a, 0x5a, 0x5c, 0x60, 0x6b, 0x67, 0x60, 0x6b, 0x76, 0x78, 0x7a, +}; diff --git a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/person_detect_model_data.cpp similarity index 100% rename from 
examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp rename to examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/person_detect_model_data.cpp diff --git a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_image_data.cpp b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/person_image_data.cpp similarity index 84% rename from examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_image_data.cpp rename to examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/person_image_data.cpp index f9c2c85..7291cb5 100644 --- a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_image_data.cpp +++ b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model/person_image_data.cpp @@ -1,331 +1,314 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. 
+#include #include "person_image_data.h" -#include "model_settings.h" - -const int g_person_data_size = kMaxImageSize; -const uint8_t g_person_data[g_person_data_size] = { - 0x0b, 0x10, 0x12, 0x1d, 0x2d, 0x37, 0x44, 0x4e, 0x50, 0x4f, 0x5d, 0x68, +const unsigned int g_person_image_data_size = 9216; +alignas(16) const unsigned char g_person_image_data[] = { + 0xb, 0x10, 0x12, 0x1d, 0x2d, 0x37, 0x44, 0x4e, 0x50, 0x4f, 0x5d, 0x68, 0x61, 0x7f, 0x64, 0x5d, 0x7a, 0x66, 0xb8, 0xc2, 0xc4, 0xaf, 0xa6, 0xa8, 0xc2, 0xcd, 0xc7, 0xb9, 0x4b, 0x7f, 0x7f, 0x72, 0x79, 0x6d, 0x7f, 0x68, 0x7f, 0x7f, 0x7f, 0x78, 0x60, 0x6e, 0x6d, 0x60, 0x53, 0x52, 0x49, 0x3c, - 0x37, 0x2c, 0x28, 0x26, 0x1f, 0x17, 0x12, 0x0f, 0x0d, 0x06, 0xfe, 0xf2, - 0xfe, 0x01, 0xfb, 0xfa, 0xf2, 0xef, 0xe9, 0xe6, 0xe4, 0xe2, 0xe1, 0xde, + 0x37, 0x2c, 0x28, 0x26, 0x1f, 0x17, 0x12, 0xf, 0xd, 0x6, 0xfe, 0xf2, + 0xfe, 0x1, 0xfb, 0xfa, 0xf2, 0xef, 0xe9, 0xe6, 0xe4, 0xe2, 0xe1, 0xde, 0xdd, 0xdd, 0xd8, 0xd7, 0xd5, 0xd5, 0xd7, 0xd6, 0xd2, 0xd2, 0xd0, 0xd3, 0xd5, 0xd1, 0xd4, 0xd1, 0xd1, 0xd3, 0xd4, 0xd6, 0xd6, 0xd8, 0xdb, 0xda, - 0x09, 0x0d, 0x12, 0x1f, 0x2b, 0x39, 0x40, 0x54, 0x54, 0x5f, 0x5f, 0x71, + 0x9, 0xd, 0x12, 0x1f, 0x2b, 0x39, 0x40, 0x54, 0x54, 0x5f, 0x5f, 0x71, 0x70, 0x7f, 0x70, 0x6d, 0x7a, 0x74, 0xb9, 0xc4, 0xc4, 0xaf, 0xa7, 0xa8, 0xc4, 0xd0, 0xc7, 0xc0, 0x6a, 0x7f, 0x5f, 0x77, 0x70, 0x7d, 0x7e, 0x73, 0x7f, 0x7f, 0x70, 0x74, 0x6d, 0x71, 0x71, 0x67, 0x5b, 0x55, 0x45, 0x45, - 0x32, 0x2b, 0x24, 0x1c, 0x15, 0x0f, 0x10, 0x0c, 0x04, 0x03, 0xf9, 0xf2, - 0x01, 0x00, 0xf9, 0xf8, 0xf5, 0xef, 0xeb, 0xe5, 0xe2, 0xe2, 0xe0, 0xdf, + 0x32, 0x2b, 0x24, 0x1c, 0x15, 0xf, 0x10, 0xc, 0x4, 0x3, 0xf9, 0xf2, + 0x1, 0x0, 0xf9, 0xf8, 0xf5, 0xef, 0xeb, 0xe5, 0xe2, 0xe2, 0xe0, 0xdf, 0xdc, 0xd9, 0xd8, 0xd8, 0xd8, 0xd8, 0xd6, 0xd8, 0xd5, 0xcd, 0xd5, 0xd6, 0xd5, 0xd5, 0xd5, 0xd4, 0xd5, 0xd8, 0xd9, 0xd7, 0xd8, 0xd9, 0xdc, 0xdd, - 0x0a, 0x0e, 0x16, 0x23, 0x29, 0x38, 0x3d, 0x50, 0x55, 0x60, 0x68, 0x67, + 0xa, 0xe, 0x16, 0x23, 0x29, 0x38, 0x3d, 
0x50, 0x55, 0x60, 0x68, 0x67, 0x67, 0x7f, 0x65, 0x7f, 0x75, 0x7f, 0xb8, 0xc0, 0xc3, 0xaf, 0xa5, 0xa8, 0xc5, 0xce, 0xc3, 0xbe, 0x71, 0x70, 0x7e, 0x7f, 0x7c, 0x79, 0x7f, 0x7f, 0x7a, 0x6a, 0x7d, 0x73, 0x74, 0x6f, 0x67, 0x52, 0x61, 0x49, 0x4b, 0x42, - 0x3c, 0x34, 0x2d, 0x21, 0x18, 0x10, 0x0c, 0x04, 0xfc, 0xf9, 0xf3, 0xfc, - 0x00, 0xff, 0xf8, 0xf5, 0xf4, 0xee, 0xe9, 0xea, 0xe5, 0xdf, 0xde, 0xda, + 0x3c, 0x34, 0x2d, 0x21, 0x18, 0x10, 0xc, 0x4, 0xfc, 0xf9, 0xf3, 0xfc, + 0x0, 0xff, 0xf8, 0xf5, 0xf4, 0xee, 0xe9, 0xea, 0xe5, 0xdf, 0xde, 0xda, 0xd9, 0xd6, 0xdb, 0xd8, 0xdb, 0xdb, 0xda, 0xd4, 0xcf, 0xd4, 0xd7, 0xd6, 0xd7, 0xd6, 0xd2, 0xd2, 0xd5, 0xd6, 0xd5, 0xd9, 0xd9, 0xda, 0xdc, 0xdd, - 0x08, 0x0d, 0x16, 0x1d, 0x26, 0x36, 0x42, 0x43, 0x51, 0x59, 0x71, 0x6a, + 0x8, 0xd, 0x16, 0x1d, 0x26, 0x36, 0x42, 0x43, 0x51, 0x59, 0x71, 0x6a, 0x61, 0x73, 0x78, 0x7f, 0x6b, 0x79, 0xbc, 0xbf, 0xc5, 0xb0, 0xa5, 0xa8, 0xc4, 0xce, 0xc3, 0xc2, 0x68, 0x7b, 0x72, 0x64, 0x5e, 0x7f, 0x7f, 0x6d, 0x7a, 0x64, 0x6e, 0x7f, 0x6d, 0x78, 0x68, 0x68, 0x61, 0x4f, 0x4b, 0x3e, - 0x37, 0x35, 0x2b, 0x23, 0x23, 0x18, 0x11, 0x09, 0x03, 0xfb, 0xf1, 0xf4, + 0x37, 0x35, 0x2b, 0x23, 0x23, 0x18, 0x11, 0x9, 0x3, 0xfb, 0xf1, 0xf4, 0xf0, 0xec, 0xed, 0xed, 0xee, 0xeb, 0xea, 0xe6, 0xe4, 0xde, 0xdf, 0xdf, 0xd7, 0xdd, 0xda, 0xdd, 0xdc, 0xdb, 0xd9, 0xd0, 0xd0, 0xd9, 0xd6, 0xd6, 0xd4, 0xd8, 0xd8, 0xda, 0xd8, 0xd8, 0xda, 0xda, 0xdc, 0xdc, 0xdc, 0xe0, - 0x07, 0x0e, 0x13, 0x1e, 0x2a, 0x37, 0x3a, 0x41, 0x5a, 0x4c, 0x6f, 0x66, + 0x7, 0xe, 0x13, 0x1e, 0x2a, 0x37, 0x3a, 0x41, 0x5a, 0x4c, 0x6f, 0x66, 0x78, 0x67, 0x64, 0x6b, 0x5d, 0x79, 0xc2, 0xc1, 0xc8, 0xb1, 0xa3, 0xa8, 0xc0, 0xcc, 0xc2, 0xc3, 0x75, 0x78, 0x7f, 0x7f, 0x7f, 0x7f, 0x7b, 0x7f, 0x75, 0x76, 0x71, 0x70, 0x5c, 0x77, 0x63, 0x6c, 0x59, 0x51, 0x4b, 0x40, - 0x3c, 0x2f, 0x2c, 0x29, 0x20, 0x14, 0x10, 0x09, 0x04, 0xf6, 0xf4, 0xf7, + 0x3c, 0x2f, 0x2c, 0x29, 0x20, 0x14, 0x10, 0x9, 0x4, 0xf6, 0xf4, 0xf7, 0xf3, 0xf4, 0xe9, 0xe5, 0xe2, 0xdf, 0xdd, 0xe3, 0xde, 0xdd, 0xde, 0xde, 0xdb, 0xdd, 
0xdb, 0xdb, 0xdb, 0xdb, 0xd5, 0xcf, 0xd7, 0xdb, 0xd9, 0xd9, 0xdc, 0xd8, 0xd7, 0xd9, 0xd9, 0xda, 0xda, 0xdf, 0xde, 0xdd, 0xdd, 0xe0, - 0x05, 0x0b, 0x14, 0x20, 0x2b, 0x2d, 0x3f, 0x49, 0x52, 0x5c, 0x5a, 0x6f, + 0x5, 0xb, 0x14, 0x20, 0x2b, 0x2d, 0x3f, 0x49, 0x52, 0x5c, 0x5a, 0x6f, 0x6f, 0x76, 0x69, 0x6a, 0x71, 0x7f, 0xc8, 0xc3, 0xca, 0xb2, 0xa5, 0xa7, 0xc3, 0xce, 0xc0, 0xc7, 0x7f, 0x68, 0x7f, 0x70, 0x70, 0x79, 0x7f, 0x7f, 0x67, 0x7d, 0x64, 0x72, 0x74, 0x63, 0x63, 0x66, 0x5f, 0x58, 0x4a, 0x3b, - 0x32, 0x2c, 0x2b, 0x1c, 0x1e, 0x16, 0x0f, 0x09, 0xfd, 0xf3, 0xfb, 0xf8, + 0x32, 0x2c, 0x2b, 0x1c, 0x1e, 0x16, 0xf, 0x9, 0xfd, 0xf3, 0xfb, 0xf8, 0xfb, 0xf7, 0xf1, 0xee, 0xe5, 0xe4, 0xe0, 0xd8, 0xd7, 0xd6, 0xda, 0xdd, 0xdd, 0xdc, 0xe0, 0xdc, 0xdc, 0xd7, 0xd2, 0xd8, 0xd7, 0xdd, 0xdb, 0xda, 0xd8, 0xd8, 0xda, 0xda, 0xdb, 0xda, 0xdc, 0xdf, 0xdf, 0xe0, 0xe3, 0xe6, - 0x05, 0x0c, 0x11, 0x1c, 0x25, 0x33, 0x3a, 0x47, 0x56, 0x5d, 0x66, 0x59, + 0x5, 0xc, 0x11, 0x1c, 0x25, 0x33, 0x3a, 0x47, 0x56, 0x5d, 0x66, 0x59, 0x5d, 0x60, 0x73, 0x71, 0x7d, 0x7f, 0xd1, 0xc8, 0xcc, 0xb5, 0xa3, 0xa6, 0xc5, 0xc9, 0xc0, 0xcb, 0x7f, 0x78, 0x7f, 0x7f, 0x7f, 0x77, 0x70, 0x65, 0x64, 0x7f, 0x5a, 0x66, 0x6e, 0x74, 0x6f, 0x53, 0x52, 0x50, 0x49, 0x40, - 0x32, 0x31, 0x28, 0x20, 0x1b, 0x15, 0x13, 0x05, 0xf9, 0xf8, 0x01, 0xfa, + 0x32, 0x31, 0x28, 0x20, 0x1b, 0x15, 0x13, 0x5, 0xf9, 0xf8, 0x1, 0xfa, 0xf5, 0xf5, 0xf4, 0xf0, 0xea, 0xe7, 0xe5, 0xe2, 0xdd, 0xdc, 0xd7, 0xd3, 0xd1, 0xd4, 0xd3, 0xd6, 0xdc, 0xd0, 0xd6, 0xdb, 0xdb, 0xdb, 0xde, 0xd7, 0xd8, 0xdb, 0xd9, 0xdd, 0xdd, 0xde, 0xdc, 0xe0, 0xe1, 0xe4, 0xe9, 0xee, - 0xfa, 0x08, 0x0d, 0x1a, 0x23, 0x2e, 0x38, 0x40, 0x4c, 0x55, 0x58, 0x68, + 0xfa, 0x8, 0xd, 0x1a, 0x23, 0x2e, 0x38, 0x40, 0x4c, 0x55, 0x58, 0x68, 0x65, 0x68, 0x7b, 0x71, 0x6d, 0x78, 0xdb, 0xcb, 0xd4, 0xba, 0xa5, 0xa6, 0xc6, 0xcb, 0xc0, 0xd2, 0x6a, 0x7f, 0x67, 0x77, 0x7f, 0x6a, 0x69, 0x75, 0x4c, 0x61, 0x75, 0x63, 0x52, 0x76, 0x66, 0x60, 0x5c, 0x44, 0x48, 0x3c, - 0x35, 0x2d, 0x27, 0x22, 0x19, 0x14, 0x0f, 0x07, 0xf8, 
0xff, 0xfc, 0xf8, + 0x35, 0x2d, 0x27, 0x22, 0x19, 0x14, 0xf, 0x7, 0xf8, 0xff, 0xfc, 0xf8, 0xf7, 0xf3, 0xef, 0xec, 0xe9, 0xe6, 0xe3, 0xe1, 0xe1, 0xdc, 0xdc, 0xdc, 0xd8, 0xd7, 0xd2, 0xcd, 0xcf, 0xd0, 0xd4, 0xdb, 0xda, 0xd9, 0xdb, 0xdb, 0xdc, 0xdb, 0xdd, 0xdb, 0xde, 0xde, 0xe0, 0xe7, 0xe5, 0xea, 0xea, 0xf2, - 0x07, 0x0f, 0x11, 0x14, 0x19, 0x1c, 0x29, 0x33, 0x46, 0x4a, 0x56, 0x5f, + 0x7, 0xf, 0x11, 0x14, 0x19, 0x1c, 0x29, 0x33, 0x46, 0x4a, 0x56, 0x5f, 0x62, 0x5f, 0x6f, 0x6a, 0x74, 0x74, 0xe9, 0xc8, 0xdd, 0xc2, 0xa5, 0xa5, 0xc5, 0xd1, 0xc3, 0xd9, 0x6f, 0x7e, 0x7f, 0x7d, 0x7c, 0x6e, 0x7f, 0x7b, 0x7d, 0x79, 0x64, 0x74, 0x6b, 0x76, 0x52, 0x5d, 0x53, 0x4b, 0x45, 0x3a, - 0x36, 0x2b, 0x2c, 0x23, 0x1b, 0x12, 0x09, 0xfe, 0xfa, 0xff, 0xfb, 0xf9, + 0x36, 0x2b, 0x2c, 0x23, 0x1b, 0x12, 0x9, 0xfe, 0xfa, 0xff, 0xfb, 0xf9, 0xf6, 0xf7, 0xee, 0xea, 0xe5, 0xe7, 0xe6, 0xe0, 0xdf, 0xe1, 0xdb, 0xda, 0xd7, 0xdb, 0xd8, 0xd2, 0xd1, 0xd3, 0xd2, 0xd3, 0xd3, 0xd3, 0xd7, 0xdc, 0xdd, 0xdb, 0xdd, 0xdf, 0xdc, 0xe2, 0xe5, 0xe7, 0xe9, 0xf0, 0xf1, 0xf4, - 0x0b, 0x13, 0x18, 0x22, 0x28, 0x2d, 0x37, 0x3b, 0x47, 0x4e, 0x55, 0x5c, + 0xb, 0x13, 0x18, 0x22, 0x28, 0x2d, 0x37, 0x3b, 0x47, 0x4e, 0x55, 0x5c, 0x55, 0x79, 0x5f, 0x69, 0x77, 0x7f, 0xfe, 0xcc, 0xe2, 0xc9, 0xa4, 0xa7, 0xd7, 0xdd, 0xca, 0xe0, 0x71, 0x79, 0x70, 0x5f, 0x7d, 0x75, 0x7f, 0x7f, 0x5a, 0x78, 0x71, 0x6b, 0x6f, 0x58, 0x5b, 0x5b, 0x4e, 0x4b, 0x40, 0x3b, - 0x30, 0x31, 0x28, 0x22, 0x19, 0x11, 0x04, 0xf8, 0x07, 0x01, 0xff, 0xf9, + 0x30, 0x31, 0x28, 0x22, 0x19, 0x11, 0x4, 0xf8, 0x7, 0x1, 0xff, 0xf9, 0xf9, 0xf6, 0xf1, 0xec, 0xe8, 0xe7, 0xe4, 0xe6, 0xe2, 0xe0, 0xde, 0xdd, 0xdc, 0xdd, 0xd6, 0xd1, 0xd3, 0xd6, 0xd7, 0xd7, 0xd9, 0xd6, 0xd5, 0xd7, 0xd6, 0xda, 0xdb, 0xdf, 0xe4, 0xe8, 0xea, 0xed, 0xf2, 0xf5, 0xf5, 0xf7, - 0x05, 0x0f, 0x18, 0x1f, 0x28, 0x31, 0x3b, 0x48, 0x54, 0x5a, 0x6a, 0x6c, + 0x5, 0xf, 0x18, 0x1f, 0x28, 0x31, 0x3b, 0x48, 0x54, 0x5a, 0x6a, 0x6c, 0x5e, 0x59, 0x7f, 0x6e, 0x7a, 0x6b, 0x11, 0xc6, 0xe1, 0xcc, 0xa6, 0xa7, 0xdd, 0xe8, 0xcc, 0xe6, 
0x75, 0x7f, 0x74, 0x7f, 0x77, 0x6d, 0x6a, 0x79, 0x71, 0x76, 0x56, 0x6e, 0x68, 0x6e, 0x5d, 0x59, 0x4e, 0x48, 0x3f, 0x3a, - 0x2e, 0x2a, 0x26, 0x1d, 0x17, 0x10, 0x02, 0xfa, 0x05, 0x03, 0xff, 0x00, + 0x2e, 0x2a, 0x26, 0x1d, 0x17, 0x10, 0x2, 0xfa, 0x5, 0x3, 0xff, 0x0, 0xf9, 0xf7, 0xf0, 0xec, 0xea, 0xe8, 0xe6, 0xe5, 0xe2, 0xdf, 0xdb, 0xdd, 0xdc, 0xd9, 0xd3, 0xd6, 0xd8, 0xd9, 0xd8, 0xd8, 0xdb, 0xd9, 0xd9, 0xd9, 0xdb, 0xda, 0xdd, 0xe0, 0xe4, 0xea, 0xec, 0xf0, 0xf4, 0xf3, 0xf9, 0xfd, - 0x04, 0x0a, 0x14, 0x1a, 0x25, 0x34, 0x3a, 0x44, 0x56, 0x5e, 0x5f, 0x5c, + 0x4, 0xa, 0x14, 0x1a, 0x25, 0x34, 0x3a, 0x44, 0x56, 0x5e, 0x5f, 0x5c, 0x6b, 0x59, 0x7f, 0x70, 0x51, 0x75, 0x39, 0xc6, 0xe0, 0xce, 0xa9, 0xad, 0xe2, 0xeb, 0xcf, 0xee, 0x7f, 0x7f, 0x7f, 0x76, 0x7b, 0x7f, 0x71, 0x6a, 0x6d, 0x70, 0x4f, 0x6d, 0x67, 0x56, 0x5b, 0x5b, 0x50, 0x46, 0x44, 0x39, - 0x30, 0x29, 0x22, 0x1c, 0x10, 0x0a, 0xfd, 0x08, 0x07, 0x02, 0xfd, 0xfe, + 0x30, 0x29, 0x22, 0x1c, 0x10, 0xa, 0xfd, 0x8, 0x7, 0x2, 0xfd, 0xfe, 0xfa, 0xf6, 0xf0, 0xed, 0xe7, 0xec, 0xe5, 0xe6, 0xe2, 0xe0, 0xe3, 0xe1, 0xdd, 0xd7, 0xd6, 0xda, 0xdd, 0xdd, 0xdd, 0xdd, 0xdb, 0xdd, 0xdf, 0xde, - 0xdd, 0xdf, 0xe1, 0xe1, 0xe4, 0xe6, 0xe8, 0xf2, 0xf7, 0xfa, 0x01, 0x08, - 0xfe, 0x09, 0x11, 0x19, 0x26, 0x2f, 0x38, 0x46, 0x50, 0x57, 0x66, 0x66, + 0xdd, 0xdf, 0xe1, 0xe1, 0xe4, 0xe6, 0xe8, 0xf2, 0xf7, 0xfa, 0x1, 0x8, + 0xfe, 0x9, 0x11, 0x19, 0x26, 0x2f, 0x38, 0x46, 0x50, 0x57, 0x66, 0x66, 0x55, 0x66, 0x67, 0x4c, 0x7f, 0x73, 0x53, 0xc3, 0xe3, 0xce, 0xa5, 0xa9, 0xe3, 0xef, 0xcd, 0xff, 0x72, 0x7d, 0x5e, 0x7f, 0x78, 0x7f, 0x6c, 0x7b, 0x78, 0x5e, 0x75, 0x7b, 0x6b, 0x70, 0x61, 0x55, 0x51, 0x45, 0x3a, 0x35, - 0x2f, 0x20, 0x1c, 0x10, 0x0c, 0x07, 0xfc, 0x0d, 0x08, 0x02, 0x00, 0xfe, + 0x2f, 0x20, 0x1c, 0x10, 0xc, 0x7, 0xfc, 0xd, 0x8, 0x2, 0x0, 0xfe, 0xf5, 0xf1, 0xee, 0xe9, 0xe8, 0xe9, 0xe6, 0xe3, 0xe2, 0xe3, 0xde, 0xdf, 0xde, 0xd5, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdd, 0xde, 0xdb, 0xde, - 0xe2, 0xe4, 0xe4, 0xe6, 0xe7, 0xed, 0xeb, 0xf1, 0xf8, 0x05, 0x0a, 0x14, 
- 0xfc, 0x08, 0x16, 0x15, 0x21, 0x2a, 0x32, 0x45, 0x4e, 0x5a, 0x67, 0x5d, + 0xe2, 0xe4, 0xe4, 0xe6, 0xe7, 0xed, 0xeb, 0xf1, 0xf8, 0x5, 0xa, 0x14, + 0xfc, 0x8, 0x16, 0x15, 0x21, 0x2a, 0x32, 0x45, 0x4e, 0x5a, 0x67, 0x5d, 0x77, 0x68, 0x72, 0x76, 0x65, 0x7f, 0x68, 0xc0, 0xe0, 0xd2, 0xa7, 0xa7, - 0xdf, 0xeb, 0xcc, 0x0d, 0x7e, 0x5e, 0x6b, 0x75, 0x62, 0x77, 0x7a, 0x73, - 0x78, 0x69, 0x69, 0x70, 0x63, 0x52, 0x54, 0x4a, 0x47, 0x42, 0x2f, 0x0b, - 0xd8, 0xc6, 0xbd, 0xc8, 0xcb, 0xd0, 0xec, 0x06, 0x03, 0x03, 0xfe, 0xf9, + 0xdf, 0xeb, 0xcc, 0xd, 0x7e, 0x5e, 0x6b, 0x75, 0x62, 0x77, 0x7a, 0x73, + 0x78, 0x69, 0x69, 0x70, 0x63, 0x52, 0x54, 0x4a, 0x47, 0x42, 0x2f, 0xb, + 0xd8, 0xc6, 0xbd, 0xc8, 0xcb, 0xd0, 0xec, 0x6, 0x3, 0x3, 0xfe, 0xf9, 0xf4, 0xf2, 0xed, 0xeb, 0xe7, 0xe5, 0xe4, 0xe7, 0xe1, 0xe4, 0xe2, 0xdf, 0xd8, 0xd8, 0xe0, 0xde, 0xdd, 0xdb, 0xdb, 0xdd, 0xdf, 0xe2, 0xe0, 0xdf, - 0xe1, 0xe8, 0xe6, 0xe8, 0xe9, 0xee, 0xef, 0xf9, 0x03, 0x0c, 0x0c, 0x13, - 0xfa, 0x04, 0x0f, 0x10, 0x1b, 0x2c, 0x38, 0x42, 0x4b, 0x4f, 0x5b, 0x68, + 0xe1, 0xe8, 0xe6, 0xe8, 0xe9, 0xee, 0xef, 0xf9, 0x3, 0xc, 0xc, 0x13, + 0xfa, 0x4, 0xf, 0x10, 0x1b, 0x2c, 0x38, 0x42, 0x4b, 0x4f, 0x5b, 0x68, 0x62, 0x5c, 0x6a, 0x57, 0x71, 0x7f, 0x6e, 0xc1, 0xd9, 0xd1, 0xa5, 0xa4, 0xd1, 0xef, 0xcc, 0x1f, 0x67, 0x7f, 0x7e, 0x7b, 0x7f, 0x7b, 0x69, 0x6f, 0x7a, 0x6b, 0x64, 0x73, 0x5f, 0x41, 0x4e, 0x44, 0x30, 0x19, 0xd1, 0xad, - 0xac, 0xb1, 0xb3, 0xb4, 0xb0, 0xac, 0xb3, 0xcd, 0xf2, 0x01, 0xf9, 0xf6, + 0xac, 0xb1, 0xb3, 0xb4, 0xb0, 0xac, 0xb3, 0xcd, 0xf2, 0x1, 0xf9, 0xf6, 0xf1, 0xef, 0xed, 0xe9, 0xe9, 0xec, 0xe5, 0xe2, 0xe1, 0xe2, 0xe1, 0xdd, 0xda, 0xdf, 0xde, 0xdf, 0xde, 0xdd, 0xe0, 0xe4, 0xe2, 0xe1, 0xe3, 0xe7, - 0xe6, 0xe6, 0xe9, 0xea, 0xf0, 0xef, 0xf9, 0x05, 0x09, 0x0e, 0x14, 0x17, - 0xfc, 0x05, 0x0c, 0x12, 0x1f, 0x27, 0x2f, 0x3f, 0x4b, 0x53, 0x46, 0x57, + 0xe6, 0xe6, 0xe9, 0xea, 0xf0, 0xef, 0xf9, 0x5, 0x9, 0xe, 0x14, 0x17, + 0xfc, 0x5, 0xc, 0x12, 0x1f, 0x27, 0x2f, 0x3f, 0x4b, 0x53, 0x46, 0x57, 0x72, 0x73, 0x72, 0x70, 0x64, 0x77, 
0x5c, 0xc0, 0xd8, 0xd0, 0xa8, 0xa6, 0xe1, 0xec, 0xcc, 0x33, 0x7b, 0x77, 0x7f, 0x71, 0x65, 0x71, 0x7f, 0x66, - 0x6e, 0x5f, 0x5a, 0x74, 0x6b, 0x49, 0x51, 0x0e, 0xc7, 0xad, 0xa9, 0xa6, + 0x6e, 0x5f, 0x5a, 0x74, 0x6b, 0x49, 0x51, 0xe, 0xc7, 0xad, 0xa9, 0xa6, 0xa1, 0xa0, 0xa2, 0xa2, 0xa1, 0xa2, 0xa6, 0xa9, 0xb2, 0xd7, 0xf4, 0xf2, 0xf3, 0xf1, 0xee, 0xe8, 0xea, 0xe8, 0xe5, 0xe3, 0xe5, 0xe5, 0xe0, 0xda, 0xe0, 0xe1, 0xe0, 0xde, 0xe0, 0xe0, 0xdf, 0xe3, 0xe4, 0xe6, 0xe6, 0xe7, - 0xed, 0xf1, 0xf3, 0xf5, 0xf8, 0xf8, 0x07, 0x09, 0x10, 0x17, 0x1b, 0x22, - 0xfc, 0x05, 0x0e, 0x14, 0x1c, 0x22, 0x2d, 0x3b, 0x44, 0x50, 0x56, 0x4b, + 0xed, 0xf1, 0xf3, 0xf5, 0xf8, 0xf8, 0x7, 0x9, 0x10, 0x17, 0x1b, 0x22, + 0xfc, 0x5, 0xe, 0x14, 0x1c, 0x22, 0x2d, 0x3b, 0x44, 0x50, 0x56, 0x4b, 0x6f, 0x63, 0x7f, 0x6d, 0x5f, 0x79, 0x74, 0xc3, 0xdb, 0xd4, 0xa7, 0xa9, 0xe3, 0xed, 0xcb, 0x4a, 0x71, 0x7b, 0x7f, 0x6f, 0x7f, 0x70, 0x68, 0x72, 0x72, 0x66, 0x68, 0x69, 0x4c, 0x57, 0x19, 0xb7, 0xa7, 0xa0, 0x9d, 0x9b, 0x9a, 0x99, 0x9b, 0x9b, 0x9c, 0x9b, 0xa0, 0xa3, 0xa4, 0xaf, 0xcd, 0xea, 0xef, 0xf0, 0xea, 0xe9, 0xe7, 0xe7, 0xe3, 0xe8, 0xe4, 0xe4, 0xd9, 0xe3, 0xe2, 0xe2, 0xe2, 0xe1, 0xe5, 0xe4, 0xe3, 0xe4, 0xe9, 0xe4, 0xeb, 0xea, - 0xef, 0xf4, 0xf6, 0xfc, 0xfd, 0x0a, 0x0b, 0x10, 0x12, 0x1a, 0x28, 0x31, - 0xf6, 0x04, 0x0b, 0x15, 0x1c, 0x28, 0x2e, 0x30, 0x43, 0x58, 0x65, 0x6d, + 0xef, 0xf4, 0xf6, 0xfc, 0xfd, 0xa, 0xb, 0x10, 0x12, 0x1a, 0x28, 0x31, + 0xf6, 0x4, 0xb, 0x15, 0x1c, 0x28, 0x2e, 0x30, 0x43, 0x58, 0x65, 0x6d, 0x6c, 0x7e, 0x5e, 0x6d, 0x7b, 0x5f, 0x7f, 0xc9, 0xd8, 0xd5, 0xa8, 0xa9, 0xe1, 0xe9, 0xcc, 0x4b, 0x63, 0x70, 0x6e, 0x7f, 0x78, 0x78, 0x7f, 0x62, 0x7f, 0x79, 0x5e, 0x7b, 0x60, 0x51, 0xc4, 0xa4, 0xa1, 0xa0, 0xa0, 0xa2, 0xa1, 0xa2, 0x9c, 0x9d, 0xa0, 0x9d, 0x9a, 0x9e, 0x9c, 0xa4, 0xac, 0xb9, 0xdb, 0xe6, 0xe8, 0xe8, 0xe4, 0xe7, 0xe6, 0xe5, 0xe6, 0xe0, 0xdc, 0xe6, 0xe2, 0xe3, 0xe2, 0xe1, 0xe5, 0xe3, 0xe5, 0xe8, 0xe9, 0xe9, 0xed, 0xf1, - 0xf5, 0xfb, 0x00, 0xfe, 0x08, 0x0c, 0x0f, 0x12, 0x23, 0x29, 0x35, 0x37, - 
0xeb, 0x04, 0x0b, 0x12, 0x1a, 0x28, 0x37, 0x41, 0x49, 0x50, 0x57, 0x5a, + 0xf5, 0xfb, 0x0, 0xfe, 0x8, 0xc, 0xf, 0x12, 0x23, 0x29, 0x35, 0x37, + 0xeb, 0x4, 0xb, 0x12, 0x1a, 0x28, 0x37, 0x41, 0x49, 0x50, 0x57, 0x5a, 0x6f, 0x76, 0x63, 0x69, 0x67, 0x5c, 0x7c, 0xce, 0xd6, 0xd3, 0xab, 0xaa, 0xe0, 0xe8, 0xc8, 0x65, 0x73, 0x7f, 0x75, 0x7d, 0x62, 0x70, 0x7f, 0x72, - 0x79, 0x6a, 0x7b, 0x6a, 0x59, 0x02, 0xa5, 0xa1, 0xa7, 0xac, 0xad, 0xaf, + 0x79, 0x6a, 0x7b, 0x6a, 0x59, 0x2, 0xa5, 0xa1, 0xa7, 0xac, 0xad, 0xaf, 0xae, 0xa8, 0xa9, 0xa1, 0x9e, 0xa0, 0x9f, 0x9c, 0x9a, 0x9b, 0xa4, 0xa9, 0xb7, 0xd0, 0xd5, 0xda, 0xd8, 0xe1, 0xe3, 0xe4, 0xdf, 0xd9, 0xe5, 0xe5, 0xe0, 0xe3, 0xe3, 0xe3, 0xe6, 0xe4, 0xe6, 0xe9, 0xec, 0xf2, 0xf0, 0xf6, - 0xfc, 0x00, 0x00, 0x0b, 0x10, 0x0f, 0x1a, 0x1f, 0x28, 0x36, 0x40, 0x47, - 0xe5, 0xf2, 0xfb, 0x02, 0x0c, 0x19, 0x28, 0x33, 0x44, 0x53, 0x50, 0x70, + 0xfc, 0x0, 0x0, 0xb, 0x10, 0xf, 0x1a, 0x1f, 0x28, 0x36, 0x40, 0x47, + 0xe5, 0xf2, 0xfb, 0x2, 0xc, 0x19, 0x28, 0x33, 0x44, 0x53, 0x50, 0x70, 0x79, 0x72, 0x66, 0x7f, 0x7e, 0x6d, 0x7f, 0xdb, 0xd8, 0xd8, 0xa9, 0xa9, 0xdf, 0xeb, 0xc6, 0x5c, 0x7d, 0x77, 0x70, 0x7f, 0x7f, 0x76, 0x4b, 0x7c, 0x6d, 0x62, 0x65, 0x60, 0x5b, 0xb8, 0xa2, 0xa9, 0xb2, 0xb5, 0xb7, 0xb7, 0xb3, 0xb2, 0xad, 0xab, 0xa7, 0xa1, 0xa3, 0x9e, 0x9c, 0x9d, 0x9e, 0xa4, 0xab, 0xbd, 0xc9, 0xcc, 0xcd, 0xd0, 0xce, 0xd2, 0xd2, 0xd9, 0xde, 0xe0, 0xe2, 0xe0, 0xe2, 0xe4, 0xe6, 0xe6, 0xe9, 0xe9, 0xee, 0xf0, 0xf8, 0xff, - 0x01, 0x01, 0x0b, 0x0d, 0x14, 0x19, 0x1f, 0x2b, 0x2d, 0x3f, 0x42, 0x5a, - 0xef, 0xfa, 0x0d, 0x0f, 0x18, 0x26, 0x29, 0x34, 0x3a, 0x46, 0x4d, 0x59, + 0x1, 0x1, 0xb, 0xd, 0x14, 0x19, 0x1f, 0x2b, 0x2d, 0x3f, 0x42, 0x5a, + 0xef, 0xfa, 0xd, 0xf, 0x18, 0x26, 0x29, 0x34, 0x3a, 0x46, 0x4d, 0x59, 0x69, 0x64, 0x6d, 0x72, 0x70, 0x7f, 0x5c, 0xed, 0xd5, 0xd4, 0xa9, 0xa8, 0xe3, 0xe6, 0xca, 0x76, 0x7f, 0x7f, 0x7f, 0x7f, 0x70, 0x70, 0x6d, 0x76, 0x70, 0x7f, 0x7f, 0x6d, 0x1c, 0xa0, 0xa5, 0xad, 0xb2, 0xb5, 0xb2, 0xb4, 0xb5, 0xb3, 0xb1, 0xaf, 0xad, 0xad, 0xab, 0xaa, 
0xa3, 0x9e, 0x9f, 0x9f, 0xa2, 0xab, 0xc2, 0xc9, 0xcd, 0xcf, 0xcf, 0xcd, 0xcb, 0xcb, 0xcc, 0xce, - 0xd4, 0xdb, 0xdb, 0xdf, 0xe1, 0xe9, 0xef, 0xf1, 0xf0, 0xf8, 0xfc, 0x04, - 0x03, 0x09, 0x0f, 0x15, 0x22, 0x24, 0x29, 0x37, 0x40, 0x4c, 0x5b, 0x60, - 0xf2, 0xea, 0x03, 0x0f, 0x1c, 0x27, 0x2b, 0x40, 0x45, 0x4b, 0x4f, 0x65, - 0x69, 0x4c, 0x5d, 0x6c, 0x65, 0x6f, 0x68, 0x00, 0xcf, 0xd2, 0xa9, 0xa8, + 0xd4, 0xdb, 0xdb, 0xdf, 0xe1, 0xe9, 0xef, 0xf1, 0xf0, 0xf8, 0xfc, 0x4, + 0x3, 0x9, 0xf, 0x15, 0x22, 0x24, 0x29, 0x37, 0x40, 0x4c, 0x5b, 0x60, + 0xf2, 0xea, 0x3, 0xf, 0x1c, 0x27, 0x2b, 0x40, 0x45, 0x4b, 0x4f, 0x65, + 0x69, 0x4c, 0x5d, 0x6c, 0x65, 0x6f, 0x68, 0x0, 0xcf, 0xd2, 0xa9, 0xa8, 0xe1, 0xe3, 0xcd, 0x7f, 0x7f, 0x72, 0x7e, 0x7f, 0x79, 0x79, 0x7a, 0x7f, 0x79, 0x7f, 0x6e, 0x66, 0xdb, 0x9a, 0xa5, 0xac, 0xaf, 0xb1, 0xb6, 0xb6, 0xb6, 0xb4, 0xb3, 0xb1, 0xb3, 0xb2, 0xb3, 0xb2, 0xb2, 0xac, 0xa7, 0xa0, 0x9f, 0xa9, 0xbf, 0xc7, 0xce, 0xce, 0xcd, 0xcf, 0xce, 0xca, 0xc7, 0xc6, - 0xc4, 0xc3, 0xcb, 0xd0, 0xd9, 0xe1, 0xe9, 0xf1, 0xf5, 0xfa, 0xfe, 0x01, - 0x0a, 0x0c, 0x12, 0x1b, 0x22, 0x2e, 0x37, 0x3e, 0x4c, 0x5e, 0x6c, 0x64, - 0xf3, 0xe7, 0x00, 0x11, 0x1b, 0x28, 0x2e, 0x3b, 0x45, 0x4a, 0x4d, 0x62, + 0xc4, 0xc3, 0xcb, 0xd0, 0xd9, 0xe1, 0xe9, 0xf1, 0xf5, 0xfa, 0xfe, 0x1, + 0xa, 0xc, 0x12, 0x1b, 0x22, 0x2e, 0x37, 0x3e, 0x4c, 0x5e, 0x6c, 0x64, + 0xf3, 0xe7, 0x0, 0x11, 0x1b, 0x28, 0x2e, 0x3b, 0x45, 0x4a, 0x4d, 0x62, 0x6c, 0x6c, 0x76, 0x7e, 0x57, 0x7c, 0x7f, 0x1c, 0xc8, 0xce, 0xa9, 0xaa, 0xda, 0xe1, 0xcb, 0x6b, 0x7f, 0x70, 0x78, 0x7b, 0x7f, 0x7f, 0x7a, 0x76, 0x7f, 0x7f, 0x75, 0x72, 0xc1, 0x9b, 0xab, 0xb1, 0xb2, 0xb4, 0xb4, 0xb5, 0xb4, 0xb3, 0xb5, 0xb2, 0xb5, 0xb7, 0xb7, 0xbc, 0xba, 0xbb, 0xba, 0xab, 0x99, 0xa4, 0xbf, 0xcb, 0xce, 0xcb, 0xd1, 0xcf, 0xc9, 0xc0, 0xbf, 0xc0, - 0xbd, 0xc0, 0xbf, 0xc4, 0xcc, 0xd7, 0xdd, 0xe0, 0xe6, 0xf4, 0x02, 0x0f, - 0x0e, 0x17, 0x1b, 0x24, 0x2e, 0x34, 0x3e, 0x4c, 0x50, 0x65, 0x65, 0x75, - 0xee, 0xf0, 0xf5, 0x09, 0x14, 0x1c, 0x2c, 0x33, 0x3e, 0x4b, 0x53, 0x64, + 0xbd, 
0xc0, 0xbf, 0xc4, 0xcc, 0xd7, 0xdd, 0xe0, 0xe6, 0xf4, 0x2, 0xf, + 0xe, 0x17, 0x1b, 0x24, 0x2e, 0x34, 0x3e, 0x4c, 0x50, 0x65, 0x65, 0x75, + 0xee, 0xf0, 0xf5, 0x9, 0x14, 0x1c, 0x2c, 0x33, 0x3e, 0x4b, 0x53, 0x64, 0x64, 0x71, 0x61, 0x7b, 0x7e, 0x7e, 0x7f, 0x48, 0xc1, 0xc3, 0xab, 0xa9, 0xd3, 0xda, 0xca, 0x71, 0x7f, 0x7f, 0x7f, 0x78, 0x7f, 0x7f, 0x7e, 0x6f, 0x7f, 0x7b, 0x6f, 0x5e, 0xb5, 0x9a, 0xac, 0xb3, 0xb2, 0xb1, 0xaf, 0xaf, 0xac, 0xb1, 0xb2, 0xb0, 0xb2, 0xb5, 0xba, 0xbc, 0xbc, 0xba, 0xc3, 0xae, 0x99, 0x9d, 0xb8, 0xcc, 0xcc, 0xcf, 0xc8, 0xc9, 0xc0, 0xbc, 0xbc, 0xbe, - 0xbb, 0xbf, 0xbf, 0xc3, 0xc7, 0xd7, 0xd7, 0xdb, 0xdf, 0xec, 0x09, 0x0e, + 0xbb, 0xbf, 0xbf, 0xc3, 0xc7, 0xd7, 0xd7, 0xdb, 0xdf, 0xec, 0x9, 0xe, 0x12, 0x1f, 0x34, 0x3d, 0x3e, 0x41, 0x4d, 0x58, 0x5b, 0x73, 0x7a, 0x78, - 0xef, 0xf1, 0xe9, 0x05, 0x0f, 0x1c, 0x25, 0x33, 0x38, 0x4c, 0x53, 0x50, + 0xef, 0xf1, 0xe9, 0x5, 0xf, 0x1c, 0x25, 0x33, 0x38, 0x4c, 0x53, 0x50, 0x6c, 0x7b, 0x7a, 0x7e, 0x6e, 0x7f, 0x7a, 0x6a, 0xaf, 0xa8, 0xa8, 0xa8, 0xba, 0xc4, 0xcb, 0x7f, 0x7f, 0x7f, 0x78, 0x7f, 0x71, 0x73, 0x77, 0x79, 0x6e, 0x68, 0x68, 0x56, 0xb0, 0x9d, 0xb3, 0xac, 0xa1, 0x9c, 0x9a, 0x9f, 0xa4, 0xa9, 0xab, 0xac, 0xac, 0xac, 0xb1, 0xb5, 0xbc, 0xc0, 0xc3, 0xa9, 0x9a, 0x9c, 0xb2, 0xcd, 0xcb, 0xcb, 0xcc, 0xc1, 0xbc, 0xb9, 0xbe, 0xbf, - 0xbf, 0xc2, 0xc2, 0xc6, 0xc9, 0xd5, 0xd6, 0xdd, 0xea, 0x07, 0x0b, 0x14, + 0xbf, 0xc2, 0xc2, 0xc6, 0xc9, 0xd5, 0xd6, 0xdd, 0xea, 0x7, 0xb, 0x14, 0x1a, 0x27, 0x2b, 0x3a, 0x40, 0x51, 0x71, 0x73, 0x6c, 0x75, 0x7a, 0x63, - 0xed, 0xf2, 0xec, 0xfc, 0x0c, 0x18, 0x1e, 0x2b, 0x36, 0x42, 0x49, 0x5b, + 0xed, 0xf2, 0xec, 0xfc, 0xc, 0x18, 0x1e, 0x2b, 0x36, 0x42, 0x49, 0x5b, 0x60, 0x72, 0x5e, 0x77, 0x7b, 0x7c, 0x5e, 0x7a, 0xb1, 0xa9, 0xa8, 0xa9, 0xa8, 0xad, 0xcd, 0x78, 0x7f, 0x76, 0x7f, 0x79, 0x74, 0x7f, 0x7e, 0x5f, 0x75, 0x79, 0x70, 0x60, 0xb2, 0xa0, 0xb5, 0xa3, 0x9d, 0x96, 0x93, 0x94, 0x9a, 0x9f, 0xa0, 0xa1, 0x9e, 0x9d, 0x9c, 0xa2, 0xa9, 0xbf, 0xbf, 0xa3, 0x9b, 0xa1, 0xcb, 0xcc, 0xcf, 0xcb, 0xc9, 0xc7, 
0xbf, 0xbb, 0xbe, 0xbf, - 0xc0, 0xc2, 0xc5, 0xcb, 0xd1, 0xd7, 0xda, 0xe7, 0x0a, 0x0f, 0x18, 0x1e, + 0xc0, 0xc2, 0xc5, 0xcb, 0xd1, 0xd7, 0xda, 0xe7, 0xa, 0xf, 0x18, 0x1e, 0x26, 0x30, 0x3c, 0x41, 0x50, 0x4b, 0x6c, 0x61, 0x78, 0x7f, 0x78, 0x7f, - 0xe9, 0xef, 0xed, 0xeb, 0x00, 0x10, 0x14, 0x23, 0x2f, 0x3b, 0x4a, 0x5c, + 0xe9, 0xef, 0xed, 0xeb, 0x0, 0x10, 0x14, 0x23, 0x2f, 0x3b, 0x4a, 0x5c, 0x5d, 0x6c, 0x5e, 0x68, 0x7f, 0x7f, 0x6f, 0x6c, 0xc1, 0xc6, 0xac, 0xaa, 0xb5, 0xb6, 0xd0, 0x7e, 0x7f, 0x7f, 0x77, 0x7f, 0x7b, 0x7c, 0x7f, 0x7f, 0x60, 0x7c, 0x70, 0x39, 0xaa, 0xaa, 0xb2, 0x9f, 0x98, 0x93, 0x91, 0x90, 0x95, 0x99, 0x9c, 0x97, 0x94, 0x92, 0x94, 0x97, 0x9e, 0xab, 0xc1, 0xa0, 0x9e, 0xa3, 0xcc, 0xcf, 0xcf, 0xcc, 0xc8, 0xca, 0xc4, 0xc4, 0xc0, 0xc4, - 0xc4, 0xc8, 0xd0, 0xd5, 0xd8, 0xd7, 0xe5, 0x09, 0x11, 0x17, 0x1e, 0x2d, + 0xc4, 0xc8, 0xd0, 0xd5, 0xd8, 0xd7, 0xe5, 0x9, 0x11, 0x17, 0x1e, 0x2d, 0x38, 0x3f, 0x4b, 0x57, 0x55, 0x74, 0x75, 0x75, 0x67, 0x7b, 0x7f, 0x75, - 0xdd, 0xe3, 0xe7, 0xe4, 0xf6, 0x06, 0x09, 0x18, 0x29, 0x39, 0x46, 0x4b, + 0xdd, 0xe3, 0xe7, 0xe4, 0xf6, 0x6, 0x9, 0x18, 0x29, 0x39, 0x46, 0x4b, 0x5c, 0x5b, 0x64, 0x7c, 0x76, 0x78, 0x7d, 0x7b, 0xc8, 0xd4, 0xaf, 0xab, 0xdd, 0xe0, 0xdc, 0x7f, 0x7f, 0x7e, 0x7d, 0x7f, 0x7f, 0x78, 0x73, 0x70, 0x7f, 0x70, 0x54, 0x16, 0xa7, 0xaf, 0xb0, 0x9e, 0x99, 0x92, 0x90, 0x92, 0x94, 0x9f, 0xa4, 0x9a, 0x94, 0x92, 0x92, 0x95, 0x9c, 0xac, 0xc4, 0xa8, 0x9a, 0xa4, 0xcc, 0xce, 0xcd, 0xcc, 0xc9, 0xcd, 0xcb, 0xcd, 0xce, 0xcd, - 0xcf, 0xd7, 0xd4, 0xd6, 0xda, 0xe7, 0x0b, 0x12, 0x18, 0x24, 0x30, 0x37, + 0xcf, 0xd7, 0xd4, 0xd6, 0xda, 0xe7, 0xb, 0x12, 0x18, 0x24, 0x30, 0x37, 0x48, 0x50, 0x59, 0x53, 0x77, 0x7f, 0x6c, 0x78, 0x72, 0x75, 0x7f, 0x7f, - 0xe8, 0xea, 0xf3, 0xf0, 0xeb, 0xf8, 0xff, 0x0a, 0x12, 0x24, 0x3b, 0x47, + 0xe8, 0xea, 0xf3, 0xf0, 0xeb, 0xf8, 0xff, 0xa, 0x12, 0x24, 0x3b, 0x47, 0x5e, 0x61, 0x63, 0x72, 0x65, 0x71, 0x7f, 0x72, 0xcb, 0xd8, 0xb2, 0xac, 0xdf, 0xea, 0xe1, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x79, 0x7e, 0x7f, 0x7e, 0x78, 
0x61, 0x1b, 0xaa, 0xb5, 0xb3, 0xa7, 0xa1, 0x98, 0x97, 0x98, 0x9c, 0xac, 0xb3, 0xa1, 0x98, 0x96, 0x90, 0x98, 0x9b, 0xa9, 0xc5, 0xad, 0x9b, 0xa7, 0xd6, 0xd7, 0xd3, 0xcf, 0xd3, 0xce, 0xcf, 0xd0, 0xcf, 0xd6, - 0xd9, 0xd8, 0xdb, 0xdb, 0xe3, 0x0b, 0x15, 0x1b, 0x27, 0x31, 0x3d, 0x4a, + 0xd9, 0xd8, 0xdb, 0xdb, 0xe3, 0xb, 0x15, 0x1b, 0x27, 0x31, 0x3d, 0x4a, 0x51, 0x59, 0x63, 0x63, 0x65, 0x6e, 0x79, 0x57, 0x7f, 0x72, 0x62, 0x78, - 0xe9, 0xe9, 0xf1, 0xf5, 0xe5, 0xf8, 0x02, 0x0e, 0x1e, 0x2c, 0x3d, 0x4c, + 0xe9, 0xe9, 0xf1, 0xf5, 0xe5, 0xf8, 0x2, 0xe, 0x1e, 0x2c, 0x3d, 0x4c, 0x5a, 0x67, 0x76, 0x76, 0x67, 0x67, 0x7c, 0x7f, 0xcc, 0xd8, 0xb4, 0xad, 0xe1, 0xf0, 0xe8, 0x7f, 0x7f, 0x7f, 0x7f, 0x50, 0x7f, 0x7c, 0x75, 0x74, 0x61, 0x74, 0x70, 0x3d, 0xa5, 0xb2, 0xb8, 0xb0, 0xa4, 0x9d, 0x9b, 0x9f, 0xa6, 0xb5, 0xb7, 0xab, 0xa0, 0x99, 0x9a, 0xa0, 0xaa, 0xad, 0xc6, 0xb0, 0x9f, 0xba, 0xe8, 0xe7, 0xeb, 0xe6, 0xe7, 0xe3, 0xe0, 0xdd, 0xde, 0xdd, - 0xde, 0xe0, 0xe3, 0xe2, 0x0a, 0x14, 0x1d, 0x2a, 0x37, 0x3e, 0x44, 0x5a, + 0xde, 0xe0, 0xe3, 0xe2, 0xa, 0x14, 0x1d, 0x2a, 0x37, 0x3e, 0x44, 0x5a, 0x62, 0x7b, 0x76, 0x7a, 0x7e, 0x7f, 0x73, 0x76, 0x75, 0x7f, 0x7f, 0x7f, - 0xe3, 0xea, 0xec, 0xf1, 0xea, 0xec, 0xfa, 0x07, 0x10, 0x23, 0x35, 0x4b, + 0xe3, 0xea, 0xec, 0xf1, 0xea, 0xec, 0xfa, 0x7, 0x10, 0x23, 0x35, 0x4b, 0x51, 0x70, 0x5f, 0x6f, 0x7f, 0x7d, 0x61, 0x66, 0xd6, 0xdb, 0xb5, 0xaa, 0xe3, 0xee, 0xf1, 0x6c, 0x6a, 0x7f, 0x7d, 0x7f, 0x72, 0x6f, 0x7f, 0x6a, 0x68, 0x7a, 0x6d, 0x4b, 0xaf, 0xb0, 0xb5, 0xae, 0xa6, 0x9f, 0xa0, 0xa7, 0xab, 0xb3, 0xbd, 0xb5, 0xab, 0x9e, 0xa0, 0xaa, 0xb2, 0xbb, 0xc8, 0xab, 0xa5, 0xd6, 0xe8, 0xe9, 0xe9, 0xeb, 0xea, 0xed, 0xea, 0xee, 0xf1, 0xf0, - 0xf6, 0xfd, 0xff, 0x0e, 0x13, 0x1f, 0x2c, 0x30, 0x45, 0x43, 0x64, 0x6c, + 0xf6, 0xfd, 0xff, 0xe, 0x13, 0x1f, 0x2c, 0x30, 0x45, 0x43, 0x64, 0x6c, 0x66, 0x61, 0x74, 0x65, 0x70, 0x77, 0x66, 0x7f, 0x6d, 0x7f, 0x7f, 0x60, - 0xe1, 0xe7, 0xea, 0xf0, 0xf1, 0xe8, 0xfa, 0xf9, 0xfd, 0x0c, 0x14, 0x33, + 0xe1, 0xe7, 0xea, 0xf0, 0xf1, 0xe8, 0xfa, 0xf9, 
0xfd, 0xc, 0x14, 0x33, 0x4d, 0x6e, 0x6c, 0x76, 0x7d, 0x75, 0x5e, 0x7a, 0xe6, 0xd8, 0xb8, 0xaa, 0xdf, 0xe9, 0xf8, 0x7f, 0x78, 0x7c, 0x7b, 0x6d, 0x6b, 0x7f, 0x72, 0x60, 0x71, 0x6e, 0x5e, 0x50, 0xc0, 0xad, 0xb0, 0xaa, 0xac, 0xa9, 0xa8, 0xa6, 0xaf, 0xb5, 0xbd, 0xba, 0xab, 0xac, 0xa9, 0xae, 0xb9, 0xbf, 0xc8, 0xaa, 0xac, 0xe1, 0xeb, 0xe7, 0xea, 0xeb, 0xec, 0xec, 0xf0, 0xf0, 0xf7, 0xfa, - 0xff, 0x08, 0x0c, 0x0f, 0x1a, 0x29, 0x32, 0x4d, 0x58, 0x59, 0x72, 0x64, + 0xff, 0x8, 0xc, 0xf, 0x1a, 0x29, 0x32, 0x4d, 0x58, 0x59, 0x72, 0x64, 0x63, 0x7c, 0x69, 0x7f, 0x7a, 0x6b, 0x7f, 0x62, 0x69, 0x76, 0x71, 0x7f, - 0xde, 0xe5, 0xe5, 0xed, 0xf1, 0xeb, 0xf8, 0xfb, 0x02, 0x03, 0x00, 0x04, - 0x08, 0x2a, 0x66, 0x79, 0x7f, 0x7f, 0x7f, 0x7e, 0x01, 0xd1, 0xb9, 0xaa, - 0xe5, 0xeb, 0x05, 0x7f, 0x7f, 0x7f, 0x7f, 0x7b, 0x7f, 0x74, 0x77, 0x7b, + 0xde, 0xe5, 0xe5, 0xed, 0xf1, 0xeb, 0xf8, 0xfb, 0x2, 0x3, 0x0, 0x4, + 0x8, 0x2a, 0x66, 0x79, 0x7f, 0x7f, 0x7f, 0x7e, 0x1, 0xd1, 0xb9, 0xaa, + 0xe5, 0xeb, 0x5, 0x7f, 0x7f, 0x7f, 0x7f, 0x7b, 0x7f, 0x74, 0x77, 0x7b, 0x6d, 0x5f, 0x5c, 0x3c, 0xe7, 0xab, 0xac, 0xad, 0xac, 0xad, 0xa6, 0xa3, 0xb0, 0xb1, 0xb9, 0xbc, 0xa9, 0xaf, 0xb1, 0xb2, 0xb7, 0xbf, 0xc5, 0xb5, - 0xbf, 0xe3, 0xe8, 0xe8, 0xed, 0xdd, 0xd6, 0xec, 0xf0, 0xf5, 0xfb, 0x05, - 0x09, 0x10, 0x15, 0x18, 0x25, 0x2f, 0x38, 0x4a, 0x51, 0x68, 0x67, 0x67, + 0xbf, 0xe3, 0xe8, 0xe8, 0xed, 0xdd, 0xd6, 0xec, 0xf0, 0xf5, 0xfb, 0x5, + 0x9, 0x10, 0x15, 0x18, 0x25, 0x2f, 0x38, 0x4a, 0x51, 0x68, 0x67, 0x67, 0x6f, 0x7f, 0x7f, 0x75, 0x68, 0x7f, 0x71, 0x7f, 0x79, 0x78, 0x7f, 0x7e, - 0xd9, 0xdf, 0xe3, 0xe7, 0xeb, 0xea, 0xf4, 0x00, 0x07, 0x03, 0x05, 0x06, + 0xd9, 0xdf, 0xe3, 0xe7, 0xeb, 0xea, 0xf4, 0x0, 0x7, 0x3, 0x5, 0x6, 0x1c, 0x28, 0x57, 0x70, 0x7f, 0x7f, 0x7f, 0x4c, 0x25, 0xcd, 0xbc, 0xaa, 0xdd, 0xea, 0x12, 0x7b, 0x7f, 0x7f, 0x7f, 0x7d, 0x77, 0x7e, 0x54, 0x78, 0x69, 0x4f, 0x13, 0xce, 0xa9, 0x9b, 0xa8, 0xa9, 0xac, 0xa8, 0xa1, 0xa2, 0x9e, 0xa2, 0xa4, 0xab, 0xa5, 0xaa, 0xb1, 0xb2, 0xb4, 0xb8, 0xbf, 0xbe, - 0xdd, 0xe4, 0xe7, 
0xe9, 0xe7, 0xdb, 0xd4, 0xef, 0xf6, 0xfb, 0x02, 0x04, - 0x0d, 0x0f, 0x1d, 0x26, 0x31, 0x39, 0x47, 0x57, 0x5b, 0x78, 0x7c, 0x7a, + 0xdd, 0xe4, 0xe7, 0xe9, 0xe7, 0xdb, 0xd4, 0xef, 0xf6, 0xfb, 0x2, 0x4, + 0xd, 0xf, 0x1d, 0x26, 0x31, 0x39, 0x47, 0x57, 0x5b, 0x78, 0x7c, 0x7a, 0x68, 0x79, 0x7f, 0x7c, 0x6e, 0x7f, 0x65, 0x7f, 0x69, 0x78, 0x70, 0x5e, - 0xe1, 0xe3, 0xe6, 0xe9, 0xe7, 0xe9, 0xe9, 0xfa, 0x0a, 0xfd, 0x07, 0x0b, + 0xe1, 0xe3, 0xe6, 0xe9, 0xe7, 0xe9, 0xe9, 0xfa, 0xa, 0xfd, 0x7, 0xb, 0x22, 0x1b, 0x5c, 0x70, 0x71, 0x71, 0x7c, 0x73, 0x48, 0xcc, 0xbe, 0xa8, 0xde, 0xea, 0x23, 0x7f, 0x7f, 0x7f, 0x7f, 0x74, 0x7e, 0x7b, 0x68, 0xf5, 0xc9, 0xb5, 0xa9, 0xa4, 0x97, 0x99, 0xa7, 0xaa, 0xa7, 0xa3, 0xa5, 0xa1, 0x98, 0x96, 0x9a, 0x9c, 0xa6, 0xa4, 0xad, 0xb3, 0xb5, 0xb7, 0xba, 0xd2, - 0xe0, 0xe3, 0xe7, 0xe7, 0xe8, 0xe7, 0xee, 0xf3, 0xfa, 0x05, 0x05, 0x0c, + 0xe0, 0xe3, 0xe7, 0xe7, 0xe8, 0xe7, 0xee, 0xf3, 0xfa, 0x5, 0x5, 0xc, 0x11, 0x1a, 0x26, 0x2f, 0x38, 0x47, 0x5a, 0x67, 0x6b, 0x79, 0x7f, 0x6f, 0x7f, 0x7f, 0x7d, 0x75, 0x7b, 0x7f, 0x7f, 0x65, 0x6c, 0x7f, 0x7b, 0x7f, - 0xe0, 0xe8, 0xe8, 0xee, 0xf0, 0xf7, 0xf6, 0xf7, 0x04, 0x0a, 0x0d, 0x10, + 0xe0, 0xe8, 0xe8, 0xee, 0xf0, 0xf7, 0xf6, 0xf7, 0x4, 0xa, 0xd, 0x10, 0x1c, 0x2e, 0x59, 0x55, 0x76, 0x7f, 0x65, 0x77, 0x60, 0xc6, 0xbc, 0xa7, 0xdd, 0xe6, 0x2b, 0x7f, 0x7a, 0x7f, 0x70, 0x4c, 0xf3, 0xc5, 0xb1, 0xa7, 0x9d, 0x9c, 0x9b, 0x97, 0x91, 0x94, 0x9d, 0xa8, 0xa5, 0xa6, 0xa7, 0xa4, 0x9d, 0x98, 0x9d, 0xa4, 0xac, 0xa9, 0xac, 0xb3, 0xb6, 0xb1, 0xa1, 0xad, - 0xbd, 0xd6, 0xe0, 0xe4, 0xe8, 0xed, 0xf1, 0xf8, 0x02, 0x04, 0x0b, 0x11, + 0xbd, 0xd6, 0xe0, 0xe4, 0xe8, 0xed, 0xf1, 0xf8, 0x2, 0x4, 0xb, 0x11, 0x1c, 0x26, 0x30, 0x3a, 0x4c, 0x58, 0x63, 0x71, 0x71, 0x73, 0x7c, 0x57, 0x78, 0x7f, 0x7f, 0x77, 0x7f, 0x7f, 0x7e, 0x7f, 0x75, 0x71, 0x7d, 0x6b, - 0xdd, 0xe0, 0xe5, 0xee, 0xf1, 0xf5, 0xfb, 0xf4, 0x04, 0x04, 0x09, 0x0a, + 0xdd, 0xe0, 0xe5, 0xee, 0xf1, 0xf5, 0xfb, 0xf4, 0x4, 0x4, 0x9, 0xa, 0x1e, 0x37, 0x6f, 0x7f, 0x63, 0x7f, 0x71, 0x7f, 0x71, 0xbf, 0xb9, 
0xa7, 0xd5, 0xdf, 0x39, 0x77, 0x7a, 0xd9, 0xbe, 0xb2, 0xab, 0xa5, 0xa0, 0x9a, 0x97, 0x97, 0x95, 0x94, 0x91, 0x90, 0x96, 0xa5, 0xa4, 0xa3, 0x9d, 0x9f, 0x9e, 0xa0, 0xa2, 0xa2, 0xa6, 0xaa, 0xaa, 0xb3, 0xb1, 0xac, 0x9a, 0xa3, - 0xa6, 0xae, 0xbf, 0xdc, 0xed, 0xf0, 0xf7, 0xff, 0x04, 0x0e, 0x13, 0x1b, + 0xa6, 0xae, 0xbf, 0xdc, 0xed, 0xf0, 0xf7, 0xff, 0x4, 0xe, 0x13, 0x1b, 0x28, 0x31, 0x3a, 0x46, 0x59, 0x6b, 0x79, 0x50, 0x76, 0x7f, 0x79, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x77, 0x7f, 0x7e, 0x73, 0x79, 0x76, 0x76, - 0xdf, 0xdc, 0xe4, 0xe7, 0xf1, 0xf6, 0xfc, 0xf4, 0x05, 0x10, 0x22, 0x30, + 0xdf, 0xdc, 0xe4, 0xe7, 0xf1, 0xf6, 0xfc, 0xf4, 0x5, 0x10, 0x22, 0x30, 0x3f, 0x5a, 0x65, 0x78, 0x77, 0x6e, 0x6b, 0x7c, 0x74, 0xc3, 0xb6, 0xa5, 0xca, 0xd6, 0x2a, 0x44, 0xba, 0xa8, 0xa1, 0xa0, 0x9f, 0x9d, 0x9a, 0x9a, 0x97, 0x96, 0x96, 0x92, 0x91, 0x91, 0x93, 0xa3, 0xa1, 0x99, 0x95, 0x95, 0x94, 0x94, 0x96, 0x9b, 0x9c, 0xa1, 0xa9, 0xb0, 0xab, 0x99, 0x95, 0x9a, - 0x9f, 0xa5, 0xa7, 0xaf, 0xbe, 0xe0, 0xf6, 0x03, 0x0c, 0x1a, 0x22, 0x2b, + 0x9f, 0xa5, 0xa7, 0xaf, 0xbe, 0xe0, 0xf6, 0x3, 0xc, 0x1a, 0x22, 0x2b, 0x40, 0x49, 0x5b, 0x63, 0x7f, 0x68, 0x78, 0x7f, 0x6b, 0x76, 0x7f, 0x75, 0x76, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x72, 0x7f, 0x76, 0x75, 0x72, 0x65, - 0xd5, 0xd7, 0xdb, 0xe2, 0xe7, 0xed, 0xf7, 0xf9, 0xfe, 0x0f, 0x19, 0x2e, + 0xd5, 0xd7, 0xdb, 0xe2, 0xe7, 0xed, 0xf7, 0xf9, 0xfe, 0xf, 0x19, 0x2e, 0x3d, 0x45, 0x63, 0x71, 0x7b, 0x7f, 0x68, 0x7f, 0x7f, 0xc4, 0xba, 0xa4, 0xc7, 0xcf, 0xc3, 0xae, 0xa4, 0xa0, 0x9f, 0x9e, 0x9c, 0x9b, 0x9c, 0x9a, 0x96, 0x95, 0x96, 0x91, 0x91, 0x91, 0x90, 0x9d, 0x9e, 0x98, 0x99, 0x97, @@ -333,7 +316,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x97, 0x9b, 0x9d, 0xa6, 0xae, 0xb3, 0xc0, 0xd1, 0xfb, 0x21, 0x2d, 0x39, 0x3b, 0x4a, 0x63, 0x63, 0x7b, 0x75, 0x6b, 0x68, 0x67, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7e, 0x7f, 0x73, 0x6d, 0x7f, 0x71, - 0xd6, 0xdb, 0xd9, 0xdd, 0xe0, 0xe6, 0xed, 0xf3, 0xf4, 0x0d, 0x16, 0x2a, + 0xd6, 0xdb, 0xd9, 0xdd, 0xe0, 0xe6, 
0xed, 0xf3, 0xf4, 0xd, 0x16, 0x2a, 0x3a, 0x46, 0x49, 0x5e, 0x70, 0x68, 0x72, 0x75, 0x7f, 0xc7, 0xb8, 0xa3, 0xc4, 0xcd, 0xaa, 0xa2, 0x9d, 0x9c, 0x9c, 0x99, 0x99, 0x98, 0x98, 0x9a, 0x96, 0x95, 0x92, 0x90, 0x92, 0x91, 0x8f, 0x9a, 0x9b, 0x95, 0x94, 0x93, @@ -341,7 +324,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x97, 0x98, 0x99, 0x9c, 0x9f, 0xa3, 0xa5, 0xad, 0xb7, 0xe0, 0x32, 0x4c, 0x5c, 0x62, 0x69, 0x6a, 0x5f, 0x6c, 0x7c, 0x73, 0x75, 0x7f, 0x7f, 0x7f, 0x7f, 0x77, 0x7f, 0x73, 0x7f, 0x68, 0x70, 0x5b, 0x6f, 0x75, 0x69, 0x5c, - 0xd4, 0xd8, 0xdc, 0xe3, 0xe5, 0xea, 0xf2, 0xfa, 0xf6, 0x0f, 0x1b, 0x28, + 0xd4, 0xd8, 0xdc, 0xe3, 0xe5, 0xea, 0xf2, 0xfa, 0xf6, 0xf, 0x1b, 0x28, 0x37, 0x48, 0x57, 0x6a, 0x63, 0x74, 0x6e, 0x7b, 0x53, 0xcc, 0xb4, 0xa3, 0xc0, 0xc3, 0xa2, 0x9d, 0x99, 0x98, 0x96, 0x95, 0x97, 0x96, 0x96, 0x95, 0x93, 0x95, 0x92, 0x8e, 0x90, 0x91, 0x8f, 0x96, 0x9a, 0x97, 0x96, 0x97, @@ -349,7 +332,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x97, 0x97, 0x99, 0x9a, 0x9c, 0x9d, 0x9c, 0xa2, 0xa7, 0xb4, 0xf5, 0x59, 0x61, 0x68, 0x69, 0x73, 0x7d, 0x7f, 0x7f, 0x75, 0x7e, 0x7f, 0x70, 0x7e, 0x7e, 0x7f, 0x78, 0x7f, 0x7f, 0x6c, 0x6d, 0x69, 0x65, 0x62, 0x53, 0x43, - 0xd2, 0xd2, 0xdb, 0xdd, 0xe7, 0xe7, 0xee, 0xf6, 0xfa, 0x02, 0x1a, 0x2c, + 0xd2, 0xd2, 0xdb, 0xdd, 0xe7, 0xe7, 0xee, 0xf6, 0xfa, 0x2, 0x1a, 0x2c, 0x3c, 0x4e, 0x5f, 0x5d, 0x5d, 0x7b, 0x7f, 0x7b, 0x6b, 0xd3, 0xa6, 0xa2, 0xa8, 0xaa, 0x9d, 0x99, 0x96, 0x96, 0x94, 0x95, 0x95, 0x94, 0x94, 0x94, 0x94, 0x91, 0x91, 0x90, 0x91, 0x91, 0x8e, 0x92, 0x9a, 0x99, 0x96, 0x97, @@ -365,8 +348,8 @@ const uint8_t g_person_data[g_person_data_size] = { 0x96, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9b, 0x9e, 0xa4, 0xb0, 0x1d, 0x71, 0x76, 0x7d, 0x79, 0x76, 0x7f, 0x7f, 0x6f, 0x7c, 0x60, 0x7f, 0x7f, 0x7f, 0x7f, 0x72, 0x63, 0x71, 0x6a, 0x74, 0x59, 0x1c, 0xd0, 0xb7, 0xb9, - 0xc8, 0xca, 0xcc, 0xd1, 0xd5, 0xda, 0xe1, 0xeb, 0xf0, 0xf7, 0x08, 0x19, - 0x2b, 0x3c, 0x4d, 0x60, 0x69, 0x71, 0x77, 0x7f, 0x7f, 0x0a, 0xb8, 0xa1, + 0xc8, 
0xca, 0xcc, 0xd1, 0xd5, 0xda, 0xe1, 0xeb, 0xf0, 0xf7, 0x8, 0x19, + 0x2b, 0x3c, 0x4d, 0x60, 0x69, 0x71, 0x77, 0x7f, 0x7f, 0xa, 0xb8, 0xa1, 0xa7, 0x9d, 0x96, 0x95, 0x94, 0x95, 0x93, 0x93, 0x92, 0x91, 0x92, 0x92, 0x91, 0x90, 0x90, 0x90, 0x90, 0x8e, 0x8c, 0x8e, 0x93, 0x94, 0x93, 0x92, 0x90, 0x91, 0x94, 0x96, 0x95, 0x98, 0x90, 0x8d, 0x92, 0x91, 0x90, 0x94, @@ -381,7 +364,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x92, 0x94, 0x94, 0x95, 0x95, 0x95, 0x97, 0x98, 0x98, 0x9d, 0xa5, 0xc2, 0x75, 0x7c, 0x67, 0x7f, 0x7f, 0x7f, 0x7f, 0x75, 0x66, 0x6c, 0x5f, 0x65, 0x7f, 0x7a, 0x5f, 0x62, 0x51, 0xf6, 0xc0, 0xc1, 0xb9, 0xb7, 0xcf, 0xf4, - 0xc2, 0xc3, 0xc9, 0xcb, 0xcf, 0xd5, 0xd9, 0xe2, 0xe4, 0xec, 0xed, 0x06, + 0xc2, 0xc3, 0xc9, 0xcb, 0xcf, 0xd5, 0xd9, 0xe2, 0xe4, 0xec, 0xed, 0x6, 0x14, 0x27, 0x33, 0x56, 0x67, 0x74, 0x7b, 0x7f, 0x7f, 0x50, 0xbe, 0xa3, 0x9f, 0x99, 0x96, 0x95, 0x95, 0x93, 0x93, 0x91, 0x92, 0x93, 0x91, 0x90, 0x90, 0x8f, 0x91, 0x90, 0x90, 0x8f, 0x8e, 0x8f, 0x8f, 0x90, 0x91, 0x90, @@ -390,7 +373,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x7f, 0x7f, 0x74, 0x7f, 0x78, 0x76, 0x7f, 0x78, 0x78, 0x6b, 0x65, 0x7f, 0x6c, 0x67, 0x66, 0x12, 0xcd, 0xc0, 0xbb, 0xb5, 0xcb, 0xed, 0xf6, 0xf5, 0xbf, 0xbe, 0xbe, 0xc1, 0xc1, 0xc8, 0xd5, 0xd7, 0xdd, 0xe1, 0xe3, 0xed, - 0x04, 0x07, 0xf3, 0x3a, 0x6c, 0x6e, 0x77, 0x7f, 0x6c, 0x65, 0xc5, 0xa5, + 0x4, 0x7, 0xf3, 0x3a, 0x6c, 0x6e, 0x77, 0x7f, 0x6c, 0x65, 0xc5, 0xa5, 0x9c, 0x95, 0x95, 0x93, 0x94, 0x91, 0x92, 0x92, 0x91, 0x91, 0x90, 0x90, 0x90, 0x91, 0x8e, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x90, 0x90, 0x8f, 0x8f, 0x91, 0x91, 0x90, 0x8f, 0x8e, 0x8d, 0x91, 0x8f, 0x8f, 0x8f, @@ -398,7 +381,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x61, 0x7f, 0x71, 0x7f, 0x71, 0x6f, 0x6f, 0x6b, 0x6e, 0x72, 0x66, 0x7f, 0x6b, 0x37, 0xdb, 0xc2, 0xbf, 0xb3, 0xc3, 0xe7, 0xfd, 0xfe, 0xef, 0xee, 0xbe, 0xbc, 0xb8, 0xbc, 0xb5, 0xb8, 0xbd, 0xd2, 0xcf, 0xd2, 0xd8, 0xd6, - 0xe8, 0xf5, 0x02, 0x27, 0x48, 0x47, 0x6c, 0x7a, 0x6c, 
0x58, 0xc1, 0xa4, + 0xe8, 0xf5, 0x2, 0x27, 0x48, 0x47, 0x6c, 0x7a, 0x6c, 0x58, 0xc1, 0xa4, 0x99, 0x95, 0x94, 0x94, 0x91, 0x92, 0x90, 0x90, 0x91, 0x90, 0x91, 0x90, 0x91, 0x90, 0x8f, 0x8f, 0x8f, 0x91, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x8f, 0x8d, 0x8f, 0x8f, 0x91, 0x90, 0x8f, 0x90, @@ -406,15 +389,15 @@ const uint8_t g_person_data[g_person_data_size] = { 0x3a, 0x7f, 0x74, 0x7c, 0x7f, 0x68, 0x61, 0x7f, 0x6c, 0x79, 0x6d, 0x54, 0xea, 0xbe, 0xc0, 0xb5, 0xc0, 0xe7, 0xf7, 0xf5, 0xf6, 0xf4, 0xeb, 0xec, 0xba, 0xb9, 0xb7, 0xb5, 0xb9, 0xb9, 0xbc, 0xc3, 0xc4, 0xcb, 0xcf, 0xd0, - 0xdc, 0xe5, 0xee, 0xf9, 0x07, 0x1a, 0x31, 0x46, 0x53, 0x41, 0xba, 0xa5, + 0xdc, 0xe5, 0xee, 0xf9, 0x7, 0x1a, 0x31, 0x46, 0x53, 0x41, 0xba, 0xa5, 0x96, 0x95, 0x93, 0x91, 0x90, 0x8f, 0x90, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8f, 0x8f, 0x91, 0x8f, 0x91, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x90, 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, 0x91, 0x90, 0x90, 0x92, 0x93, 0x94, 0x96, 0x96, 0x99, 0xa6, - 0x1b, 0x7f, 0x7f, 0x77, 0x70, 0x6a, 0x5c, 0x7f, 0x76, 0x5d, 0x08, 0xc3, + 0x1b, 0x7f, 0x7f, 0x77, 0x70, 0x6a, 0x5c, 0x7f, 0x76, 0x5d, 0x8, 0xc3, 0xaf, 0xb1, 0xb3, 0xd9, 0xf1, 0xfa, 0xf3, 0xef, 0xf0, 0xec, 0xe9, 0xe9, 0xb3, 0xb5, 0xb2, 0xb1, 0xb4, 0xb5, 0xb8, 0xba, 0xbb, 0xc2, 0xc3, 0xc7, - 0xc2, 0xd1, 0xd7, 0xdc, 0xe5, 0xee, 0x01, 0x0d, 0x16, 0x0b, 0xb7, 0xa6, + 0xc2, 0xd1, 0xd7, 0xdc, 0xe5, 0xee, 0x1, 0xd, 0x16, 0xb, 0xb7, 0xa6, 0x94, 0x95, 0x91, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x90, 0x90, 0x91, 0x92, 0x91, 0x90, 0x8f, 0x8e, 0x8f, 0x8e, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, @@ -428,54 +411,54 @@ const uint8_t g_person_data[g_person_data_size] = { 0x90, 0x90, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x8e, 0x8f, 0x90, 0x8f, 0x8f, 0x92, 0x91, 0x92, 0x93, 0x94, 0x98, 0xa1, 0xd5, 0x7f, 0x73, 0x66, 0x5a, 0x63, 0x4e, 0xe2, 0xbe, 0xb6, 0xb6, 0xce, - 0xf8, 0x00, 0xfd, 0xf8, 
0xf2, 0xef, 0xef, 0xe8, 0xec, 0xeb, 0xeb, 0xef, + 0xf8, 0x0, 0xfd, 0xf8, 0xf2, 0xef, 0xef, 0xe8, 0xec, 0xeb, 0xeb, 0xef, 0xaf, 0xb1, 0xad, 0xae, 0xaf, 0xac, 0xa8, 0xa8, 0xa8, 0xa7, 0xa9, 0xa9, 0xab, 0xb2, 0xb6, 0xbb, 0xbc, 0xbe, 0xbe, 0xc1, 0xc3, 0xbe, 0xae, 0x9e, 0x95, 0x93, 0x90, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8f, 0x8e, 0x8e, 0x8d, 0x8f, 0x8e, 0x8d, 0x8e, 0x8f, 0x8d, 0x8f, 0x8e, 0x8e, 0x8f, 0x90, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x92, 0x92, 0x97, 0x9d, - 0xbd, 0x65, 0x6f, 0x6b, 0x4e, 0xfb, 0xc6, 0xbc, 0xb5, 0xce, 0xf6, 0x00, - 0xf8, 0xf8, 0xf5, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xf5, 0xf8, 0xfc, 0x06, + 0xbd, 0x65, 0x6f, 0x6b, 0x4e, 0xfb, 0xc6, 0xbc, 0xb5, 0xce, 0xf6, 0x0, + 0xf8, 0xf8, 0xf5, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xf5, 0xf8, 0xfc, 0x6, 0xae, 0xaf, 0xae, 0xab, 0xad, 0xab, 0xac, 0xac, 0xad, 0xad, 0xaa, 0xac, 0xad, 0xac, 0xb3, 0xb1, 0xb2, 0xb4, 0xb2, 0xb1, 0xb2, 0xaf, 0xaa, 0x99, 0x97, 0x92, 0x91, 0x8e, 0x90, 0x8e, 0x8e, 0x8e, 0x8d, 0x8b, 0x8c, 0x8e, 0x8e, 0x90, 0x8e, 0x8e, 0x8f, 0x8d, 0x90, 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x8d, 0x8e, 0x8f, 0x8d, 0x8f, 0x8d, 0x8e, 0x8f, 0x90, 0x8f, 0x8d, 0x8e, 0x8e, 0x8c, 0x8e, 0x8e, 0x8c, 0x8f, 0x8f, 0x90, 0x91, 0x91, 0x95, 0x9c, - 0xae, 0x6a, 0x5b, 0x1b, 0xca, 0xc1, 0xb7, 0xc8, 0xf8, 0x05, 0xfe, 0xfc, - 0xf7, 0xf2, 0xf6, 0xf0, 0xf3, 0xf5, 0xf8, 0x05, 0x0f, 0x1f, 0x2f, 0x31, + 0xae, 0x6a, 0x5b, 0x1b, 0xca, 0xc1, 0xb7, 0xc8, 0xf8, 0x5, 0xfe, 0xfc, + 0xf7, 0xf2, 0xf6, 0xf0, 0xf3, 0xf5, 0xf8, 0x5, 0xf, 0x1f, 0x2f, 0x31, 0xb5, 0xb3, 0xb2, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xae, 0xaf, 0xb1, 0xb1, 0xb0, 0xaf, 0xad, 0xb2, 0xb3, 0xb2, 0xb1, 0xaf, 0xac, 0xa9, 0xa7, 0x98, 0x95, 0x91, 0x90, 0x90, 0x8f, 0x8e, 0x8f, 0x8d, 0x8c, 0x8c, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8c, 0x8e, 0x8e, 0x8d, 0x8d, 0x8f, 0x8f, 0x8c, 0x8f, 0x8d, 0x8e, 0x90, 0x8f, 0x8e, 0x8f, 0x8e, 0x8d, 0x8e, 0x8c, 0x8c, 0x8b, 0x8e, 0x90, 0x91, 0x90, 0x92, 0x96, 
0x99, - 0xa3, 0x11, 0xd5, 0xc2, 0xb4, 0xc3, 0xeb, 0x01, 0xfe, 0xf9, 0xf6, 0xf7, - 0xf6, 0xf5, 0xf4, 0xf5, 0xfc, 0xff, 0x09, 0x1a, 0x21, 0x27, 0x35, 0x49, + 0xa3, 0x11, 0xd5, 0xc2, 0xb4, 0xc3, 0xeb, 0x1, 0xfe, 0xf9, 0xf6, 0xf7, + 0xf6, 0xf5, 0xf4, 0xf5, 0xfc, 0xff, 0x9, 0x1a, 0x21, 0x27, 0x35, 0x49, 0xc7, 0xb8, 0xb7, 0xb5, 0xb4, 0xb5, 0xb1, 0xb0, 0xb1, 0xb1, 0xb0, 0xb0, 0xb0, 0xb0, 0xaf, 0xa7, 0xab, 0xaa, 0xad, 0xad, 0xac, 0xa5, 0xa3, 0x97, 0x94, 0x91, 0x91, 0x90, 0x90, 0x90, 0x8f, 0x8e, 0x8f, 0x8b, 0x8c, 0x8d, 0x8d, 0x8e, 0x8f, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8c, 0x8e, 0x8d, 0x8c, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8d, 0x8d, 0x8c, 0x8b, 0x8d, 0x8c, 0x8e, 0x90, 0x91, 0x92, 0x95, 0x97, - 0xa1, 0xb2, 0xb6, 0xba, 0xe9, 0x03, 0x03, 0xff, 0xf8, 0xf3, 0xf5, 0xef, - 0xf2, 0xf7, 0x01, 0x09, 0x1a, 0x2b, 0x30, 0x34, 0x48, 0x56, 0x62, 0x52, + 0xa1, 0xb2, 0xb6, 0xba, 0xe9, 0x3, 0x3, 0xff, 0xf8, 0xf3, 0xf5, 0xef, + 0xf2, 0xf7, 0x1, 0x9, 0x1a, 0x2b, 0x30, 0x34, 0x48, 0x56, 0x62, 0x52, 0xda, 0xd4, 0xc8, 0xc0, 0xbc, 0xbd, 0xba, 0xb7, 0xb3, 0xb6, 0xb8, 0xb8, 0xb8, 0xb8, 0xb4, 0xbe, 0xc1, 0xc3, 0xc0, 0xb5, 0xa4, 0xab, 0xa8, 0x9c, 0x95, 0x92, 0x92, 0x91, 0x8f, 0x8f, 0x90, 0x90, 0x8e, 0x8d, 0x8d, 0x8c, 0x8c, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8d, 0x8e, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8e, 0x8f, 0x8d, 0x8f, 0x90, 0x8e, 0x8f, 0x8e, 0x8e, 0x8f, 0x93, 0x9f, 0x8e, 0x8d, 0x8e, 0x90, 0x90, 0x91, 0x92, 0x96, 0x97, - 0x9f, 0xb2, 0xdb, 0x00, 0xfb, 0xfc, 0xfa, 0xf4, 0xf3, 0xf5, 0xf9, 0xf8, - 0x01, 0x09, 0x19, 0x2d, 0x31, 0x45, 0x4f, 0x5f, 0x6d, 0x51, 0x70, 0xc7, + 0x9f, 0xb2, 0xdb, 0x0, 0xfb, 0xfc, 0xfa, 0xf4, 0xf3, 0xf5, 0xf9, 0xf8, + 0x1, 0x9, 0x19, 0x2d, 0x31, 0x45, 0x4f, 0x5f, 0x6d, 0x51, 0x70, 0xc7, 0xff, 0xee, 0xe2, 0xd5, 0xcf, 0xc6, 0xc1, 0xc0, 0xbe, 0xbe, 0xc1, 0xbe, 0xc1, 0xc1, 0xc1, 0xc6, 0xcf, 0xd0, 0xb6, 0xa9, 0xac, 0xaf, 0xaa, 0xb4, 0x9b, 0x94, 0x91, 0x92, 0x92, 0x90, 0x91, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8d, 0x8d, 0x8f, 0x8e, 0x8e, 
0x8d, 0x8f, 0x8e, 0x8d, 0x8c, 0x8e, 0x8c, 0x8e, 0x8d, 0x8c, 0x8c, 0x8d, 0x8f, 0x8e, 0x90, 0x8e, 0x8f, 0x8d, 0x8c, 0x8f, 0xb7, 0x42, 0x92, 0x8d, 0x8f, 0x90, 0x8f, 0x90, 0x93, 0x95, 0x96, - 0xa0, 0xdb, 0xff, 0xfc, 0xf6, 0xf0, 0xf0, 0xf3, 0xf7, 0xfc, 0x07, 0x11, + 0xa0, 0xdb, 0xff, 0xfc, 0xf6, 0xf0, 0xf0, 0xf3, 0xf7, 0xfc, 0x7, 0x11, 0x2a, 0x32, 0x49, 0x54, 0x71, 0x65, 0x7f, 0x7d, 0x71, 0x7f, 0x62, 0xa0, 0x34, 0x12, 0xff, 0xeb, 0xe1, 0xd5, 0xcd, 0xca, 0xc7, 0xc3, 0xc5, 0xc8, 0xc8, 0xc7, 0xd0, 0xd8, 0xdd, 0xb0, 0xaa, 0xae, 0xac, 0xb5, 0x29, 0x62, @@ -483,31 +466,31 @@ const uint8_t g_person_data[g_person_data_size] = { 0x8d, 0x8e, 0x8d, 0x8c, 0x8c, 0x8e, 0x8d, 0x8f, 0x8d, 0x8c, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x8e, 0x8e, 0x8c, 0x8f, 0x94, 0x34, 0x6d, 0x99, 0x8f, 0x90, 0x90, 0x92, 0x91, 0x93, 0x95, 0x9a, - 0xd2, 0xf5, 0xf6, 0xf4, 0xf6, 0xf2, 0xf6, 0xff, 0x07, 0x11, 0x27, 0x36, + 0xd2, 0xf5, 0xf6, 0xf4, 0xf6, 0xf2, 0xf6, 0xff, 0x7, 0x11, 0x27, 0x36, 0x48, 0x5b, 0x66, 0x7f, 0x7f, 0x7e, 0x7f, 0x77, 0x7f, 0x6a, 0xf4, 0x9c, - 0x69, 0x41, 0x25, 0x0b, 0xf5, 0xe9, 0xdd, 0xd7, 0xd2, 0xcd, 0xc9, 0xce, + 0x69, 0x41, 0x25, 0xb, 0xf5, 0xe9, 0xdd, 0xd7, 0xd2, 0xcd, 0xc9, 0xce, 0xd0, 0xc8, 0xd4, 0xcd, 0xa9, 0xaa, 0xaa, 0xaa, 0xbd, 0x5e, 0x7f, 0x7c, 0xc9, 0x9c, 0x9a, 0x96, 0x93, 0x93, 0x92, 0x90, 0x93, 0xaf, 0x95, 0x8d, 0x8c, 0x8d, 0x8e, 0x8e, 0x8e, 0x8c, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8b, 0x8c, 0x8d, 0x8e, 0x8e, 0x8d, 0x8e, 0x8c, 0x8d, 0x91, 0xa7, 0x72, 0x6c, 0x9d, 0x90, 0x8f, 0x8f, 0x91, 0x93, 0x94, 0x95, 0xab, - 0xf1, 0xf5, 0xf5, 0xf8, 0xf9, 0x04, 0x09, 0x1a, 0x27, 0x3c, 0x52, 0x61, + 0xf1, 0xf5, 0xf5, 0xf8, 0xf9, 0x4, 0x9, 0x1a, 0x27, 0x3c, 0x52, 0x61, 0x7f, 0x7a, 0x7a, 0x70, 0x7f, 0x7f, 0x7c, 0x79, 0x7f, 0x71, 0xbe, 0x98, - 0x68, 0x56, 0x45, 0x1f, 0x0d, 0xf7, 0xea, 0xe0, 0xdb, 0xd8, 0xd0, 0xd2, + 0x68, 0x56, 0x45, 0x1f, 0xd, 0xf7, 0xea, 0xe0, 0xdb, 0xd8, 0xd0, 0xd2, 0xd4, 0xc2, 0xb7, 0xa8, 0xa9, 0xaa, 0xa7, 0xc6, 0x55, 0x6a, 0x79, 
0x63, 0xc3, 0x9b, 0x9a, 0x9a, 0x99, 0x98, 0x97, 0x97, 0x98, 0xb7, 0x99, 0x91, 0x8c, 0x8d, 0x8d, 0x8e, 0x8d, 0x8c, 0x8d, 0x8e, 0x90, 0x8c, 0x8b, 0x8c, 0x8e, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8e, 0x8d, 0x8c, 0x8d, 0x8d, 0x93, - 0x0b, 0x6b, 0x5f, 0xa3, 0x92, 0x90, 0x93, 0x94, 0x94, 0x98, 0x9c, 0xbb, - 0xf0, 0xf8, 0xfe, 0x06, 0x10, 0x27, 0x36, 0x45, 0x5e, 0x65, 0x7b, 0x7f, + 0xb, 0x6b, 0x5f, 0xa3, 0x92, 0x90, 0x93, 0x94, 0x94, 0x98, 0x9c, 0xbb, + 0xf0, 0xf8, 0xfe, 0x6, 0x10, 0x27, 0x36, 0x45, 0x5e, 0x65, 0x7b, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x73, 0x6d, 0x6b, 0x4b, 0xab, 0x95, - 0x7f, 0x6b, 0x58, 0x43, 0x25, 0x08, 0xf5, 0xeb, 0xe3, 0xdd, 0xd6, 0xd1, + 0x7f, 0x6b, 0x58, 0x43, 0x25, 0x8, 0xf5, 0xeb, 0xe3, 0xdd, 0xd6, 0xd1, 0xd1, 0xac, 0xa8, 0xab, 0xa8, 0xaa, 0xd9, 0x3f, 0x60, 0x66, 0x7d, 0x71, 0xc0, 0x9c, 0x9a, 0x99, 0x99, 0x99, 0x9a, 0x97, 0x9a, 0xc8, 0xba, 0x95, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8f, 0x8d, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8e, 0x9e, 0x57, 0x38, 0xcb, 0x98, 0x94, 0x96, 0x96, 0x98, 0x97, 0x99, 0x9d, 0xc7, - 0xff, 0x0a, 0x19, 0x2b, 0x36, 0x4d, 0x65, 0x6b, 0x7f, 0x6d, 0x7f, 0x7f, + 0xff, 0xa, 0x19, 0x2b, 0x36, 0x4d, 0x65, 0x6b, 0x7f, 0x6d, 0x7f, 0x7f, 0x7f, 0x7f, 0x75, 0x7f, 0x7d, 0x6e, 0x55, 0x46, 0x2c, 0x12, 0xa3, 0x95, 0xdf, 0xe0, 0xdc, 0xd8, 0xd4, 0xd1, 0xce, 0xca, 0xcc, 0xcc, 0xcc, 0xc9, 0xaf, 0xb1, 0xb1, 0xa9, 0xab, 0xdc, 0x19, 0x3b, 0x60, 0x7f, 0x76, 0x7f, @@ -516,15 +499,15 @@ const uint8_t g_person_data[g_person_data_size] = { 0x8d, 0x8c, 0x8c, 0x8e, 0x8d, 0x8e, 0x8c, 0x8c, 0x8d, 0x8f, 0x90, 0xb7, 0xd8, 0xbb, 0xb7, 0x98, 0x95, 0x96, 0x98, 0x9a, 0x99, 0x99, 0xa1, 0xd0, 0x1d, 0x2a, 0x3e, 0x56, 0x5e, 0x70, 0x7f, 0x7f, 0x7f, 0x61, 0x7f, 0x7f, - 0x6e, 0x7f, 0x7f, 0x61, 0x4d, 0x37, 0x27, 0x19, 0x0f, 0x05, 0xad, 0x95, - 0x5f, 0x48, 0x28, 0x04, 0xe1, 0xd0, 0xc8, 0xc3, 0xc2, 0xc6, 0xc4, 0xcd, - 0xcc, 0xcc, 0xcb, 0xcc, 0xd6, 0xdb, 0xde, 0xdf, 0xeb, 0xf2, 0x06, 0x06, + 0x6e, 0x7f, 0x7f, 0x61, 
0x4d, 0x37, 0x27, 0x19, 0xf, 0x5, 0xad, 0x95, + 0x5f, 0x48, 0x28, 0x4, 0xe1, 0xd0, 0xc8, 0xc3, 0xc2, 0xc6, 0xc4, 0xcd, + 0xcc, 0xcc, 0xcb, 0xcc, 0xd6, 0xdb, 0xde, 0xdf, 0xeb, 0xf2, 0x6, 0x6, 0xaa, 0x9b, 0x9b, 0x9a, 0x98, 0x9a, 0x98, 0x96, 0x9c, 0xb9, 0xa6, 0x97, 0x8f, 0x8c, 0x8e, 0x8d, 0x8d, 0x8d, 0x8b, 0x8c, 0x8c, 0x8d, 0x8c, 0x8d, 0x8e, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8c, 0x8b, 0x8d, 0x8d, 0x90, 0xaf, 0xb9, 0xdf, 0xea, 0x98, 0x94, 0x96, 0x97, 0x98, 0x98, 0x9b, 0x9f, 0xe7, 0x42, 0x51, 0x6e, 0x77, 0x7f, 0x7f, 0x72, 0x7f, 0x69, 0x7f, 0x6b, 0x7a, - 0x65, 0x49, 0x3b, 0x24, 0x1a, 0x15, 0x11, 0x1e, 0x00, 0xd4, 0xa5, 0x95, + 0x65, 0x49, 0x3b, 0x24, 0x1a, 0x15, 0x11, 0x1e, 0x0, 0xd4, 0xa5, 0x95, 0x6c, 0x6f, 0x60, 0x77, 0x79, 0x5e, 0x4a, 0x29, 0xd8, 0xbe, 0xc2, 0xcc, 0xcb, 0xcf, 0xcc, 0xd5, 0xd1, 0xe0, 0xe7, 0xed, 0xee, 0xe7, 0xec, 0xae, 0xa2, 0x9c, 0x9b, 0x9a, 0x98, 0x98, 0x98, 0x97, 0x9b, 0xb8, 0xa5, 0x96, @@ -534,21 +517,21 @@ const uint8_t g_person_data[g_person_data_size] = { 0x7e, 0x6e, 0x7f, 0x7a, 0x74, 0x7f, 0x7f, 0x6a, 0x51, 0x6d, 0x48, 0x35, 0x27, 0x1a, 0x10, 0x18, 0x1a, 0xf0, 0xd3, 0xd0, 0xef, 0x14, 0xdc, 0x97, 0x5f, 0x77, 0x7f, 0x79, 0x7f, 0x6d, 0x5f, 0xe0, 0xcb, 0xcc, 0xca, 0xcc, - 0xcb, 0xce, 0xc9, 0xd7, 0xeb, 0x0e, 0x26, 0x3e, 0x5c, 0x6e, 0x78, 0xb6, + 0xcb, 0xce, 0xc9, 0xd7, 0xeb, 0xe, 0x26, 0x3e, 0x5c, 0x6e, 0x78, 0xb6, 0xa1, 0x9c, 0x9a, 0x98, 0x98, 0x96, 0x95, 0x97, 0x96, 0x9f, 0x9d, 0x95, 0x90, 0x8e, 0x8d, 0x8b, 0x8d, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8b, 0x8b, 0x8c, 0x8d, 0x8c, 0x8e, 0xa7, 0xec, 0xf4, 0xef, 0xe9, 0x9c, 0x95, 0x95, 0x95, 0x98, 0x97, 0x9b, 0xa4, 0x63, 0x7f, 0x78, 0x7a, 0x62, 0x7f, 0x71, 0x59, 0x49, 0x2f, 0x1e, 0x14, 0x13, - 0x1a, 0x0d, 0xe3, 0xce, 0xde, 0x0a, 0x26, 0x36, 0x53, 0x72, 0x6a, 0x9d, - 0x0d, 0x29, 0x4a, 0x6d, 0x78, 0x4c, 0xe3, 0xd6, 0xd8, 0xd3, 0xcd, 0xd0, + 0x1a, 0xd, 0xe3, 0xce, 0xde, 0xa, 0x26, 0x36, 0x53, 0x72, 0x6a, 0x9d, + 0xd, 0x29, 0x4a, 0x6d, 0x78, 0x4c, 0xe3, 0xd6, 0xd8, 0xd3, 0xcd, 
0xd0, 0xcd, 0xd0, 0xc4, 0xd5, 0xf6, 0x1b, 0x32, 0x41, 0x5c, 0x72, 0x6a, 0xb1, 0x9f, 0xa0, 0x9a, 0x9a, 0x97, 0x96, 0x95, 0x94, 0x95, 0x9d, 0x9a, 0x94, 0x90, 0x8e, 0x8d, 0x8c, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8d, 0x8c, 0x8c, 0x8d, 0x8c, 0x8c, 0x8c, 0x8c, 0x8a, 0x8c, 0x8d, 0x8c, 0x91, 0xa6, 0xbf, 0xdf, 0xeb, 0xea, 0xa3, 0x95, 0x94, 0x96, 0x95, 0x97, 0x99, 0xa7, 0xea, - 0x1b, 0x1d, 0x18, 0x0e, 0x07, 0xf3, 0xe9, 0xe6, 0xe5, 0xe9, 0xf1, 0xd8, - 0xc7, 0xe1, 0x07, 0x28, 0x40, 0x6d, 0x71, 0x7f, 0x5b, 0x7f, 0x3b, 0x9e, + 0x1b, 0x1d, 0x18, 0xe, 0x7, 0xf3, 0xe9, 0xe6, 0xe5, 0xe9, 0xf1, 0xd8, + 0xc7, 0xe1, 0x7, 0x28, 0x40, 0x6d, 0x71, 0x7f, 0x5b, 0x7f, 0x3b, 0x9e, 0xce, 0xdc, 0xd6, 0xec, 0xf2, 0xda, 0xd8, 0xd4, 0xd0, 0xce, 0xce, 0xcd, 0xcc, 0xce, 0xc0, 0xd7, 0xf0, 0x17, 0x2b, 0x3a, 0x53, 0x68, 0x60, 0xad, 0xa4, 0xa0, 0x9c, 0x9c, 0x99, 0x95, 0x94, 0x92, 0x96, 0x99, 0x98, 0x92, @@ -562,11 +545,11 @@ const uint8_t g_person_data[g_person_data_size] = { 0xa7, 0xa1, 0x9d, 0x9c, 0x9a, 0x95, 0x93, 0x96, 0x96, 0x94, 0x94, 0x91, 0x8e, 0x8d, 0x8d, 0x8c, 0x8c, 0x8d, 0x8b, 0x8c, 0x8b, 0x8d, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8a, 0x8c, 0x8d, 0x8c, 0x8d, 0x8d, 0x95, 0xa7, 0xb4, - 0x1e, 0x73, 0x7d, 0x62, 0x9c, 0x93, 0x95, 0x96, 0x98, 0x9b, 0xad, 0x0d, - 0x17, 0x1d, 0x15, 0x06, 0xe5, 0xc4, 0xa4, 0xa0, 0xa0, 0xa2, 0xa4, 0xa4, + 0x1e, 0x73, 0x7d, 0x62, 0x9c, 0x93, 0x95, 0x96, 0x98, 0x9b, 0xad, 0xd, + 0x17, 0x1d, 0x15, 0x6, 0xe5, 0xc4, 0xa4, 0xa0, 0xa0, 0xa2, 0xa4, 0xa4, 0xa6, 0xa7, 0xa7, 0xa9, 0xaa, 0xaa, 0xaa, 0xac, 0xb1, 0xb0, 0xb0, 0xc3, 0xc1, 0xbf, 0xb8, 0xaf, 0xac, 0xa5, 0xa2, 0xa0, 0xa9, 0xcc, 0xd0, 0xc9, - 0xc9, 0xc9, 0xbd, 0xcd, 0xcf, 0xd2, 0xdc, 0xe5, 0xf1, 0x0d, 0xfc, 0xa1, + 0xc9, 0xc9, 0xbd, 0xcd, 0xcf, 0xd2, 0xdc, 0xe5, 0xf1, 0xd, 0xfc, 0xa1, 0xa5, 0xa0, 0x9e, 0x9a, 0x9a, 0x96, 0x94, 0x97, 0x97, 0x95, 0x93, 0x90, 0x8d, 0x8d, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, 0x8c, 0x8d, 0x8e, 0x96, 0xa9, 0x1a, @@ -578,7 +561,7 @@ const uint8_t 
g_person_data[g_person_data_size] = { 0xa1, 0x9e, 0x9d, 0x9a, 0x9c, 0x97, 0x94, 0x98, 0x9a, 0x94, 0x90, 0x90, 0x8e, 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8b, 0x8c, 0x8d, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8d, 0x8d, 0x91, 0x9a, 0xaa, 0x6a, - 0x79, 0x7f, 0x7e, 0x7a, 0xad, 0x96, 0x95, 0x96, 0x99, 0x9c, 0xa5, 0x01, + 0x79, 0x7f, 0x7e, 0x7a, 0xad, 0x96, 0x95, 0x96, 0x99, 0x9c, 0xa5, 0x1, 0x47, 0x6f, 0x7f, 0x6f, 0x7f, 0x4a, 0xac, 0xa1, 0xa1, 0xa4, 0xa6, 0xa7, 0xa5, 0xa8, 0xa9, 0xaa, 0xa9, 0xa8, 0xa8, 0xa7, 0xac, 0xaf, 0xaf, 0xb7, 0xbe, 0xc8, 0xcf, 0xc8, 0xc3, 0xb8, 0xbe, 0xc9, 0xc9, 0xca, 0xc5, 0xc1, @@ -587,7 +570,7 @@ const uint8_t g_person_data[g_person_data_size] = { 0x8e, 0x8c, 0x8c, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8d, 0x91, 0x9a, 0xa8, 0x27, 0x7f, 0x69, 0x71, 0x7b, 0xb2, 0x96, 0x98, 0x96, 0x98, 0x9a, 0xa5, 0x62, - 0x71, 0x7f, 0x7f, 0x7f, 0x08, 0xdd, 0xa8, 0xa2, 0xa3, 0xa4, 0xa7, 0xa7, + 0x71, 0x7f, 0x7f, 0x7f, 0x8, 0xdd, 0xa8, 0xa2, 0xa3, 0xa4, 0xa7, 0xa7, 0xa6, 0xa8, 0xa9, 0xa8, 0xa9, 0xa8, 0xa5, 0xa8, 0xa6, 0xab, 0xac, 0xaf, 0xd0, 0xda, 0xde, 0xe5, 0xe1, 0xc5, 0xc1, 0xc7, 0xc8, 0xc7, 0xc4, 0xbe, 0xc3, 0xbc, 0xc0, 0xcd, 0xcd, 0xcd, 0xcb, 0xca, 0xc8, 0xc4, 0xae, 0x9e, @@ -788,5 +771,5 @@ const uint8_t g_person_data[g_person_data_size] = { 0x8b, 0x8b, 0x8c, 0x8b, 0x8c, 0x8d, 0x8f, 0x90, 0x8e, 0x8f, 0x91, 0x93, 0x99, 0x9c, 0x9b, 0x9c, 0x9f, 0x9d, 0xa1, 0xa5, 0xac, 0x9d, 0x8d, 0x8c, 0x8b, 0x92, 0x9d, 0x9d, 0xa6, 0xa4, 0x9a, 0x98, 0x94, 0x9b, 0x94, 0x94, - 0x95, 0x97, 0x96, 0x94, 0x94, 0x94, 0x9b, 0x95, 0x95, 0x95, 0x93, 0xa5}; -unsigned int _tmp_dump_person_4_bmp_len = 9216; + 0x95, 0x97, 0x96, 0x94, 0x94, 0x94, 0x9b, 0x95, 0x95, 0x95, 0x93, 0xa5, +}; diff --git a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/no_person_image_data.cpp b/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/no_person_image_data.cpp 
deleted file mode 100644 index 0c87e8f..0000000 --- a/examples/person_detection/tensorflow/lite/micro/tools/make/downloads/person_model_int8/no_person_image_data.cpp +++ /dev/null @@ -1,792 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// See the header for documentation on the meaning of this data. - -#include "no_person_image_data.h" - -#include "model_settings.h" - -const int g_no_person_data_size = kMaxImageSize; -const uint8_t g_no_person_data[g_no_person_data_size] = { - 0xe7, 0xe7, 0xe5, 0xe1, 0xe5, 0xe8, 0xef, 0xf0, 0xf9, 0xfb, 0x02, 0x03, - 0x0c, 0x12, 0x1c, 0x1f, 0x30, 0x41, 0x53, 0x5d, 0x62, 0x71, 0x77, 0x66, - 0x74, 0x74, 0x7f, 0x7f, 0x75, 0x7f, 0x64, 0xee, 0xc8, 0xb4, 0xec, 0xec, - 0x7f, 0x7f, 0x7f, 0x7e, 0x65, 0x79, 0x7a, 0x71, 0x65, 0x79, 0x73, 0x53, - 0x5c, 0x53, 0x48, 0x3d, 0x19, 0x28, 0x1e, 0x19, 0x15, 0x12, 0x08, 0x07, - 0x05, 0x05, 0x02, 0x00, 0xf9, 0xfb, 0x01, 0x03, 0x07, 0x09, 0x09, 0x08, - 0x12, 0x17, 0x20, 0x2d, 0x2f, 0x43, 0x4a, 0x55, 0x5e, 0x60, 0x56, 0x6e, - 0x74, 0x7c, 0x7e, 0x7f, 0x74, 0x7f, 0x72, 0x6f, 0x7f, 0x7a, 0x7f, 0x7f, - 0xe6, 0xe7, 0xe3, 0xe5, 0xdb, 0xe6, 0xed, 0xef, 0xf3, 0xf7, 0xfa, 0x03, - 0x0b, 0x0f, 0x19, 0x26, 0x1b, 0x41, 0x53, 0x61, 0x71, 0x68, 0x7a, 0x76, - 0x7f, 0x7f, 0x69, 0x7c, 0x7f, 0x7f, 0x50, 0xea, 0xc9, 0xb2, 0xef, 0xeb, - 0x7f, 0x7f, 0x7a, 0x78, 0x71, 0x7b, 0x7f, 0x76, 0x6a, 
0x76, 0x62, 0x6e, - 0x50, 0x53, 0x41, 0x31, 0x1f, 0x24, 0x19, 0x15, 0x0d, 0x0b, 0x07, 0x07, - 0x06, 0x02, 0x01, 0xfa, 0xf8, 0x03, 0x04, 0x07, 0x0a, 0x0a, 0x0e, 0x13, - 0x17, 0x24, 0x29, 0x35, 0x44, 0x4e, 0x55, 0x69, 0x69, 0x63, 0x5c, 0x7a, - 0x7a, 0x70, 0x6f, 0x72, 0x5c, 0x7c, 0x75, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, - 0xe6, 0xe3, 0xe5, 0xe5, 0xdf, 0xdc, 0xe4, 0xe8, 0xf0, 0xf1, 0xf3, 0x00, - 0x00, 0x08, 0x0f, 0x18, 0x17, 0x32, 0x46, 0x4f, 0x50, 0x58, 0x67, 0x7c, - 0x6f, 0x7f, 0x7f, 0x7f, 0x7c, 0x6e, 0x73, 0xe1, 0xcc, 0xb0, 0xed, 0xed, - 0x7f, 0x71, 0x7f, 0x77, 0x7c, 0x76, 0x6d, 0x5b, 0x7e, 0x55, 0x69, 0x50, - 0x49, 0x3d, 0x2c, 0x16, 0x15, 0x14, 0x11, 0x0d, 0x09, 0x06, 0x00, 0x05, - 0xfe, 0x00, 0xfc, 0xf2, 0x02, 0x02, 0x05, 0x06, 0x0b, 0x0e, 0x17, 0x1b, - 0x1f, 0x2d, 0x39, 0x3f, 0x4f, 0x5a, 0x66, 0x63, 0x6e, 0x72, 0x7a, 0x7f, - 0x76, 0x6f, 0x7f, 0x7f, 0x7b, 0x7e, 0x7f, 0x7f, 0x66, 0x7f, 0x7f, 0x7b, - 0xe1, 0xde, 0xe2, 0xdd, 0xe2, 0xd8, 0xdb, 0xe1, 0xe8, 0xec, 0xee, 0xf4, - 0xf9, 0x03, 0x06, 0x0c, 0x11, 0x16, 0x35, 0x3e, 0x30, 0x0e, 0x59, 0x7f, - 0x64, 0x73, 0x7f, 0x7c, 0x6e, 0x79, 0x6a, 0xde, 0xce, 0xae, 0xea, 0xec, - 0x7e, 0x6a, 0x75, 0x7f, 0x74, 0x7f, 0x72, 0x78, 0x7f, 0x69, 0x64, 0x50, - 0x48, 0x3a, 0x32, 0x0f, 0x1e, 0x15, 0x11, 0x08, 0x02, 0xfb, 0xf8, 0xf8, - 0xf6, 0xf4, 0xf1, 0xf9, 0xfe, 0x04, 0x03, 0x0b, 0x0e, 0x15, 0x1a, 0x20, - 0x2d, 0x35, 0x43, 0x4c, 0x57, 0x57, 0x5d, 0x76, 0x6f, 0x7c, 0x79, 0x70, - 0x79, 0x7f, 0x7f, 0x7f, 0x6f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x76, - 0xe2, 0xe0, 0xdd, 0xde, 0xdb, 0xdb, 0xd2, 0xdd, 0xdd, 0xde, 0xe2, 0xef, - 0xf8, 0x01, 0x02, 0x07, 0x07, 0x06, 0x27, 0x32, 0x3b, 0x43, 0x5c, 0x6a, - 0x7b, 0x74, 0x77, 0x6a, 0x76, 0x77, 0x7f, 0xdc, 0xcb, 0xae, 0xe7, 0xec, - 0x7a, 0x7b, 0x77, 0x73, 0x60, 0x7f, 0x6e, 0x7f, 0x61, 0x53, 0x28, 0x45, - 0x43, 0x2e, 0x1d, 0x21, 0x1b, 0x11, 0x0e, 0x04, 0x00, 0xfc, 0x00, 0xfa, - 0xf9, 0xf8, 0xf4, 0xff, 0x03, 0x07, 0x08, 0x0b, 0x0c, 0x18, 0x1f, 0x2e, - 0x34, 0x3f, 0x54, 0x5a, 0x5b, 0x60, 0x6e, 0x7b, 0x6c, 
0x6e, 0x79, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7d, 0x7f, 0x7e, 0x7d, 0x7f, 0x7f, 0x7c, 0x7f, - 0xe1, 0xe0, 0xdf, 0xde, 0xd9, 0xd8, 0xd7, 0xd3, 0xd8, 0xc6, 0xc8, 0xd6, - 0xdd, 0x02, 0xee, 0xf3, 0xf6, 0xfb, 0x04, 0x14, 0x1e, 0x2d, 0x3e, 0x58, - 0x50, 0x60, 0x75, 0x7f, 0x7f, 0x62, 0x7f, 0xce, 0xc7, 0xad, 0xdb, 0xe6, - 0x6c, 0x7f, 0x66, 0x75, 0x77, 0x68, 0x66, 0x76, 0x5b, 0x36, 0x27, 0x3a, - 0x30, 0x26, 0x0c, 0x1a, 0x11, 0x0d, 0x0c, 0x0b, 0x01, 0xfe, 0x02, 0xfd, - 0xfc, 0xf1, 0xfe, 0x02, 0x04, 0x09, 0x0e, 0x13, 0x18, 0x1e, 0x25, 0x36, - 0x3f, 0x4a, 0x5d, 0x59, 0x6e, 0x66, 0x68, 0x72, 0x7c, 0x7f, 0x79, 0x7f, - 0x7f, 0x79, 0x7f, 0x7f, 0x7f, 0x75, 0x7f, 0x7f, 0x7f, 0x7f, 0x76, 0x74, - 0xdd, 0xde, 0xd9, 0xda, 0xd7, 0xda, 0xd6, 0xd0, 0xd3, 0xd6, 0xd1, 0xd3, - 0xd1, 0xdc, 0xea, 0xed, 0xf3, 0xf7, 0xf6, 0x0f, 0x17, 0x1e, 0x30, 0x3d, - 0x4f, 0x5f, 0x71, 0x7f, 0x79, 0x70, 0x7f, 0xbe, 0xac, 0xb0, 0xb4, 0xd2, - 0x62, 0x7f, 0x59, 0x62, 0x6f, 0x74, 0x6a, 0x51, 0x4b, 0x3e, 0x35, 0x2c, - 0x22, 0x13, 0x0a, 0x11, 0x0d, 0x06, 0x02, 0x06, 0x00, 0xfc, 0xfc, 0xfd, - 0xf3, 0xfb, 0xfd, 0x05, 0x08, 0x07, 0x11, 0x19, 0x1c, 0x29, 0x30, 0x42, - 0x47, 0x55, 0x61, 0x6d, 0x71, 0x7b, 0x79, 0x75, 0x70, 0x70, 0x7d, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, 0x78, - 0xdc, 0xdb, 0xda, 0xd6, 0xd8, 0xd7, 0xd6, 0xd1, 0xcb, 0xd3, 0xd1, 0xd4, - 0xdb, 0xde, 0xe0, 0xe5, 0xe6, 0xec, 0xeb, 0xf7, 0x02, 0x09, 0x15, 0x1a, - 0x27, 0x35, 0x4a, 0x5b, 0x5b, 0x6c, 0x58, 0xcb, 0xce, 0xac, 0xe7, 0xe2, - 0x48, 0x5d, 0x6f, 0x58, 0x4c, 0x5e, 0x4b, 0x45, 0x37, 0x2b, 0x1f, 0x18, - 0x12, 0xfc, 0x03, 0x01, 0xfc, 0xfc, 0xfa, 0xfb, 0xfa, 0xfc, 0xf8, 0xf5, - 0xf0, 0xfb, 0xfd, 0x02, 0x0b, 0x0f, 0x17, 0x1b, 0x29, 0x32, 0x3f, 0x4b, - 0x58, 0x62, 0x73, 0x60, 0x6d, 0x79, 0x7f, 0x72, 0x7e, 0x68, 0x7f, 0x7f, - 0x7f, 0x7a, 0x73, 0x7e, 0x7f, 0x7f, 0x7f, 0x79, 0x79, 0x7e, 0x72, 0x75, - 0xd8, 0xd9, 0xd6, 0xd6, 0xd4, 0xd3, 0xd1, 0xd0, 0xcc, 0xcb, 0xd0, 0xd2, - 0xd4, 0xd6, 0xd9, 0xdc, 0xdf, 0xe1, 0xe4, 0xde, 0xec, 
0xf5, 0xf9, 0xfe, - 0x03, 0x0e, 0x1d, 0x26, 0x2d, 0x2f, 0x44, 0xc5, 0xd6, 0xad, 0xf4, 0xdf, - 0x58, 0x59, 0x4a, 0x35, 0x3d, 0x32, 0x2b, 0x3a, 0x35, 0x14, 0x13, 0x0d, - 0x05, 0xf3, 0x02, 0xff, 0xf9, 0xf7, 0xf7, 0xf8, 0xf5, 0xf3, 0xf3, 0xec, - 0xf9, 0xfb, 0xfe, 0x06, 0x0d, 0x12, 0x14, 0x1e, 0x29, 0x37, 0x47, 0x4f, - 0x5e, 0x5c, 0x66, 0x65, 0x7f, 0x7f, 0x7f, 0x7f, 0x76, 0x7f, 0x7f, 0x7f, - 0x79, 0x69, 0x7f, 0x7f, 0x7f, 0x6f, 0x7d, 0x7f, 0x7f, 0x60, 0x7f, 0x70, - 0xdb, 0xd1, 0xd0, 0xd0, 0xcb, 0xce, 0xcc, 0xcd, 0xcc, 0xc8, 0xc2, 0xc3, - 0xc2, 0xc5, 0xc6, 0xca, 0xcc, 0xcc, 0xd2, 0xcf, 0xdc, 0xe1, 0xe2, 0xe8, - 0xea, 0xf1, 0xf8, 0xff, 0x04, 0xf5, 0x0d, 0xc8, 0xd7, 0xad, 0xf4, 0xdd, - 0x1f, 0x24, 0x16, 0x0c, 0x17, 0x0b, 0x0a, 0xf7, 0xf6, 0xec, 0x02, 0xfa, - 0xf6, 0xf7, 0xf9, 0xf6, 0xf1, 0xf2, 0xf3, 0xef, 0xf2, 0xf8, 0xf0, 0xf7, - 0xfa, 0xf9, 0xf0, 0xe3, 0xd1, 0xc9, 0xbd, 0xb7, 0xb3, 0xb0, 0xaf, 0xae, - 0xab, 0xac, 0xb0, 0xb1, 0xb4, 0xbf, 0xc5, 0xd7, 0xee, 0x09, 0x31, 0x57, - 0x74, 0x73, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x6f, 0x69, 0x7f, 0x70, 0x09, - 0xd3, 0xd4, 0xcd, 0xca, 0xca, 0xca, 0xc7, 0xca, 0xc7, 0xc7, 0xc3, 0xb9, - 0xba, 0xb9, 0xba, 0xba, 0xbb, 0xc0, 0xc2, 0xc0, 0xd0, 0xd6, 0xd9, 0xda, - 0xdb, 0xdf, 0xdf, 0xe2, 0xe4, 0xd9, 0xea, 0xc5, 0xd3, 0xae, 0xf6, 0xd3, - 0xfa, 0xf7, 0xf0, 0xee, 0xf1, 0xf5, 0xf2, 0xf3, 0xf2, 0xf3, 0xf5, 0xf2, - 0xe7, 0xee, 0xf1, 0xee, 0xee, 0xec, 0xef, 0xef, 0xf0, 0xf3, 0xf0, 0xf2, - 0xbc, 0x9d, 0x97, 0x95, 0x93, 0x95, 0x92, 0x92, 0x93, 0x93, 0x92, 0x94, - 0x96, 0x95, 0x97, 0x96, 0x95, 0x98, 0x99, 0x9a, 0x9e, 0x9e, 0xa4, 0xa5, - 0xac, 0xb0, 0xb3, 0xba, 0xc9, 0x11, 0x7b, 0x75, 0x7f, 0x32, 0xe0, 0xe6, - 0xcf, 0xcd, 0xd0, 0xc9, 0xc7, 0xc8, 0xc5, 0xc9, 0xc8, 0xc7, 0xc5, 0xc0, - 0xbc, 0xb7, 0xb9, 0xb6, 0xb9, 0xb9, 0xb8, 0xbc, 0xc3, 0xcb, 0xcc, 0xce, - 0xcd, 0xcd, 0xce, 0xcf, 0xd3, 0xcc, 0xd8, 0xc6, 0xd3, 0xad, 0xf7, 0xd2, - 0xe6, 0xe3, 0xdf, 0xe3, 0xe8, 0xe6, 0xe8, 0xeb, 0xe8, 0xeb, 0xec, 0xe7, - 0xe2, 0xeb, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, 0xf0, 
0xee, 0xf1, 0xae, - 0x93, 0x91, 0x8f, 0x91, 0x90, 0x90, 0x92, 0x91, 0x91, 0x90, 0x91, 0x91, - 0x92, 0x92, 0x93, 0x94, 0x94, 0x93, 0x95, 0x95, 0x96, 0x97, 0x98, 0x9a, - 0x9b, 0x9c, 0x9f, 0xa0, 0xa2, 0xa7, 0xda, 0x58, 0xea, 0xcc, 0xcf, 0xcb, - 0xd4, 0xd3, 0xd3, 0xd4, 0xce, 0xca, 0xc6, 0xc8, 0xc6, 0xc3, 0xc3, 0xc0, - 0xbd, 0xbc, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xb9, 0xc8, 0xca, 0xcb, - 0xcd, 0xc9, 0xc9, 0xc6, 0xc8, 0xc6, 0xc9, 0xc7, 0xc7, 0xad, 0xef, 0xd3, - 0xd8, 0xd5, 0xd0, 0xd6, 0xd7, 0xdc, 0xdc, 0xe0, 0xe1, 0xe2, 0xe3, 0xe3, - 0xe6, 0xe8, 0xec, 0xed, 0xf1, 0xeb, 0xef, 0xf3, 0xeb, 0xf2, 0xdd, 0x94, - 0x90, 0x90, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x8e, 0x90, 0x90, 0x90, 0x90, - 0x91, 0x90, 0x91, 0x92, 0x91, 0x92, 0x91, 0x93, 0x92, 0x94, 0x95, 0x97, - 0x98, 0x98, 0x9a, 0x9a, 0x9c, 0x9d, 0xa3, 0xc5, 0xc2, 0xc0, 0xd6, 0x25, - 0xc5, 0xc3, 0xc5, 0xcc, 0xcb, 0xc9, 0xc7, 0xc7, 0xc9, 0xc6, 0xc7, 0xc6, - 0xc3, 0xc8, 0xc8, 0xc5, 0xc6, 0xc6, 0xc9, 0xca, 0xc7, 0xc4, 0xcc, 0xcf, - 0xce, 0xc9, 0xc8, 0xc5, 0xc2, 0xc0, 0xc2, 0xc2, 0xbd, 0xae, 0xe3, 0xd4, - 0xd3, 0xaf, 0xaa, 0xad, 0xb5, 0xc0, 0xc9, 0xd0, 0xda, 0xde, 0xdf, 0xe2, - 0xed, 0xec, 0xef, 0xee, 0xec, 0xf1, 0xf0, 0xe8, 0xf0, 0xf9, 0xb8, 0x90, - 0x91, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x90, - 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x91, 0x93, 0x93, 0x95, 0x96, - 0x96, 0x97, 0x98, 0x98, 0x99, 0x99, 0x9e, 0xbb, 0xd0, 0x14, 0x3d, 0x3b, - 0xf2, 0xf4, 0xf2, 0xf4, 0xf4, 0xf5, 0xf3, 0xf3, 0xf4, 0xf7, 0xf5, 0xf8, - 0xf8, 0xf3, 0xf5, 0xf0, 0xf1, 0xed, 0xed, 0xeb, 0xe5, 0xde, 0xec, 0xf7, - 0xf7, 0xd0, 0xde, 0xe2, 0xdc, 0xd9, 0xd3, 0xd1, 0xce, 0xc5, 0xe0, 0xd6, - 0xd1, 0xae, 0xa5, 0xa3, 0x9f, 0x9e, 0xa2, 0xa3, 0xa9, 0xaf, 0xc0, 0xd3, - 0xed, 0x05, 0x0b, 0x0d, 0xfa, 0xfa, 0xf7, 0xe7, 0x01, 0x00, 0xa6, 0x90, - 0x90, 0x90, 0x8e, 0x8f, 0x8e, 0x90, 0x8f, 0x90, 0x91, 0x90, 0x90, 0x91, - 0x8f, 0x90, 0x90, 0x91, 0x92, 0x93, 0x93, 0x92, 0x93, 0x93, 0x93, 0x95, - 0x95, 0x97, 0x96, 0x97, 0x98, 0x9a, 0x9e, 0xf8, 0x3a, 
0x37, 0x34, 0x2d, - 0xbb, 0xb8, 0xb6, 0xb6, 0xb3, 0xb4, 0xb2, 0xb2, 0xb6, 0xb5, 0xb8, 0xb7, - 0xb9, 0xbb, 0xbc, 0xbf, 0xc5, 0xc7, 0xcd, 0xd4, 0xde, 0x07, 0xfa, 0xff, - 0xfe, 0xdf, 0x0d, 0x01, 0x0a, 0x07, 0x0f, 0x0f, 0x14, 0x13, 0x17, 0x17, - 0x0b, 0xb0, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa9, 0xa5, 0xa4, 0xa1, 0x9f, - 0xa1, 0xc0, 0xae, 0xb2, 0xbd, 0xca, 0xd3, 0xec, 0xfb, 0xfc, 0x9e, 0x90, - 0x90, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x91, 0x90, 0x90, 0x90, 0x90, 0x90, - 0x91, 0x92, 0x91, 0x90, 0x90, 0x92, 0x91, 0x92, 0x91, 0x92, 0x93, 0x93, - 0x95, 0x94, 0x96, 0x97, 0x97, 0x98, 0x9f, 0x21, 0x31, 0x2c, 0x21, 0x1d, - 0x1d, 0x15, 0x0c, 0xfc, 0xf0, 0xee, 0xee, 0xe0, 0xd8, 0xd7, 0xd2, 0xd1, - 0xd1, 0xd1, 0xcd, 0xcc, 0xcf, 0xce, 0xcc, 0xcd, 0xd2, 0x06, 0xfc, 0x01, - 0xfe, 0xe1, 0x10, 0xcc, 0xc7, 0xc9, 0xc7, 0xcc, 0xc5, 0xc6, 0xc6, 0xc6, - 0xc7, 0xb0, 0xab, 0xac, 0xab, 0xaa, 0xaa, 0xa9, 0xa9, 0xa7, 0xa6, 0xa6, - 0xa7, 0xa5, 0xa4, 0xa4, 0xa1, 0xa1, 0xa0, 0xa1, 0xa6, 0xaa, 0x96, 0x90, - 0x90, 0x8f, 0x8f, 0x8e, 0x90, 0x8f, 0x8f, 0x90, 0x91, 0x91, 0x91, 0x91, - 0x91, 0x90, 0x91, 0x91, 0x91, 0x92, 0x91, 0x91, 0x92, 0x93, 0x93, 0x94, - 0x95, 0x96, 0x95, 0x95, 0x96, 0x98, 0xa0, 0x18, 0x29, 0x21, 0x1a, 0x17, - 0x67, 0x5e, 0x64, 0x51, 0x34, 0x1e, 0x0e, 0x09, 0xf2, 0xe8, 0xe1, 0xde, - 0xdb, 0xda, 0xd8, 0xd9, 0xd8, 0xdd, 0xde, 0xe1, 0xec, 0x09, 0xfa, 0xfd, - 0xfd, 0xe5, 0x18, 0xe3, 0xe0, 0xeb, 0xdc, 0xf6, 0x27, 0x34, 0x30, 0x25, - 0x34, 0xb7, 0xab, 0xad, 0xa9, 0xaa, 0xa9, 0xa9, 0xa7, 0xa5, 0xa9, 0xa5, - 0xa7, 0xa5, 0xa6, 0xa6, 0xa6, 0xa5, 0xa6, 0xa5, 0xa5, 0xa0, 0x92, 0x8f, - 0x8f, 0x8f, 0x90, 0x90, 0x8d, 0x8f, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x90, - 0x90, 0x8e, 0x90, 0x91, 0x91, 0x91, 0x90, 0x90, 0x92, 0x90, 0x93, 0x93, - 0x92, 0x93, 0x93, 0x93, 0x95, 0x98, 0x9f, 0x10, 0x18, 0x19, 0x1d, 0x20, - 0x7f, 0x7f, 0x71, 0x68, 0x52, 0x4d, 0x3a, 0x26, 0x1d, 0x06, 0xfb, 0xf3, - 0xee, 0xe7, 0xe5, 0xe6, 0xe4, 0xe7, 0xeb, 0xec, 0xf6, 0x07, 0xfd, 0x03, - 0x00, 0xe2, 0x18, 0xee, 0xe2, 0xea, 0xf0, 0x75, 0x5f, 
0x6e, 0x7f, 0x72, - 0x77, 0xc5, 0xab, 0xad, 0xac, 0xac, 0xaa, 0xaa, 0xa9, 0xaa, 0xa9, 0xa9, - 0xa7, 0xa9, 0xa7, 0xa7, 0xa7, 0xa9, 0xa6, 0xa6, 0xa6, 0xa2, 0x91, 0x8f, - 0x90, 0x8d, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x8f, 0x8f, 0x8e, 0x90, - 0x90, 0x8f, 0x8f, 0x90, 0x92, 0x90, 0x90, 0x92, 0x92, 0x93, 0x93, 0x92, - 0x93, 0x93, 0x93, 0x95, 0x96, 0x97, 0xa1, 0x09, 0x1b, 0x21, 0x26, 0x34, - 0x7f, 0x7b, 0x60, 0x57, 0x38, 0x19, 0x58, 0x57, 0x43, 0x34, 0x1b, 0x0d, - 0x06, 0xfa, 0xf4, 0xf0, 0xeb, 0xee, 0xf0, 0xf1, 0xfe, 0x03, 0xfa, 0xfd, - 0x00, 0xe1, 0x16, 0xef, 0xdf, 0x02, 0x66, 0x7d, 0x7f, 0x7d, 0x53, 0x7f, - 0x73, 0xd6, 0xae, 0xac, 0xaa, 0xaa, 0xac, 0xaa, 0xaa, 0xaa, 0xa9, 0xa7, - 0xa9, 0xa9, 0xa7, 0xa7, 0xa9, 0xa6, 0xa6, 0xa6, 0xa5, 0xa0, 0x90, 0x8f, - 0x8f, 0x8f, 0x8d, 0x8f, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x8e, 0x90, 0x8e, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x90, 0x90, 0x91, 0x92, 0x93, 0x93, - 0x92, 0x93, 0x94, 0x93, 0x94, 0x97, 0xa2, 0x16, 0x1c, 0x19, 0x0e, 0x0b, - 0x73, 0x79, 0x1d, 0x26, 0x28, 0x1e, 0x4d, 0x5f, 0x5e, 0x4f, 0x3b, 0x29, - 0x1a, 0x0a, 0x06, 0xfc, 0xfa, 0xf2, 0xfa, 0xfb, 0x07, 0x02, 0xf9, 0x01, - 0x00, 0xe2, 0x14, 0xea, 0xfa, 0x7f, 0x7f, 0x7f, 0x74, 0x78, 0x75, 0x7e, - 0x78, 0xe4, 0xb0, 0xae, 0xac, 0xaa, 0xaa, 0xa9, 0xab, 0xa9, 0xac, 0xaa, - 0xa9, 0xa9, 0xa9, 0xa9, 0xa7, 0xa7, 0xa6, 0xa9, 0xa7, 0xa0, 0x8f, 0x8f, - 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x90, - 0x8f, 0x90, 0x8f, 0x90, 0x8f, 0x8f, 0x90, 0x91, 0x90, 0x91, 0x92, 0x91, - 0x92, 0x93, 0x93, 0x92, 0x95, 0x95, 0xa2, 0xd4, 0xd7, 0xdc, 0xe6, 0xf4, - 0x79, 0x75, 0xca, 0xf5, 0x7f, 0x74, 0x67, 0x7d, 0x6f, 0x62, 0x4e, 0x47, - 0x30, 0x1f, 0x12, 0x08, 0x0b, 0x00, 0xfe, 0x06, 0x08, 0x02, 0xfa, 0xfd, - 0x02, 0xe2, 0x15, 0x05, 0x77, 0x69, 0x7b, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, - 0x7f, 0xeb, 0xaf, 0xac, 0xac, 0xaa, 0xab, 0xaa, 0xaa, 0xaa, 0xab, 0xab, - 0xaa, 0xaa, 0xaa, 0xa9, 0xa9, 0xa7, 0xa6, 0xa6, 0xa6, 0x9f, 0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8d, 0x8d, 0x8e, 0x8d, 0x8f, 0x8e, 
0x90, 0x90, 0x8f, - 0x8f, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x8e, 0x91, 0x91, 0x91, 0x93, - 0x92, 0x91, 0x93, 0x92, 0x94, 0x97, 0xad, 0x46, 0x58, 0x6a, 0x7a, 0x69, - 0x6a, 0x6c, 0xd7, 0xfa, 0x38, 0x69, 0x7f, 0x7f, 0x7f, 0x68, 0x5b, 0x5d, - 0x4c, 0x35, 0x24, 0x18, 0x12, 0x0f, 0x08, 0x0f, 0x0c, 0x05, 0xfc, 0x01, - 0xff, 0xe0, 0x16, 0x5f, 0x7f, 0x6a, 0x76, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0xee, 0xaf, 0xaf, 0xab, 0xac, 0xad, 0xab, 0xab, 0xac, 0xa9, 0xaa, - 0xac, 0xab, 0xab, 0xac, 0xaa, 0xa9, 0xa9, 0xa9, 0xa9, 0x9c, 0x90, 0x8f, - 0x8e, 0x8e, 0x8e, 0x8d, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, - 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x91, 0x90, 0x90, 0x91, 0x92, 0x92, - 0x92, 0x91, 0x92, 0x91, 0x92, 0x97, 0xb9, 0x71, 0x5c, 0x74, 0x73, 0x7f, - 0xdd, 0xca, 0xbc, 0xe4, 0x6a, 0x6f, 0x7f, 0x7f, 0x7f, 0x7f, 0x71, 0x74, - 0x5e, 0x49, 0x33, 0x29, 0x1f, 0x16, 0x10, 0xfd, 0xdd, 0x09, 0xfd, 0xfe, - 0x01, 0xe1, 0x18, 0x60, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0xed, 0xb0, 0xaf, 0xac, 0xad, 0xae, 0xae, 0xae, 0xad, 0xac, 0xac, - 0xab, 0xad, 0xac, 0xac, 0xac, 0xac, 0xab, 0xaa, 0xab, 0x9b, 0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, 0x8d, 0x8f, 0x8f, - 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x92, 0x92, 0x92, 0x92, 0x93, - 0x92, 0x93, 0x92, 0x92, 0x95, 0x9a, 0xd7, 0x75, 0x73, 0x7f, 0x7f, 0x7f, - 0xfe, 0xb2, 0xae, 0xe2, 0x66, 0x7f, 0x79, 0x7f, 0x72, 0x7f, 0x6e, 0x61, - 0x79, 0x69, 0x4e, 0x3b, 0x30, 0x18, 0xd9, 0xd5, 0xd9, 0x08, 0xfd, 0x03, - 0x00, 0xdf, 0x16, 0x6c, 0x7f, 0x78, 0x7b, 0x7f, 0x7f, 0x67, 0x55, 0x74, - 0x7f, 0xef, 0xae, 0xae, 0xad, 0xac, 0xa9, 0xac, 0xab, 0xac, 0xad, 0xad, - 0xad, 0xac, 0xad, 0xad, 0xac, 0xac, 0xad, 0xaa, 0xaa, 0x99, 0x8f, 0x8e, - 0x8e, 0x8e, 0x8d, 0x8d, 0x8e, 0x8d, 0x8d, 0x8f, 0x8d, 0x8e, 0x8f, 0x8d, - 0x8e, 0x8f, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x92, 0x8f, 0x90, 0x91, - 0x92, 0x92, 0x92, 0x91, 0x94, 0x9b, 0x06, 0x7f, 0x78, 0x7f, 0x7f, 0x67, - 0xab, 0xa4, 0xba, 0x64, 0x70, 0x7f, 0x7f, 0x7f, 0x7f, 
0x74, 0x68, 0x76, - 0x74, 0x77, 0x5c, 0x5a, 0x44, 0xf2, 0xe3, 0xe7, 0xef, 0x07, 0xfe, 0x01, - 0xfc, 0xe0, 0x18, 0x64, 0x7f, 0x72, 0x7f, 0x7f, 0x7f, 0x57, 0xe3, 0x47, - 0x7f, 0xec, 0xae, 0xad, 0xae, 0xac, 0xac, 0xab, 0xae, 0xac, 0xac, 0xae, - 0xae, 0xaf, 0xae, 0xad, 0xad, 0xab, 0xac, 0xab, 0xaa, 0x98, 0x8e, 0x8d, - 0x8f, 0x8e, 0x8d, 0x8f, 0x8e, 0x8d, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, - 0x8e, 0x90, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x92, 0x91, 0x91, 0x92, - 0x92, 0x92, 0x92, 0x93, 0x95, 0x9e, 0x3c, 0x70, 0x7f, 0x7f, 0x7b, 0x7d, - 0xa7, 0xa4, 0xbf, 0xd4, 0x06, 0x1e, 0x2d, 0x20, 0x35, 0x3c, 0x45, 0x46, - 0x3e, 0x3e, 0x17, 0x13, 0x11, 0x0a, 0x0b, 0x0d, 0x0b, 0x00, 0xfe, 0xff, - 0xfd, 0xdb, 0x10, 0x34, 0x54, 0x68, 0x66, 0x74, 0x78, 0x49, 0xf3, 0x36, - 0x68, 0xdc, 0xab, 0xae, 0xad, 0xab, 0xab, 0xac, 0xad, 0xad, 0xae, 0xb0, - 0xaf, 0xaf, 0xaf, 0xaf, 0xad, 0xac, 0xab, 0xad, 0xa7, 0x95, 0x8e, 0x8f, - 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, 0x8d, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x8f, 0x90, 0x8f, 0x91, 0x91, 0x90, - 0x93, 0x92, 0x93, 0x94, 0x95, 0x9d, 0x4e, 0x7f, 0x6a, 0x7b, 0x7f, 0x6a, - 0xa2, 0xa0, 0xa9, 0xd9, 0xf0, 0xfb, 0x02, 0x08, 0x12, 0x1b, 0x2b, 0x2f, - 0x34, 0x31, 0xf2, 0xef, 0xef, 0xef, 0xf3, 0xf4, 0xf2, 0xe8, 0xf9, 0xfe, - 0xfd, 0xcf, 0xf5, 0x00, 0x04, 0x06, 0x09, 0x0c, 0x0d, 0x03, 0xfb, 0xf2, - 0xe8, 0xb4, 0xa7, 0xaf, 0xad, 0xab, 0xad, 0xad, 0xad, 0xaf, 0xad, 0xad, - 0xaf, 0xaf, 0xb0, 0xae, 0xaf, 0xaf, 0xad, 0xac, 0xa9, 0x94, 0x8f, 0x8e, - 0x8e, 0x8e, 0x8f, 0x8e, 0x8e, 0x8f, 0x8e, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, - 0x8f, 0x8e, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x90, 0x91, 0x91, - 0x90, 0x92, 0x93, 0x94, 0x95, 0x9d, 0x22, 0x77, 0x6d, 0x63, 0x58, 0x4a, - 0x9f, 0xa0, 0xac, 0xc9, 0xe5, 0xec, 0xf7, 0xfd, 0x05, 0x09, 0x0e, 0x20, - 0x2b, 0x2c, 0xec, 0xec, 0xe6, 0xec, 0xe7, 0xed, 0xe7, 0xde, 0xf3, 0xfd, - 0xf9, 0xcd, 0xde, 0xe6, 0xe5, 0xe6, 0xe6, 0xe1, 0xe1, 0xdc, 0xda, 0xd6, - 0xce, 0xb0, 0xa6, 0xaa, 0xa9, 0xab, 0xac, 0xad, 0xac, 
0xb0, 0xaf, 0xae, - 0xaf, 0xb0, 0xaf, 0xae, 0xb0, 0xae, 0xac, 0xac, 0xaa, 0x95, 0x8f, 0x90, - 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, 0x8d, 0x8e, 0x8f, 0x8f, 0x8e, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x90, 0x8f, 0x90, 0x90, 0x91, 0x90, 0x92, - 0x92, 0x93, 0x93, 0x92, 0x95, 0x9c, 0xfe, 0x4c, 0x53, 0x40, 0x44, 0x39, - 0x9e, 0x9b, 0xaa, 0xc4, 0xe6, 0xe4, 0xd6, 0xdd, 0xe4, 0xf7, 0x1e, 0x26, - 0x2b, 0x2b, 0xeb, 0xe7, 0xe4, 0xeb, 0xeb, 0xea, 0xe7, 0xe0, 0xf1, 0x01, - 0xfb, 0xcb, 0xdb, 0xe5, 0xe5, 0xe4, 0xe3, 0xe2, 0xdf, 0xdd, 0xda, 0xd2, - 0xd0, 0xac, 0xa1, 0xa5, 0xa7, 0xa9, 0xa7, 0xab, 0xac, 0xac, 0xad, 0xaf, - 0xad, 0xae, 0xae, 0xaf, 0xaf, 0xae, 0xae, 0xad, 0xaa, 0x91, 0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x91, 0x90, 0x90, 0x91, 0x91, 0x92, - 0x92, 0x92, 0x92, 0x92, 0x95, 0x9a, 0xe3, 0x1e, 0x2c, 0x0e, 0xf2, 0xe2, - 0x9f, 0x9f, 0xc4, 0xcc, 0xe4, 0xf0, 0xee, 0xf7, 0xfd, 0x02, 0x1b, 0x27, - 0x26, 0x24, 0xe7, 0xea, 0xe6, 0xe4, 0xea, 0xe8, 0xe7, 0xdd, 0xf3, 0xfb, - 0xf8, 0xc8, 0xd8, 0xe5, 0xe2, 0xe3, 0xdf, 0xde, 0xdf, 0xdd, 0xd7, 0xd3, - 0xd2, 0xaf, 0xa0, 0xa0, 0xa0, 0xa1, 0xa2, 0xa3, 0xa2, 0xa6, 0xa6, 0xa9, - 0xab, 0xab, 0xab, 0xac, 0xad, 0xa9, 0xac, 0xab, 0xa9, 0x92, 0x8f, 0x8f, - 0x90, 0x8f, 0x8e, 0x8f, 0x8f, 0x90, 0x8f, 0x90, 0x8f, 0x8e, 0x90, 0x8f, - 0x90, 0x8e, 0x90, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, - 0x92, 0x91, 0x93, 0x94, 0x96, 0x9b, 0xce, 0xf4, 0xf4, 0xf0, 0xe8, 0xde, - 0x9d, 0xab, 0xc8, 0xd4, 0xe3, 0xf0, 0xfa, 0x02, 0xfe, 0x02, 0x19, 0x1e, - 0x1f, 0x24, 0xe8, 0xe5, 0xe4, 0xe6, 0xe4, 0xe7, 0xe4, 0xda, 0xee, 0xf7, - 0xf7, 0xc7, 0xda, 0xe0, 0xe2, 0xe2, 0xe0, 0xe1, 0xdd, 0xd8, 0xd8, 0xd4, - 0xcf, 0xc8, 0xbf, 0xb9, 0xb3, 0xb1, 0xad, 0xa9, 0xa0, 0x9d, 0x95, 0x97, - 0x97, 0x9c, 0xa0, 0x9f, 0x9f, 0xa0, 0x9f, 0x9e, 0x9e, 0x90, 0x8f, 0x8e, - 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, - 0x8e, 0x8f, 0x8e, 0x8f, 0x90, 0x8e, 0x90, 0x8f, 0x90, 
0x90, 0x90, 0x92, - 0x92, 0x92, 0x93, 0x93, 0x94, 0x9a, 0xce, 0xef, 0xee, 0xeb, 0xe8, 0xea, - 0xa1, 0xb3, 0xcb, 0xd7, 0xde, 0xeb, 0xee, 0xfe, 0x02, 0xf6, 0x11, 0x1a, - 0x1d, 0x1d, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe7, 0xe5, 0xda, 0xe7, 0xf6, - 0xf3, 0xc7, 0xd9, 0xe2, 0xe1, 0xdf, 0xe1, 0xe0, 0xdc, 0xda, 0xd8, 0xd6, - 0xcf, 0xca, 0xc6, 0xc2, 0xbf, 0xbc, 0xb8, 0xb8, 0xac, 0xa3, 0x92, 0x90, - 0x92, 0xa7, 0xab, 0xaf, 0xab, 0xa9, 0xa7, 0xa7, 0xa4, 0x91, 0x90, 0x8e, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8d, - 0x8e, 0x8e, 0x8f, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x8f, 0x91, 0x92, 0x92, - 0x92, 0x92, 0x93, 0x94, 0x95, 0x98, 0xd0, 0xed, 0xf2, 0xe8, 0xe7, 0xe3, - 0xa2, 0xbd, 0xd0, 0xd5, 0xe1, 0xec, 0xf9, 0x03, 0x04, 0xfb, 0x11, 0x1b, - 0x20, 0x1e, 0xe1, 0xe0, 0xe6, 0xe4, 0xe4, 0xe1, 0xe3, 0xda, 0xe6, 0xf1, - 0xef, 0xc0, 0xd7, 0xe1, 0xe3, 0xe1, 0xdf, 0xe0, 0xdc, 0xd9, 0xd8, 0xd4, - 0xd2, 0xce, 0xc9, 0xc2, 0xc0, 0xbb, 0xb9, 0xb6, 0xb2, 0x9e, 0x99, 0x9c, - 0x9a, 0xaa, 0xad, 0xaf, 0xad, 0xac, 0xac, 0xab, 0xa5, 0x91, 0x8f, 0x90, - 0x8f, 0x8f, 0x8f, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8f, 0x8e, - 0x8f, 0x8f, 0x8d, 0x8e, 0x8e, 0x8f, 0x90, 0x8f, 0x91, 0x91, 0x90, 0x91, - 0x91, 0x94, 0x91, 0x92, 0x94, 0x9b, 0xd5, 0xee, 0xed, 0xeb, 0xea, 0xe1, - 0xac, 0xbd, 0xce, 0xd8, 0xee, 0xfa, 0xf5, 0xf1, 0xf5, 0x07, 0x10, 0x1a, - 0x1b, 0x18, 0xe3, 0xe1, 0xe3, 0xe1, 0xe5, 0xe1, 0xe0, 0xd9, 0xe3, 0xed, - 0xe8, 0xc0, 0xd7, 0xe0, 0xe1, 0xdf, 0xe1, 0xdd, 0xdb, 0xd9, 0xd8, 0xd9, - 0xd4, 0xcc, 0xc9, 0xc3, 0xc2, 0xba, 0xb8, 0xb7, 0xb5, 0xa5, 0x97, 0x9d, - 0x9a, 0xad, 0xb2, 0xb4, 0xb3, 0xb4, 0xb2, 0xb1, 0xab, 0x91, 0x8e, 0x8d, - 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8f, - 0x8f, 0x8d, 0x8d, 0x8e, 0x8f, 0x90, 0x8f, 0x8f, 0x90, 0x91, 0x91, 0x92, - 0x91, 0x92, 0x93, 0x93, 0x92, 0x96, 0xd2, 0xed, 0xea, 0xea, 0xe4, 0xe1, - 0xb8, 0xb5, 0xb5, 0xcd, 0x27, 0x1e, 0xd2, 0xe0, 0xdf, 0xd4, 0xd3, 0xda, - 0xdb, 0xdd, 0xe0, 0xe2, 0xe0, 0xe1, 0xe3, 0xe0, 0xe0, 
0xd8, 0xe2, 0xeb, - 0xe0, 0xbc, 0xce, 0xd8, 0xd9, 0xdd, 0xdc, 0xda, 0xd9, 0xd9, 0xd8, 0xd4, - 0xcf, 0xce, 0xc8, 0xc4, 0xc3, 0xba, 0xba, 0xb8, 0xb6, 0xab, 0x97, 0x97, - 0x9b, 0xb5, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xb9, 0xb0, 0x92, 0x90, 0x8f, - 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8d, 0x8e, 0x8d, - 0x8e, 0x8f, 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, 0x90, 0x91, 0x92, 0x90, 0x92, - 0x92, 0x92, 0x94, 0x92, 0x96, 0x9a, 0xd0, 0xed, 0xeb, 0xe8, 0xe4, 0xe2, - 0xb2, 0xb2, 0xb8, 0x0b, 0x22, 0x12, 0xcb, 0xca, 0xce, 0xc9, 0xd3, 0xd8, - 0xdc, 0xdc, 0xde, 0xdc, 0xe2, 0xdf, 0xdd, 0xdf, 0xde, 0xd6, 0xde, 0xdd, - 0xab, 0x9f, 0xa1, 0xa3, 0xa2, 0xa3, 0xa4, 0xa3, 0xa4, 0xa6, 0xa5, 0xa4, - 0xa6, 0xa3, 0xa2, 0xa5, 0xa2, 0xa3, 0xa3, 0xa3, 0xa5, 0xa7, 0x9e, 0x99, - 0x99, 0xa1, 0xa6, 0xa5, 0xa6, 0xa7, 0xa4, 0xa7, 0x9d, 0x92, 0x91, 0x90, - 0x8f, 0x90, 0x8f, 0x8f, 0x90, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8e, 0x90, 0x90, 0x90, 0x91, 0x92, 0x91, 0x92, 0x93, - 0x93, 0x93, 0x94, 0x94, 0x97, 0x9b, 0xeb, 0x12, 0x11, 0xf3, 0xe0, 0xde, - 0xa6, 0xb2, 0xc5, 0x1a, 0x1d, 0x12, 0xcc, 0xcd, 0xce, 0xcc, 0xd3, 0xd7, - 0xd8, 0xdb, 0xde, 0xdf, 0xdf, 0xd9, 0xd5, 0xd1, 0xd3, 0xd0, 0xdc, 0xdb, - 0x9a, 0x93, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x90, 0x93, 0x92, - 0x93, 0x93, 0x92, 0x92, 0x91, 0x92, 0x92, 0x92, 0x90, 0x91, 0x93, 0x91, - 0x90, 0x91, 0x92, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8f, 0x92, 0x90, 0x8f, - 0x8e, 0x90, 0x8f, 0x8e, 0x8f, 0x8e, 0x90, 0x8e, 0x8e, 0x8e, 0x90, 0x90, - 0x8f, 0x8e, 0x8f, 0x90, 0x8f, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x93, 0x94, - 0x94, 0x94, 0x94, 0x92, 0x98, 0x9a, 0xb7, 0xc8, 0xce, 0xc7, 0xc1, 0xc2, - 0xde, 0xe1, 0xfd, 0x14, 0x16, 0x17, 0xcd, 0xcd, 0xd2, 0xc6, 0xd1, 0xd4, - 0xd7, 0xdb, 0xdc, 0xdb, 0xdc, 0xd0, 0xc3, 0xc0, 0xc2, 0xba, 0xb1, 0xdb, - 0xc1, 0x93, 0x8e, 0x8e, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, - 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8d, - 0x8c, 0x8e, 0x8e, 0x8e, 0x90, 0x90, 0x8f, 0x8f, 0x91, 
0x92, 0x92, 0x91, - 0x90, 0x91, 0x91, 0x90, 0x90, 0x90, 0x91, 0x90, 0x8f, 0x90, 0x90, 0x91, - 0x90, 0x91, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x95, 0x93, 0x95, 0x96, - 0x96, 0x96, 0x96, 0x95, 0x97, 0x99, 0xa2, 0xa9, 0xa9, 0xa0, 0x9c, 0x9a, - 0xf0, 0xfb, 0x0a, 0x0e, 0x0e, 0x0c, 0xe2, 0xdc, 0xd6, 0xc8, 0xce, 0xd3, - 0xd7, 0xd8, 0xd8, 0xd6, 0xdb, 0xd3, 0xd1, 0xd0, 0xcf, 0xc0, 0xa0, 0xaa, - 0xca, 0xae, 0x95, 0x8e, 0x9f, 0xa1, 0x97, 0x90, 0x90, 0x8f, 0x8f, 0x8f, - 0x95, 0x91, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, - 0x8c, 0x92, 0x97, 0x97, 0x98, 0x98, 0x98, 0x91, 0x90, 0x93, 0x92, 0x93, - 0x91, 0x92, 0x91, 0x92, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x91, 0x91, - 0x92, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x95, 0x95, 0x96, 0x98, 0x97, - 0x98, 0x97, 0x97, 0x95, 0x99, 0x9b, 0x9b, 0x9a, 0x99, 0x9a, 0x96, 0x91, - 0xcc, 0xde, 0xfc, 0x13, 0x11, 0x19, 0x13, 0x05, 0xf9, 0xf1, 0xc9, 0xc7, - 0xca, 0xc8, 0xcb, 0xcb, 0xcc, 0xce, 0xd1, 0xcf, 0xce, 0xbd, 0x98, 0x9a, - 0xa3, 0xa9, 0xaa, 0x8f, 0x8e, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8c, - 0x8e, 0x8c, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8b, 0x8e, - 0x8b, 0x8d, 0x8c, 0x8d, 0x8c, 0x8c, 0x8d, 0x8c, 0x8f, 0x93, 0x93, 0x92, - 0x90, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x94, 0x93, 0x94, - 0x93, 0x93, 0x92, 0x93, 0x94, 0x93, 0x94, 0x97, 0x95, 0x97, 0x98, 0x99, - 0x99, 0x99, 0x99, 0x95, 0x99, 0x9b, 0x94, 0x8c, 0x8e, 0x8f, 0x8e, 0x8e, - 0x99, 0x9b, 0xa2, 0xa7, 0xa5, 0xa6, 0xab, 0xa9, 0xb1, 0xc7, 0xc3, 0xc5, - 0xca, 0xce, 0xd1, 0xd1, 0xd2, 0xd0, 0xcc, 0xcc, 0xcb, 0xbc, 0x97, 0x99, - 0x9e, 0xa4, 0xb0, 0x91, 0x8c, 0x8c, 0x8d, 0x8e, 0x8f, 0x8e, 0x8f, 0x90, - 0x90, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8c, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x8e, 0x8d, 0x8e, 0x94, 0x93, 0x94, - 0x91, 0x94, 0x95, 0x94, 0x93, 0x95, 0x94, 0x95, 0x94, 0x94, 0x94, 0x94, - 0x95, 0x93, 0x92, 0x93, 0x95, 0x94, 0x94, 0x96, 0x98, 0x99, 0x9a, 0x99, - 0x9a, 0x9a, 0x9b, 0x96, 0x9a, 0x9c, 0x96, 0x8e, 0x90, 
0x90, 0x8e, 0x8d, - 0x8f, 0x8e, 0x90, 0x91, 0x90, 0x91, 0x91, 0x91, 0x92, 0x93, 0x97, 0x98, - 0x9d, 0xa7, 0xbc, 0xc4, 0xc8, 0xc9, 0xc7, 0xca, 0xc8, 0xb9, 0x96, 0x96, - 0x99, 0x9d, 0xab, 0x9a, 0x8f, 0x8e, 0x8f, 0x8d, 0x8e, 0x8d, 0x8d, 0x8c, - 0x8e, 0x8e, 0x8e, 0x8f, 0x8d, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, 0x8e, 0x8e, - 0x8d, 0x8c, 0x8d, 0x8d, 0x8c, 0x8a, 0x8d, 0x8f, 0x91, 0x94, 0x94, 0x95, - 0x91, 0x98, 0x95, 0x96, 0x95, 0x96, 0x95, 0x97, 0x96, 0x96, 0x95, 0x94, - 0x96, 0x96, 0x95, 0x96, 0x94, 0x95, 0x97, 0x97, 0x98, 0x9a, 0x9b, 0x9b, - 0x9a, 0x9b, 0x9b, 0x97, 0x9d, 0x9e, 0x98, 0x8e, 0x90, 0x92, 0x90, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8c, 0x8c, 0x8f, 0x8f, 0x90, 0x8e, 0x91, - 0x91, 0x92, 0x95, 0x98, 0x9f, 0xbc, 0xc7, 0xc6, 0xc9, 0xbc, 0x96, 0x94, - 0x95, 0x9b, 0xa4, 0x9e, 0x94, 0x91, 0x90, 0x90, 0x8e, 0x8f, 0x8d, 0x90, - 0x8f, 0x90, 0x91, 0x90, 0x91, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, - 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8f, 0x90, 0x93, 0x96, 0x95, 0x95, - 0x91, 0x97, 0x98, 0x96, 0x96, 0x97, 0x96, 0x98, 0x98, 0x96, 0x96, 0x98, - 0x96, 0x96, 0x95, 0x97, 0x96, 0x95, 0x97, 0x98, 0x9a, 0x9a, 0x9b, 0x9b, - 0x9c, 0x9a, 0x9d, 0x99, 0x9d, 0x9e, 0x99, 0x8f, 0x96, 0x9b, 0x9c, 0x92, - 0x99, 0x96, 0x8f, 0x8e, 0x8d, 0x8d, 0x8c, 0x92, 0x94, 0x93, 0x91, 0x91, - 0x91, 0x91, 0x91, 0x93, 0x97, 0xae, 0xc3, 0xc3, 0xc6, 0xbd, 0x96, 0x92, - 0x95, 0x99, 0xa2, 0x9c, 0x9a, 0x97, 0x97, 0x92, 0x8e, 0x8f, 0x8e, 0x8f, - 0x8f, 0x90, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x8f, 0x8e, 0x90, 0x8f, 0x8c, - 0x8e, 0x8e, 0x8e, 0x8d, 0x8f, 0x93, 0x8f, 0x90, 0x92, 0x96, 0x96, 0x95, - 0x92, 0x97, 0x97, 0x98, 0x98, 0x97, 0x97, 0x95, 0x96, 0x98, 0x97, 0x97, - 0x98, 0x98, 0x97, 0x98, 0x96, 0x96, 0x98, 0x98, 0x99, 0x9a, 0x99, 0x9c, - 0x9c, 0x9b, 0x9a, 0x98, 0x9d, 0x9f, 0x9a, 0x99, 0x9c, 0xa7, 0xab, 0x94, - 0x9f, 0xa0, 0x9c, 0x96, 0x8e, 0x8d, 0x8d, 0x8e, 0x94, 0x97, 0x96, 0x94, - 0x94, 0x94, 0x93, 0x91, 0x95, 0xab, 0xc4, 0xc2, 0xc1, 0xb9, 0x95, 0x92, - 0x92, 0x95, 0x9e, 0x9b, 0x9b, 0x9a, 0x99, 0x95, 0x90, 
0x8f, 0x8e, 0x8e, - 0x8e, 0x8d, 0x8c, 0x8b, 0x8b, 0x8b, 0x8c, 0x8a, 0x8b, 0x8c, 0x8b, 0x8a, - 0x8b, 0x8b, 0x8b, 0x8b, 0x90, 0x93, 0x90, 0x8b, 0x8f, 0x97, 0x96, 0x96, - 0x92, 0x99, 0x99, 0x98, 0x98, 0x99, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, - 0x98, 0x99, 0x98, 0x98, 0x98, 0x98, 0x98, 0x99, 0x9b, 0x9a, 0x9b, 0x9c, - 0x9c, 0x9c, 0x9d, 0x99, 0x9c, 0x9f, 0x9c, 0x99, 0x9f, 0xaa, 0xaf, 0x95, - 0x95, 0x94, 0x94, 0x8f, 0x8e, 0x8d, 0x8e, 0x8e, 0x90, 0x90, 0x94, 0x93, - 0x94, 0x90, 0x90, 0x91, 0x95, 0xaa, 0xc0, 0xc1, 0xc2, 0xb8, 0x96, 0x90, - 0x92, 0x95, 0x9a, 0x9c, 0x9b, 0x9d, 0x9c, 0x99, 0x91, 0x8f, 0x90, 0x8e, - 0x8f, 0x8e, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, - 0x8c, 0x8a, 0x8a, 0x8c, 0x91, 0x94, 0x90, 0x8c, 0x91, 0x98, 0x96, 0x97, - 0x93, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x97, 0x98, 0x98, 0x99, 0x98, - 0x99, 0x98, 0x99, 0x99, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9c, 0x9c, - 0x9c, 0x9c, 0x9d, 0x98, 0x9e, 0xa0, 0x9d, 0x9b, 0xa1, 0xac, 0xac, 0x95, - 0x91, 0x93, 0x91, 0x90, 0x90, 0x8f, 0x8e, 0x8e, 0x90, 0x90, 0x93, 0x92, - 0x8f, 0x8d, 0x8d, 0x8f, 0x95, 0xa7, 0xc0, 0xbe, 0xc0, 0xb8, 0x96, 0x90, - 0x91, 0x94, 0x9b, 0x9b, 0x9c, 0x9d, 0x9c, 0x99, 0x8f, 0x8f, 0x8f, 0x90, - 0x8d, 0x8e, 0x8e, 0x8e, 0x8c, 0x8c, 0x8c, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, - 0x8e, 0x8d, 0x8e, 0x8e, 0x91, 0x92, 0x90, 0x8f, 0x91, 0x98, 0x97, 0x97, - 0x94, 0x97, 0x97, 0x98, 0x97, 0x96, 0x97, 0x97, 0x98, 0x96, 0x97, 0x98, - 0x98, 0x97, 0x98, 0x98, 0x97, 0x98, 0x97, 0x97, 0x99, 0x9b, 0x99, 0x9b, - 0x9a, 0x9c, 0x9c, 0x98, 0x9d, 0x9e, 0x9f, 0x97, 0xa2, 0xae, 0xaa, 0x94, - 0x90, 0x91, 0x8f, 0x90, 0x8f, 0x8c, 0x8e, 0x92, 0x95, 0x96, 0x97, 0x94, - 0x8f, 0x8f, 0x8f, 0x92, 0x94, 0xa6, 0xbb, 0xbd, 0xbf, 0xb7, 0x97, 0x93, - 0x92, 0x92, 0x99, 0x9b, 0x9d, 0x9e, 0x9e, 0x9b, 0x92, 0x92, 0x92, 0x8e, - 0x90, 0x91, 0x90, 0x8f, 0x91, 0x91, 0x91, 0x8f, 0x91, 0x93, 0x90, 0x92, - 0x91, 0x91, 0x8f, 0x91, 0x93, 0x94, 0x92, 0x93, 0x92, 0x97, 0x97, 0x98, - 0x95, 0x94, 0x97, 0x96, 0x98, 0x95, 0x96, 0x96, 0x96, 
0x97, 0x98, 0x98, - 0x96, 0x96, 0x97, 0x97, 0x96, 0x97, 0x98, 0x97, 0x99, 0x9a, 0x9a, 0x9b, - 0x9c, 0x9c, 0x99, 0x99, 0x9e, 0x9e, 0x9d, 0x99, 0xa3, 0xad, 0xab, 0x93, - 0x8d, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8d, 0x96, 0x97, 0x98, 0x98, 0x95, - 0x8e, 0x8e, 0x8f, 0x91, 0x94, 0xa2, 0xb8, 0xb9, 0xb9, 0xb3, 0x95, 0x92, - 0x90, 0x92, 0x97, 0x9b, 0x9e, 0x9f, 0xa0, 0x9b, 0x93, 0x91, 0x91, 0x90, - 0x94, 0x94, 0x94, 0x93, 0x92, 0x94, 0x93, 0x94, 0x93, 0x95, 0x92, 0x95, - 0x94, 0x93, 0x92, 0x94, 0x93, 0x92, 0x91, 0x95, 0x91, 0x96, 0x99, 0x97, - 0x96, 0x95, 0x92, 0x91, 0x92, 0x91, 0x90, 0x91, 0x91, 0x90, 0x90, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x90, 0x91, 0x91, 0x91, 0x93, 0x94, - 0x94, 0x96, 0x99, 0x9a, 0x9d, 0x9d, 0x9c, 0x97, 0xa1, 0xa9, 0xa7, 0x97, - 0x8c, 0x8b, 0x8b, 0x8c, 0x8d, 0x8e, 0x8e, 0x94, 0x98, 0x97, 0x97, 0x96, - 0x91, 0x92, 0x92, 0x95, 0x97, 0xa2, 0xb5, 0xb5, 0xb7, 0xb2, 0x96, 0x93, - 0x93, 0x92, 0x99, 0x9a, 0x9e, 0xa0, 0x9e, 0x9c, 0x94, 0x93, 0x91, 0x95, - 0x97, 0x94, 0x97, 0x96, 0x97, 0x97, 0x96, 0x96, 0x98, 0x98, 0x93, 0x97, - 0x99, 0x98, 0x95, 0x96, 0x94, 0x91, 0x93, 0x97, 0x93, 0x98, 0x98, 0x97, - 0x97, 0x95, 0x95, 0x95, 0x96, 0x96, 0x95, 0x95, 0x95, 0x95, 0x94, 0x93, - 0x93, 0x95, 0x94, 0x95, 0x94, 0x94, 0x95, 0x96, 0x95, 0x96, 0x96, 0x97, - 0x99, 0x99, 0x99, 0x9b, 0x9b, 0x9c, 0x9c, 0x97, 0x90, 0x8c, 0x8d, 0x8c, - 0x8f, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x91, 0x94, 0x95, 0x97, 0x97, 0x96, - 0x97, 0x97, 0x96, 0x98, 0x9f, 0x9e, 0xad, 0xb1, 0xb4, 0xaf, 0x97, 0x92, - 0x91, 0x92, 0x97, 0x98, 0x9f, 0xa1, 0xa1, 0x9d, 0x93, 0x91, 0x93, 0x95, - 0x96, 0x98, 0x97, 0x96, 0x96, 0x96, 0x97, 0x96, 0x97, 0x96, 0x95, 0x97, - 0x97, 0x98, 0x96, 0x97, 0x94, 0x90, 0x91, 0x99, 0x97, 0x95, 0x94, 0x97, - 0x96, 0x90, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8c, 0x8e, - 0x8e, 0x8d, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, - 0x8e, 0x8e, 0x8e, 0x96, 0x9a, 0x9b, 0x9a, 0x94, 0x90, 0x8c, 0x8b, 0x8b, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8d, 0x8c, 0x8c, 
0x8d, 0x8c, 0x8c, - 0x8c, 0x8c, 0x8e, 0x92, 0x9c, 0x9b, 0xa6, 0xac, 0xb0, 0xac, 0x96, 0x92, - 0x91, 0x91, 0x95, 0x99, 0x9f, 0xa0, 0xa1, 0x9d, 0x92, 0x8f, 0x94, 0x97, - 0x98, 0x97, 0x97, 0x97, 0x97, 0x98, 0x97, 0x97, 0x98, 0x94, 0x97, 0x96, - 0x98, 0x98, 0x96, 0x95, 0x92, 0x90, 0x91, 0x98, 0x9c, 0x97, 0x96, 0x95, - 0x95, 0x8d, 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8d, 0x8f, 0x8e, 0x8d, 0x8e, - 0x8e, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0x8d, 0x8e, 0x8d, 0x8d, 0x8c, 0x8d, - 0x8c, 0x8e, 0x8e, 0x92, 0x99, 0x9a, 0x96, 0x94, 0x90, 0x8a, 0x8b, 0x8b, - 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8d, 0x8d, 0x8c, 0x8c, 0x8d, 0x8c, 0x8d, - 0x8d, 0x8d, 0x8e, 0x91, 0x9c, 0x9b, 0xa5, 0xa6, 0xab, 0xac, 0x97, 0x92, - 0x92, 0x91, 0x97, 0x9a, 0xa1, 0xa2, 0xa1, 0x9f, 0x93, 0x8f, 0x94, 0x96, - 0x97, 0x98, 0x97, 0x96, 0x98, 0x99, 0x97, 0x99, 0x97, 0x96, 0x96, 0x98, - 0x97, 0x98, 0x97, 0x94, 0x92, 0x93, 0x91, 0x9a, 0x9e, 0x94, 0x93, 0x93, - 0x95, 0x91, 0x8d, 0x8c, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, - 0x8f, 0x90, 0x90, 0x90, 0x90, 0x8d, 0x90, 0x8f, 0x8f, 0x8f, 0x8d, 0x8d, - 0x8e, 0x8e, 0x8e, 0x93, 0x98, 0x96, 0x96, 0x96, 0x8f, 0x8c, 0x8c, 0x8c, - 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x8a, 0x8b, 0x8a, 0x8a, 0x8a, - 0x8c, 0x8b, 0x8c, 0x8f, 0x98, 0x98, 0x9f, 0xa4, 0xa5, 0xa5, 0x95, 0x91, - 0x90, 0x90, 0x96, 0x97, 0xa0, 0x9e, 0x9f, 0x9d, 0x8e, 0x8d, 0x8d, 0x8e, - 0x8e, 0x8e, 0x8f, 0x8e, 0x95, 0x96, 0x98, 0x98, 0x94, 0x96, 0x97, 0x97, - 0x98, 0x97, 0x96, 0x94, 0x93, 0x91, 0x91, 0x92, 0x96, 0x90, 0x8f, 0x8f, - 0x8e, 0x8f, 0x8d, 0x8a, 0x8b, 0x8c, 0x8c, 0x8d, 0x8e, 0x8d, 0x8e, 0x8e, - 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x8d, 0x8e, 0x8e, 0x8b, 0x8c, 0x8b, 0x8c, - 0x8d, 0x90, 0x8e, 0x90, 0x8f, 0x90, 0x90, 0x91, 0x8e, 0x8a, 0x8b, 0x8b, - 0x8e, 0x8c, 0x8e, 0x8e, 0x8e, 0x8d, 0x8e, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8d, 0x90, 0x98, 0x98, 0x9e, 0xa2, 0xa5, 0xa3, 0x96, 0x91, - 0x91, 0x92, 0x96, 0x99, 0xa0, 0xa1, 0x9f, 0x9b, 0x90, 0x8f, 0x91, 0x90, - 0x8e, 0x90, 0x8f, 0x8f, 0x90, 0x96, 0x97, 0x96, 0x95, 
0x99, 0x98, 0x97, - 0x97, 0x98, 0x96, 0x94, 0x92, 0x8e, 0x8e, 0x8e, 0x8f, 0x8e, 0x8e, 0x8c, - 0x8d, 0x8d, 0x91, 0x8c, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8d, 0x8c, - 0x8e, 0x8c, 0x8d, 0x8c, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, 0x8e, 0x8f, - 0x8f, 0x90, 0x90, 0x8f, 0x8e, 0x8f, 0x8d, 0x8d, 0x8e, 0x8c, 0x8c, 0x8d, - 0x91, 0x92, 0x95, 0x95, 0x96, 0x95, 0x8d, 0x8c, 0x8d, 0x8c, 0x8d, 0x8c, - 0x8b, 0x8c, 0x8c, 0x8f, 0x99, 0x99, 0x9f, 0xa2, 0xa5, 0xa2, 0x96, 0x91, - 0x91, 0x90, 0x98, 0x98, 0x9f, 0xa1, 0x9e, 0x98, 0x93, 0x92, 0x94, 0x93, - 0x93, 0x94, 0x93, 0x93, 0x93, 0x94, 0x97, 0x96, 0x98, 0x98, 0x98, 0x99, - 0x96, 0x97, 0x97, 0x96, 0x97, 0x93, 0x93, 0x95, 0x92, 0x8c, 0x8d, 0x8d, - 0x8d, 0x8e, 0x8f, 0x94, 0x8d, 0x8d, 0x8d, 0x8b, 0x8c, 0x8c, 0x8d, 0x8b, - 0x8b, 0x8c, 0x8c, 0x8c, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x90, - 0x92, 0x91, 0x8f, 0x8e, 0x8e, 0x8d, 0x8b, 0x8c, 0x8d, 0x8b, 0x8d, 0x8a, - 0xab, 0xa7, 0xa6, 0xa1, 0x9b, 0x96, 0x8e, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, - 0x8c, 0x8c, 0x8c, 0x8e, 0x98, 0x95, 0x9c, 0xa0, 0xa2, 0xa2, 0x94, 0x91, - 0x8f, 0x90, 0x96, 0x96, 0x9d, 0x9d, 0x9b, 0x95, 0x8e, 0x8d, 0x8b, 0x8b, - 0x8b, 0x8c, 0x8a, 0x8b, 0x8c, 0x90, 0x92, 0x94, 0x95, 0x97, 0x98, 0x97, - 0x96, 0x96, 0x98, 0x98, 0x98, 0x94, 0x96, 0x9b, 0x98, 0x8d, 0x8b, 0x8b, - 0x8c, 0x8c, 0x8c, 0x8e, 0x91, 0x91, 0x8f, 0x8f, 0x8e, 0x8f, 0x8e, 0x8e, - 0x8d, 0x8e, 0x8d, 0x8f, 0x8e, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x91, 0x90, - 0x90, 0x8d, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8b, 0x8c, - 0x8d, 0x8d, 0x8b, 0x8c, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, - 0x8b, 0x8b, 0x8c, 0x8d, 0x97, 0x96, 0x9d, 0x9f, 0x9e, 0xa1, 0x95, 0x90, - 0x8e, 0x8e, 0x94, 0x95, 0x9b, 0x9b, 0x9a, 0x95, 0x91, 0x8d, 0x8c, 0x8c, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x91, 0x93, 0x95, 0x97, 0x99, 0x9a, 0x97, - 0x9b, 0x9b, 0x9b, 0x99, 0x9a, 0x98, 0x97, 0x99, 0x98, 0x8d, 0x8b, 0x8a, - 0x8b, 0x8c, 0x8d, 0x8c, 0x8d, 0x90, 0x91, 0x91, 0x91, 0x92, 0x93, 0x93, - 0x92, 0x92, 0x8f, 0x8f, 0x91, 0x91, 0x91, 0x91, 0x91, 
0x92, 0x90, 0x8d, - 0x8c, 0x8c, 0x8e, 0x8a, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, - 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, 0x8a, - 0x8b, 0x8a, 0x8c, 0x8e, 0x95, 0x96, 0x9c, 0x9d, 0x9f, 0xa0, 0x93, 0x8f, - 0x90, 0x8f, 0x93, 0x95, 0x99, 0x99, 0x99, 0x95, 0x91, 0x8c, 0x8f, 0x90, - 0x91, 0x91, 0x91, 0x8e, 0x8d, 0x92, 0x97, 0x96, 0x99, 0x99, 0x9a, 0x97, - 0x97, 0x9d, 0x9e, 0x9d, 0x9b, 0x9d, 0x9a, 0x9a, 0x99, 0x8d, 0x8d, 0x8b, - 0x8d, 0x90, 0x96, 0x95, 0x90, 0x8e, 0x8c, 0x8d, 0x8f, 0x92, 0x92, 0x92, - 0x93, 0x93, 0x92, 0x91, 0x91, 0x91, 0x8f, 0x8f, 0x8d, 0x8d, 0x8d, 0x8c, - 0x8f, 0x8d, 0x8e, 0x8b, 0x8b, 0x8a, 0x8b, 0x8b, 0x8c, 0x8b, 0x8a, 0x8a, - 0x8c, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, - 0x8b, 0x8b, 0x8b, 0x8f, 0x96, 0x97, 0x9c, 0x9d, 0x9e, 0x9e, 0x95, 0x8f, - 0x8f, 0x8f, 0x93, 0x94, 0x98, 0x9a, 0x99, 0x95, 0x8e, 0x90, 0xc3, 0xcb, - 0xc9, 0xcb, 0xd0, 0x9f, 0x8e, 0x95, 0x9f, 0xa3, 0xa7, 0xa7, 0xa9, 0xab, - 0xa7, 0xa0, 0xa1, 0xad, 0x9b, 0xa1, 0xa6, 0xa4, 0xa5, 0x9f, 0x97, 0x96, - 0x97, 0x8f, 0x8e, 0x92, 0x92, 0x96, 0x92, 0x8d, 0x8b, 0x8c, 0x8b, 0x8b, - 0x8b, 0x8b, 0x8b, 0x8e, 0x8c, 0x8f, 0x8e, 0x90, 0x8f, 0x8e, 0x8e, 0x8d, - 0x8e, 0x8d, 0x8d, 0x8b, 0x8c, 0x8b, 0x8a, 0x8b, 0x8c, 0x8a, 0x8b, 0x8b, - 0x8b, 0x8c, 0x8b, 0x8b, 0x8b, 0x8a, 0x8a, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, - 0x8a, 0x8c, 0x8b, 0x8d, 0x94, 0x96, 0x9b, 0x9b, 0x9e, 0x9d, 0x92, 0x90, - 0x8f, 0x8d, 0x92, 0x94, 0x99, 0x9b, 0x97, 0x94, 0x8d, 0x91, 0xc9, 0xcd, - 0xc1, 0xc6, 0xc2, 0x9a, 0x8f, 0x9a, 0xaf, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, - 0xaa, 0xa1, 0xa3, 0xb4, 0xb1, 0xac, 0xa9, 0xa7, 0xa1, 0xa7, 0x9a, 0x98, - 0x9b, 0x9a, 0x93, 0x8f, 0x8c, 0x8f, 0x92, 0x91, 0x8e, 0x90, 0x8e, 0x8c, - 0x8f, 0x8e, 0x8f, 0x90, 0x90, 0x91, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8e, - 0x8d, 0x8e, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8a, 0x8b, 0x8c, 0x8b, 0x8b, - 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, 0x8c, 0x8b, 0x8c, - 0x8c, 0x8a, 0x8b, 0x8d, 0x94, 0x95, 0x9b, 0x99, 0x9e, 
0x9d, 0x93, 0x8f, - 0x8d, 0x8d, 0x92, 0x94, 0x98, 0x99, 0x97, 0x95, 0x8d, 0x8d, 0x90, 0x92, - 0x91, 0x91, 0x90, 0x8f, 0x8d, 0x99, 0xb0, 0xb3, 0xb5, 0xb6, 0xb4, 0xb6, - 0xa7, 0xa0, 0xa5, 0xb7, 0xb6, 0x9d, 0x9e, 0x9d, 0x9d, 0xa5, 0x9b, 0x98, - 0x9b, 0x97, 0x95, 0x95, 0x96, 0x96, 0x93, 0x8f, 0x90, 0x91, 0x8c, 0x8d, - 0x90, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8c, - 0x8e, 0x8e, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8b, 0x8a, 0x8b, 0x8c, 0x8a, - 0x8b, 0x8b, 0x8a, 0x8b, 0x8b, 0x8a, 0x8c, 0x8c, 0x8b, 0x8a, 0x8b, 0x8b, - 0x8a, 0x8a, 0x8a, 0x8d, 0x93, 0x95, 0x99, 0x9b, 0x9d, 0x9d, 0x93, 0x90, - 0x8e, 0x8d, 0x8f, 0x93, 0x98, 0x99, 0x99, 0x93, 0x8d, 0x8d, 0x8c, 0x8d, - 0x8e, 0x8e, 0x8c, 0x8d, 0x8e, 0x99, 0xaf, 0xb3, 0xb1, 0xb5, 0xb3, 0xb6, - 0xa3, 0x9e, 0xa2, 0xb0, 0xb6, 0xaa, 0xb7, 0xb3, 0xb3, 0xb1, 0x99, 0x98, - 0x99, 0x97, 0x97, 0x94, 0x95, 0x96, 0x94, 0x92, 0x91, 0x90, 0x8e, 0x8d, - 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8c, 0x8b, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x89, 0x8b, 0x8b, - 0x8b, 0x8d, 0x8c, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, - 0x8b, 0x8c, 0x8c, 0x8d, 0x93, 0x95, 0x99, 0x9a, 0x9d, 0x9c, 0x93, 0x8f, - 0x8e, 0x8e, 0x91, 0x92, 0x95, 0x97, 0x96, 0x94, 0x8d, 0x8f, 0x8d, 0x8d, - 0x8d, 0x8e, 0x8d, 0x8e, 0x8e, 0x98, 0xb0, 0xb3, 0xb3, 0xb5, 0xb7, 0xad, - 0x92, 0x90, 0x8f, 0x92, 0xb5, 0xb3, 0xb6, 0xb2, 0xb6, 0xb6, 0x9c, 0x97, - 0x97, 0x97, 0x96, 0x95, 0x95, 0x93, 0x94, 0x92, 0x90, 0x90, 0x8d, 0x8e, - 0x8f, 0x91, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0x8d, 0x8c, - 0x8b, 0x8d, 0x8b, 0x8a, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, 0x8c, 0x8c, - 0x8c, 0x8a, 0x8c, 0x8d, 0x93, 0x94, 0x99, 0x9c, 0x9c, 0x9d, 0x94, 0x8f, - 0x8e, 0x8c, 0x91, 0x92, 0x97, 0x99, 0x97, 0x94, 0x8d, 0x8e, 0x8c, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x99, 0xb1, 0xb6, 0xba, 0xbc, 0xbd, 0xbc, - 0xb5, 0xb6, 0xb6, 0xb7, 0xbd, 0xbb, 0xad, 0xa7, 0xb5, 
0xb7, 0x9b, 0x97, - 0x94, 0x94, 0x94, 0x95, 0x95, 0x93, 0x92, 0x92, 0x91, 0x91, 0x8e, 0x8d, - 0x91, 0x8f, 0x91, 0x90, 0x90, 0x8e, 0x8f, 0x8e, 0x8e, 0x8e, 0x8b, 0x8b, - 0x8b, 0x8c, 0x8c, 0x8b, 0x8a, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, - 0x8c, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8d, 0x8c, - 0x8c, 0x8d, 0x8c, 0x8e, 0x94, 0x95, 0x97, 0x9b, 0x9b, 0x9d, 0x94, 0x8f, - 0x8f, 0x8d, 0x91, 0x91, 0x95, 0x98, 0x97, 0x96, 0x8d, 0x8d, 0x8c, 0x8d, - 0x8d, 0x8b, 0x8b, 0x8c, 0x8d, 0x98, 0xb4, 0xb9, 0xb8, 0xac, 0xac, 0xae, - 0xae, 0xb4, 0xae, 0xb0, 0xb1, 0xb5, 0xb6, 0xaf, 0xb9, 0xb8, 0x99, 0x96, - 0x93, 0x8d, 0x8c, 0x92, 0x95, 0x93, 0x93, 0x93, 0x91, 0x92, 0x8e, 0x8d, - 0x92, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, - 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8c, - 0x8e, 0x8e, 0x8f, 0x90, 0x91, 0x95, 0x93, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, - 0x8c, 0x8b, 0x8c, 0x8e, 0x94, 0x94, 0x99, 0x9a, 0x99, 0x9c, 0x94, 0x8e, - 0x8e, 0x8f, 0x90, 0x90, 0x95, 0x99, 0x96, 0x94, 0x8f, 0x8d, 0x8c, 0x8c, - 0x8b, 0x8c, 0x8d, 0x8c, 0x8d, 0x98, 0xb4, 0xba, 0xba, 0xb2, 0xac, 0xb0, - 0xb5, 0xb8, 0xbc, 0xb0, 0xb7, 0xbb, 0xbf, 0xbe, 0xbe, 0xb7, 0x94, 0x94, - 0x95, 0x91, 0x91, 0x92, 0x91, 0x90, 0x91, 0x92, 0x93, 0x92, 0x8f, 0x8f, - 0x9c, 0x9e, 0xa0, 0x9c, 0x90, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0x8c, 0x8c, - 0x8d, 0x8d, 0x8c, 0x8b, 0x8c, 0x8b, 0x8c, 0x8b, 0x8a, 0x8c, 0x8b, 0x8c, - 0xbc, 0xc2, 0xbc, 0xb1, 0xa3, 0x97, 0x8e, 0x8d, 0x8c, 0x8b, 0x8c, 0x8d, - 0x8c, 0x8d, 0x8d, 0x8d, 0x93, 0x95, 0x97, 0x9a, 0x9c, 0x9d, 0x95, 0x8e, - 0x8e, 0x8d, 0x90, 0x8f, 0x96, 0x99, 0x98, 0x96, 0x8d, 0x8d, 0x8b, 0x8c, - 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x97, 0xb5, 0xb9, 0xbe, 0xbb, 0xbf, 0xbc, - 0xbf, 0xc3, 0xc0, 0xbf, 0xc1, 0xc1, 0xbf, 0xbe, 0xbb, 0xb7, 0x8f, 0x8f, - 0x93, 0x92, 0x8d, 0x91, 0x8e, 0x8e, 0x90, 0x90, 0x90, 0x90, 0x8d, 0x8e, - 0x93, 0x95, 0x93, 0x93, 0x91, 0x93, 0x91, 0x91, 0x90, 0x8f, 0x8d, 0x8c, - 0x8f, 0x8e, 0x8b, 0x8b, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 
0x8b, 0x8b, 0x8c, - 0x8f, 0x8e, 0x8d, 0x8d, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8c, 0x8c, 0x8c, - 0x8c, 0x8c, 0x8c, 0x8d, 0x92, 0x93, 0x96, 0x98, 0x9a, 0x9b, 0x95, 0x8e, - 0x8e, 0x8d, 0x8f, 0x90, 0x94, 0x97, 0x96, 0x95, 0x8e, 0x8d, 0x8c, 0x8c, - 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x96, 0xb5, 0xbc, 0xbc, 0xc0, 0xc2, 0xc3, - 0xc9, 0xc6, 0xc5, 0xc5, 0xc4, 0xc4, 0xbf, 0xbd, 0xbc, 0xb6, 0x92, 0x8f, - 0x91, 0x92, 0x8e, 0x8e, 0x8d, 0x8e, 0x8e, 0x8c, 0x8e, 0x8e, 0x8e, 0x8d, - 0x8f, 0x92, 0x92, 0x91, 0x91, 0x90, 0x92, 0x91, 0x92, 0x91, 0x8f, 0x91, - 0x8e, 0x8f, 0x8c, 0x8b, 0x8c, 0x8b, 0x8b, 0x8c, 0x8b, 0x8a, 0x8c, 0x8c, - 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, - 0x8c, 0x8d, 0x8d, 0x8c, 0x93, 0x95, 0x97, 0x99, 0x9a, 0x9a, 0x94, 0x90, - 0x8e, 0x8d, 0x8e, 0x8f, 0x94, 0x99, 0x99, 0x95, 0x8e, 0x8e, 0x8d, 0x8d, - 0x8d, 0x8c, 0x8c, 0x8d, 0x8e, 0x97, 0xb8, 0xbc, 0xbf, 0xc2, 0xc4, 0xc5, - 0xc7, 0xc6, 0xc7, 0xc3, 0xc6, 0xc3, 0xbd, 0xbd, 0xbd, 0xb7, 0x93, 0x90, - 0x90, 0x90, 0x91, 0x90, 0x8f, 0x90, 0x91, 0x91, 0x90, 0x8e, 0x8e, 0x8c, - 0x8e, 0x91, 0x93, 0x94, 0x92, 0x92, 0x92, 0x92, 0x91, 0x91, 0x90, 0x91, - 0x91, 0x8f, 0x8e, 0x8c, 0x8b, 0x8c, 0x8d, 0x8d, 0x8b, 0x8c, 0x8d, 0x8b, - 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8b, 0x8d, 0x8d, - 0x8d, 0x8c, 0x8c, 0x8e, 0x93, 0x95, 0x96, 0x98, 0x9b, 0x9a, 0x96, 0x8f, - 0x8d, 0x8e, 0x8e, 0x8e, 0x93, 0x9b, 0x99, 0x95, 0x8e, 0x8d, 0x8c, 0x8d, - 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x95, 0xb6, 0xbc, 0xc0, 0xc5, 0xc1, 0xc3, - 0xc9, 0xc6, 0xc5, 0xc4, 0xc3, 0xc4, 0xc1, 0xbf, 0xbe, 0xb9, 0x91, 0x90, - 0x90, 0x8f, 0x90, 0x90, 0x90, 0x8f, 0x90, 0x91, 0x91, 0x8f, 0x8d, 0x8d, - 0x8e, 0x91, 0x92, 0x90, 0x92, 0x92, 0x91, 0x93, 0x92, 0x92, 0x92, 0x91, - 0x8f, 0x8f, 0x8d, 0x8d, 0x8c, 0x8b, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8b, 0x8c, 0x8d, 0x8d, 0x8b, 0x8c, - 0x8d, 0x8d, 0x8c, 0x8d, 0x92, 0x94, 0x95, 0x98, 0x99, 0x99, 0x97, 0x8e, - 0x8d, 0x8b, 0x8e, 0x8e, 0x93, 0x99, 0x9a, 0x95, 0x8d, 
0x8c, 0x8b, 0x8b, - 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x95, 0xb6, 0xbc, 0xc0, 0xbf, 0xc1, 0xc6, - 0xc6, 0xc6, 0xc7, 0xc5, 0xc2, 0xc1, 0xbd, 0xbc, 0xaf, 0xa1, 0x91, 0x8f, - 0x8f, 0x90, 0x8f, 0x8f, 0x91, 0x8f, 0x91, 0x8f, 0x90, 0x8e, 0x8e, 0x8d, - 0x8d, 0x8f, 0x8f, 0x8f, 0x8d, 0x8f, 0x91, 0x90, 0x92, 0x91, 0x92, 0x91, - 0x90, 0x90, 0x8d, 0x8c, 0x8c, 0x8c, 0x8c, 0x8b, 0x8d, 0x8d, 0x8d, 0x8c, - 0x8c, 0x8d, 0x8d, 0x8b, 0x8c, 0x8c, 0x8c, 0x8b, 0x8c, 0x8c, 0x8b, 0x8c, - 0x8b, 0x8c, 0x8d, 0x8d, 0x90, 0x92, 0x95, 0x96, 0x99, 0x9b, 0x97, 0x8f, - 0x8d, 0x8d, 0x8d, 0x8e, 0x92, 0x9a, 0x99, 0x95, 0x8c, 0x8d, 0x8b, 0x8d, - 0x8c, 0x8c, 0x8c, 0x8c, 0x8d, 0x94, 0xb6, 0xbc, 0xc1, 0xc2, 0xc1, 0xc2, - 0xc7, 0xc5, 0xc6, 0xc3, 0xbd, 0xbd, 0xba, 0xb7, 0x98, 0x8d, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x90, 0x90, 0x8f, 0x90, 0x8f, - 0x90, 0x8e, 0x8e, 0x8d, 0x8d, 0x8e, 0x90, 0x91, 0x91, 0x91, 0x91, 0x91, - 0x91, 0x91, 0x90, 0x8e, 0x8e, 0x8c, 0x8f, 0x8d, 0x8f, 0x91, 0x95, 0x9c, - 0x8d, 0x8c, 0x8d, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8e, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8e, 0x91, 0x93, 0x95, 0x98, 0x99, 0x99, 0x97, 0x8e, - 0x8d, 0x8d, 0x8f, 0x90, 0x94, 0x9b, 0x99, 0x96, 0x8e, 0x8d, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8e, 0x8d, 0x8d, 0x95, 0xb4, 0xbb, 0xbf, 0xc2, 0xc1, 0xc5, - 0xc5, 0xc4, 0xc3, 0xbf, 0xbb, 0xbc, 0xb7, 0xaa, 0x97, 0x8d, 0x8e, 0x8e, - 0x8e, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x8f, 0x91, 0x93, - 0x91, 0x90, 0x90, 0x90, 0x8e, 0x8f, 0x90, 0x91, 0x93, 0x92, 0x93, 0x95, - 0x94, 0x94, 0x97, 0x97, 0x9a, 0x9c, 0xa5, 0xb8, 0xd0, 0xfa, 0x2d, 0x40, - 0x8d, 0x8e, 0x8c, 0x8d, 0x8c, 0x8c, 0x8b, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, - 0x8d, 0x8c, 0x8c, 0x8d, 0x91, 0x94, 0x94, 0x97, 0x97, 0x99, 0x96, 0x8f, - 0x8f, 0x90, 0x91, 0x91, 0x97, 0x9c, 0x99, 0x96, 0x8e, 0x8c, 0x8d, 0x8d, - 0x8c, 0x8c, 0x8d, 0x8c, 0x8b, 0x94, 0xb2, 0xba, 0xbe, 0xbd, 0xbd, 0xc0, - 0xc2, 0xc0, 0xbd, 0xbb, 0xb7, 0xb5, 0xb0, 0x9e, 0x90, 0x8c, 0x8d, 0x8f, - 0x90, 0x90, 0x90, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 
0x8f, 0x8d, 0x8f, - 0x90, 0x8f, 0x8e, 0x90, 0x90, 0x90, 0x92, 0x92, 0x96, 0x99, 0x9c, 0xa1, - 0xad, 0xc1, 0xd7, 0xef, 0x07, 0x14, 0x27, 0x31, 0x45, 0x46, 0x48, 0x48, - 0x8d, 0x8d, 0x8d, 0x8c, 0x8b, 0x8c, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, - 0x8d, 0x8c, 0x8d, 0x8d, 0x90, 0x93, 0x93, 0x97, 0x98, 0x99, 0x96, 0x8f, - 0x8f, 0x90, 0x91, 0x93, 0x9b, 0x9c, 0x9a, 0x98, 0x8f, 0x8d, 0x8c, 0x8d, - 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x92, 0xae, 0xb3, 0xb5, 0xb6, 0xb6, 0xb2, - 0xb4, 0xb1, 0xac, 0xa5, 0xa1, 0xa0, 0xa0, 0x9b, 0x8e, 0x8e, 0x8d, 0x8e, - 0x90, 0x90, 0x8e, 0x8f, 0x8e, 0x90, 0x90, 0x8f, 0x8f, 0x90, 0x8e, 0x8f, - 0x91, 0x92, 0x95, 0x98, 0x9f, 0xac, 0xba, 0xd0, 0xe3, 0xf8, 0x0a, 0x13, - 0x21, 0x26, 0x30, 0x42, 0x4a, 0x71, 0x74, 0x60, 0x70, 0x5a, 0x5c, 0x67, - 0x8c, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, - 0x8d, 0x8c, 0x8b, 0x8c, 0x90, 0x93, 0x91, 0x97, 0x98, 0x98, 0x96, 0x8f, - 0x93, 0x93, 0x93, 0x95, 0x9e, 0x9d, 0x9c, 0x99, 0x8f, 0x8e, 0x8b, 0x8b, - 0x8d, 0x8c, 0x8d, 0x8d, 0x8f, 0x8e, 0x98, 0x98, 0x95, 0x97, 0x93, 0x94, - 0x98, 0x99, 0x98, 0x96, 0x96, 0x96, 0x97, 0x94, 0x90, 0x8d, 0x8f, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x8f, 0x92, 0x93, 0x95, 0x98, 0x9e, 0xab, 0xb9, - 0xcd, 0xe0, 0xf8, 0x0a, 0x15, 0x23, 0x29, 0x2e, 0x37, 0x38, 0x3d, 0x3a, - 0x38, 0x38, 0x3e, 0x48, 0x54, 0x68, 0x70, 0x7a, 0x65, 0x65, 0x59, 0x4d, - 0x8c, 0x8c, 0x8d, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8d, 0x90, 0x93, 0x92, 0x97, 0x98, 0x99, 0x97, 0x93, - 0x92, 0x95, 0x95, 0x99, 0xa0, 0x9f, 0x9e, 0x9c, 0x8f, 0x8d, 0x8d, 0x8d, - 0x8d, 0x8d, 0x8d, 0x8e, 0x94, 0x8e, 0x93, 0x95, 0x95, 0x98, 0x99, 0x98, - 0x98, 0x9a, 0x99, 0x99, 0x99, 0x99, 0x96, 0x95, 0x90, 0x91, 0x92, 0x95, - 0x96, 0x99, 0xa0, 0xaf, 0xbc, 0xcd, 0xe0, 0xf4, 0x09, 0x16, 0x20, 0x2b, - 0x37, 0x39, 0x3d, 0x44, 0x40, 0x4a, 0x50, 0x54, 0x54, 0x43, 0x49, 0x47, - 0x3c, 0x3e, 0x42, 0x46, 0x43, 0x46, 0x4f, 0x4b, 0x4d, 0x46, 0x4a, 0x46, - 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8e, 0x8e, 0x8e, 0x8d, 
0x8d, 0x8d, 0x8d, - 0x8e, 0x8e, 0x8d, 0x8e, 0x90, 0x92, 0x91, 0x97, 0x97, 0x96, 0x94, 0x96, - 0x96, 0x98, 0x97, 0x9f, 0xa5, 0xa3, 0xa3, 0xa2, 0x94, 0x9d, 0x8e, 0x8e, - 0x8d, 0x8e, 0x8e, 0x8f, 0x8e, 0x8f, 0x92, 0x96, 0x98, 0x99, 0x9a, 0x9b, - 0x9b, 0x9d, 0x9c, 0x9c, 0x9c, 0x9d, 0xa2, 0xa9, 0xb6, 0xc1, 0xd4, 0xe6, - 0xf8, 0x09, 0x18, 0x20, 0x2f, 0x36, 0x35, 0x3e, 0x43, 0x46, 0x4a, 0x4c, - 0x44, 0x54, 0x54, 0x51, 0x56, 0x4d, 0x56, 0x4b, 0x54, 0x4d, 0x54, 0x53, - 0x4a, 0x56, 0x4e, 0x53, 0x4d, 0x45, 0x43, 0x4e, 0x55, 0x45, 0x47, 0x50, - 0x8f, 0x8f, 0x8c, 0x8f, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, 0x8e, 0x8c, 0x8d, - 0x8d, 0x8e, 0x8d, 0x8d, 0x8e, 0x8e, 0x91, 0x92, 0x92, 0x91, 0x91, 0x96, - 0x95, 0x96, 0x95, 0xa7, 0xad, 0xab, 0xae, 0xad, 0x94, 0x93, 0x91, 0x93, - 0x93, 0x94, 0x94, 0x97, 0x94, 0x96, 0x9a, 0x9f, 0xa0, 0xa4, 0xaa, 0xb2, - 0xc0, 0xc9, 0xd6, 0xe4, 0xf6, 0x06, 0x14, 0x20, 0x2f, 0x39, 0x3c, 0x41, - 0x42, 0x3a, 0x46, 0x4a, 0x45, 0x41, 0x4d, 0x4b, 0x4c, 0x57, 0x4c, 0x51, - 0x4b, 0x45, 0x40, 0x43, 0x44, 0x44, 0x45, 0x3f, 0x44, 0x55, 0x58, 0x56, - 0x47, 0x4c, 0x47, 0x52, 0x54, 0x4d, 0x4c, 0x59, 0x58, 0x50, 0x5d, 0x49, - 0x8d, 0x8e, 0x8d, 0x8e, 0x8d, 0x8e, 0x8e, 0x8d, 0x8d, 0x8e, 0x8d, 0x8d, - 0x8b, 0x8d, 0x8d, 0x8d, 0x90, 0x93, 0x96, 0x99, 0x99, 0x99, 0x98, 0x97, - 0x97, 0x94, 0x93, 0xb6, 0xc1, 0xbe, 0xc0, 0xb8, 0x9b, 0x9d, 0x9e, 0x9c, - 0x9d, 0x9f, 0xa4, 0xab, 0xb7, 0xc7, 0xd3, 0xe6, 0xf7, 0x09, 0x18, 0x25, - 0x33, 0x32, 0x3f, 0x49, 0x4d, 0x4b, 0x52, 0x51, 0x4f, 0x4d, 0x52, 0x50, - 0x4c, 0x4a, 0x55, 0x4d, 0x50, 0x4a, 0x54, 0x54, 0x51, 0x4c, 0x4d, 0x4b, - 0x4c, 0x4a, 0x46, 0x3f, 0x3e, 0x44, 0x41, 0x46, 0x44, 0x4a, 0x4c, 0x44, - 0x5c, 0x64, 0x5a, 0x50, 0x4f, 0x5e, 0x5a, 0x74, 0x62, 0x5e, 0x4c, 0x5e, - 0x8f, 0x90, 0x8e, 0x8e, 0x8f, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, - 0x8c, 0x8d, 0x8e, 0x8d, 0x90, 0x93, 0x97, 0x99, 0x9a, 0x9a, 0x9b, 0x99, - 0x98, 0x9b, 0x9a, 0xa7, 0xaf, 0xb1, 0xb2, 0xb9, 0xc4, 0xd4, 0xe5, 0xf6, - 0x0d, 0x1a, 0x20, 0x25, 0x31, 0x3d, 0x45, 0x41, 0x50, 
0x52, 0x55, 0x50, - 0x51, 0x55, 0x58, 0x4c, 0x50, 0x59, 0x54, 0x4e, 0x4f, 0x52, 0x57, 0x53, - 0x57, 0x56, 0x41, 0x53, 0x5a, 0x54, 0x53, 0x4f, 0x43, 0x4a, 0x45, 0x37, - 0x45, 0x48, 0x4b, 0x4d, 0x45, 0x4c, 0x4d, 0x51, 0x5a, 0x5d, 0x51, 0x53, - 0x66, 0x6f, 0x77, 0x73, 0x7a, 0x7f, 0x74, 0x7c, 0x6c, 0x6e, 0x6f, 0x72, - 0x8f, 0x8f, 0x8e, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, 0x8e, 0x8e, 0x8e, - 0x8d, 0x8f, 0x8f, 0x8e, 0x91, 0x97, 0x9c, 0xa0, 0xa4, 0xa9, 0xb2, 0xbb, - 0xc8, 0xd9, 0xe8, 0xf5, 0x0a, 0x1b, 0x2c, 0x3b, 0x41, 0x30, 0x43, 0x48, - 0x4d, 0x4e, 0x66, 0x6a, 0x5e, 0x65, 0x5b, 0x67, 0x62, 0x5a, 0x55, 0x49, - 0x5c, 0x57, 0x51, 0x4f, 0x58, 0x52, 0x57, 0x5b, 0x4e, 0x53, 0x4d, 0x55, - 0x4c, 0x52, 0x49, 0x4d, 0x4b, 0x4f, 0x4f, 0x48, 0x45, 0x44, 0x2d, 0x31, - 0x43, 0x49, 0x4a, 0x4d, 0x56, 0x5a, 0x4e, 0x59, 0x61, 0x5a, 0x5b, 0x69, - 0x5e, 0x60, 0x62, 0x72, 0x7f, 0x7f, 0x75, 0x59, 0x65, 0x4d, 0x63, 0x62, - 0x91, 0x91, 0x92, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x94, 0x97, 0x9a, - 0x9d, 0xa5, 0xb1, 0xbd, 0xce, 0xe3, 0xf0, 0x04, 0x0f, 0x1d, 0x28, 0x33, - 0x3b, 0x46, 0x4e, 0x4b, 0x5d, 0x5b, 0x65, 0x52, 0x53, 0x4b, 0x46, 0x50, - 0x55, 0x70, 0x7b, 0x66, 0x68, 0x6f, 0x6a, 0x60, 0x63, 0x56, 0x65, 0x63, - 0x5c, 0x62, 0x55, 0x58, 0x59, 0x57, 0x5b, 0x5c, 0x55, 0x58, 0x53, 0x52, - 0x4e, 0x54, 0x4e, 0x53, 0x51, 0x4f, 0x4e, 0x50, 0x4a, 0x47, 0x3d, 0x3c, - 0x49, 0x48, 0x4d, 0x50, 0x58, 0x59, 0x5f, 0x61, 0x60, 0x6a, 0x68, 0x7f, - 0x70, 0x5d, 0x66, 0x6f, 0x7a, 0x7f, 0x6f, 0x6c, 0x76, 0x6c, 0x56, 0x6f, - 0x97, 0x99, 0x9f, 0xa2, 0xaa, 0xb3, 0xc1, 0xd0, 0xe2, 0xf5, 0x0a, 0x14, - 0x1b, 0x25, 0x2e, 0x2f, 0x30, 0x37, 0x3a, 0x3f, 0x43, 0x48, 0x48, 0x55, - 0x4f, 0x5d, 0x64, 0x57, 0x66, 0x77, 0x76, 0x6f, 0x4e, 0x4f, 0x46, 0x52, - 0x6a, 0x64, 0x6e, 0x73, 0x6e, 0x6f, 0x70, 0x56, 0x5c, 0x64, 0x5e, 0x57, - 0x5e, 0x5c, 0x4f, 0x57, 0x52, 0x57, 0x59, 0x47, 0x57, 0x58, 0x52, 0x4d, - 0x50, 0x4d, 0x4f, 0x50, 0x55, 0x4e, 0x54, 0x53, 0x43, 0x4f, 0x4e, 0x4e, - 0x4b, 0x45, 0x53, 0x57, 0x5c, 0x56, 0x52, 0x63, 0x71, 
0x73, 0x67, 0x60, - 0x71, 0x76, 0x61, 0x5d, 0x67, 0x61, 0x5a, 0x4e, 0x42, 0x63, 0x6e, 0x63, - 0x02, 0x13, 0x20, 0x1e, 0x1e, 0x25, 0x2c, 0x2c, 0x2f, 0x30, 0x33, 0x39, - 0x3c, 0x3b, 0x3b, 0x3d, 0x42, 0x42, 0x45, 0x46, 0x52, 0x46, 0x5c, 0x5c, - 0x62, 0x60, 0x6c, 0x54, 0x7f, 0x6a, 0x7f, 0x6a, 0x4d, 0x4c, 0x4d, 0x51, - 0x5c, 0x5e, 0x7f, 0x6f, 0x6d, 0x72, 0x63, 0x5e, 0x64, 0x60, 0x5d, 0x50, - 0x62, 0x59, 0x5f, 0x5a, 0x62, 0x5b, 0x5c, 0x55, 0x54, 0x54, 0x57, 0x51, - 0x53, 0x52, 0x51, 0x53, 0x4b, 0x50, 0x54, 0x48, 0x4e, 0x4e, 0x47, 0x55, - 0x4b, 0x51, 0x58, 0x58, 0x52, 0x55, 0x56, 0x5e, 0x5d, 0x77, 0x64, 0x5d, - 0x6c, 0x5f, 0x72, 0x62, 0x68, 0x65, 0x16, 0x17, 0x18, 0x18, 0x47, 0x3a, - 0x2c, 0x2b, 0x29, 0x29, 0x2e, 0x32, 0x33, 0x35, 0x37, 0x36, 0x3d, 0x3e, - 0x39, 0x3f, 0x42, 0x42, 0x49, 0x47, 0x4b, 0x48, 0x49, 0x52, 0x56, 0x5a, - 0x62, 0x63, 0x71, 0x65, 0x58, 0x7f, 0x78, 0x64, 0x4e, 0x49, 0x42, 0x4e, - 0x54, 0x6e, 0x7f, 0x5e, 0x71, 0x63, 0x6f, 0x69, 0x57, 0x5b, 0x63, 0x5d, - 0x55, 0x5e, 0x53, 0x5a, 0x56, 0x57, 0x58, 0x56, 0x52, 0x4e, 0x43, 0x4c, - 0x52, 0x4e, 0x4b, 0x52, 0x50, 0x52, 0x4a, 0x52, 0x4a, 0x56, 0x46, 0x52, - 0x4a, 0x4e, 0x4d, 0x4c, 0x55, 0x55, 0x48, 0x3d, 0x38, 0x36, 0x3b, 0x41, - 0x3d, 0x56, 0x60, 0x67, 0x70, 0x74, 0x64, 0x3a, 0x11, 0x11, 0x0a, 0x1b, - 0x2b, 0x2b, 0x2c, 0x2b, 0x30, 0x2e, 0x2b, 0x34, 0x34, 0x3b, 0x3b, 0x43, - 0x42, 0x48, 0x3e, 0x45, 0x48, 0x43, 0x4d, 0x52, 0x4f, 0x4e, 0x5b, 0x5b, - 0x59, 0x5a, 0x73, 0x5d, 0x71, 0x65, 0x6c, 0x59, 0x4d, 0x4b, 0x48, 0x49, - 0x52, 0x73, 0x7f, 0x62, 0x68, 0x66, 0x64, 0x59, 0x69, 0x5a, 0x59, 0x5c, - 0x54, 0x5b, 0x57, 0x56, 0x5a, 0x4f, 0x55, 0x4e, 0x52, 0x4c, 0x50, 0x4c, - 0x4c, 0x52, 0x4f, 0x4c, 0x4d, 0x54, 0x4c, 0x4e, 0x45, 0x4a, 0x48, 0x42, - 0x4b, 0x45, 0x2e, 0x19, 0x02, 0xe6, 0xd8, 0xce, 0xca, 0xcc, 0xd3, 0xd6, - 0xda, 0xde, 0xee, 0x0c, 0x3a, 0x5f, 0x62, 0x64, 0x59, 0x29, 0x01, 0xf1, - 0x28, 0x2c, 0x27, 0x2e, 0x2f, 0x33, 0x30, 0x34, 0x35, 0x38, 0x37, 0x3b, - 0x41, 0x44, 0x46, 0x46, 0x49, 0x4a, 0x52, 0x4f, 0x4c, 
0x57, 0x5c, 0x5e, - 0x58, 0x4f, 0x67, 0x70, 0x79, 0x68, 0x7f, 0x74, 0x4d, 0x4a, 0x48, 0x4a, - 0x50, 0x5e, 0x79, 0x6f, 0x5f, 0x65, 0x5b, 0x72, 0x53, 0x65, 0x67, 0x58, - 0x57, 0x58, 0x5b, 0x5c, 0x53, 0x58, 0x51, 0x4f, 0x54, 0x4d, 0x56, 0x51, - 0x51, 0x52, 0x52, 0x4d, 0x4d, 0x46, 0x43, 0x39, 0x31, 0x1c, 0x1b, 0x42, - 0x4b, 0x48, 0x0a, 0xc2, 0xb3, 0xb0, 0xb3, 0xb1, 0xc8, 0xe3, 0xf9, 0xfa, - 0xf5, 0xe7, 0xde, 0xd8, 0xd9, 0xf5, 0x1a, 0x4d, 0x6f, 0x54, 0x54, 0x11, - 0x26, 0x2c, 0x2c, 0x29, 0x30, 0x31, 0x33, 0x33, 0x36, 0x39, 0x3d, 0x3d, - 0x3c, 0x3e, 0x3f, 0x45, 0x46, 0x4a, 0x4c, 0x51, 0x56, 0x51, 0x5c, 0x67, - 0x6c, 0x69, 0x74, 0x5e, 0x65, 0x6c, 0x72, 0x6a, 0x47, 0x46, 0x42, 0x48, - 0x47, 0x5e, 0x72, 0x76, 0x63, 0x6d, 0x6e, 0x68, 0x67, 0x60, 0x5f, 0x5c, - 0x5a, 0x5e, 0x5e, 0x4c, 0x55, 0x50, 0x4f, 0x59, 0x53, 0x4c, 0x57, 0x51, - 0x55, 0x4c, 0x3a, 0x2c, 0x1d, 0x02, 0xeb, 0xd6, 0xc6, 0xba, 0xb8, 0xf2, - 0x3f, 0x47, 0x3f, 0x0e, 0xbc, 0xae, 0xad, 0xb1, 0xcf, 0x1f, 0x45, 0x4e, - 0x53, 0x44, 0x3b, 0x1a, 0xee, 0xe7, 0xef, 0x01, 0x1c, 0x5d, 0x5d, 0x5a, - 0x24, 0x28, 0x28, 0x2d, 0x2e, 0x34, 0x32, 0x35, 0x3a, 0x35, 0x39, 0x41, - 0x3a, 0x3f, 0x42, 0x45, 0x3d, 0x40, 0x4a, 0x4c, 0x56, 0x4d, 0x5b, 0x53, - 0x5c, 0x5d, 0x5d, 0x67, 0x55, 0x67, 0x67, 0x70, 0x4d, 0x4c, 0x47, 0x41, - 0x46, 0x4f, 0x62, 0x64, 0x5e, 0x72, 0x67, 0x64, 0x6a, 0x5b, 0x59, 0x59, - 0x53, 0x57, 0x5c, 0x58, 0x4f, 0x51, 0x52, 0x50, 0x53, 0x55, 0x4e, 0x50, - 0x4a, 0x1b, 0xdb, 0xc6, 0xba, 0xaf, 0xa5, 0xa5, 0xad, 0xb5, 0xc2, 0xd9, - 0x20, 0x43, 0x48, 0x41, 0x19, 0xc8, 0xad, 0xab, 0xad, 0xcb, 0x16, 0x46, - 0x57, 0x51, 0x59, 0x4c, 0x31, 0xe1, 0xd7, 0xdd, 0xfa, 0x17, 0x5b, 0x65, - 0x28, 0x25, 0x29, 0x28, 0x2e, 0x30, 0x30, 0x30, 0x38, 0x38, 0x3a, 0x3b, - 0x3d, 0x3b, 0x41, 0x45, 0x41, 0x48, 0x42, 0x4b, 0x50, 0x5a, 0x55, 0x5f, - 0x5d, 0x69, 0x60, 0x51, 0x63, 0x7f, 0x72, 0x59, 0x4a, 0x47, 0x42, 0x45, - 0x45, 0x3f, 0x58, 0x6e, 0x69, 0x6d, 0x50, 0x64, 0x5b, 0x5e, 0x60, 0x4f, - 0x53, 0x56, 0x55, 0x5b, 0x55, 0x56, 0x51, 0x52, 0x56, 
0x54, 0x53, 0x51, - 0x4b, 0x35, 0xd2, 0xa6, 0xa1, 0xa0, 0xa0, 0xaf, 0xf1, 0x13, 0x24, 0x30, - 0x33, 0x34, 0x44, 0x42, 0x45, 0x26, 0xd1, 0xad, 0xaa, 0xad, 0xbd, 0xf1, - 0x40, 0x4e, 0x53, 0x4e, 0x51, 0x1e, 0xcd, 0xc8, 0xda, 0xf1, 0x1d, 0x6c, - 0x21, 0x25, 0x27, 0x2c, 0x2c, 0x2e, 0x33, 0x30, 0x31, 0x35, 0x3c, 0x3c, - 0x3e, 0x3d, 0x3f, 0x46, 0x41, 0x44, 0x45, 0x4b, 0x53, 0x4c, 0x58, 0x60, - 0x54, 0x5b, 0x5f, 0x6e, 0x54, 0x5e, 0x69, 0x56, 0x3e, 0x46, 0x3e, 0x49, - 0x4b, 0x58, 0x66, 0x71, 0x58, 0x6a, 0x6a, 0x72, 0x62, 0x63, 0x51, 0x5f, - 0x5d, 0x5f, 0x5c, 0x5a, 0x54, 0x50, 0x52, 0x53, 0x4c, 0x52, 0x50, 0x54, - 0x51, 0x49, 0x25, 0xc6, 0xa2, 0xa0, 0x9e, 0xa4, 0xdf, 0x18, 0x11, 0xfe, - 0xe7, 0xd4, 0xf2, 0x30, 0x45, 0x45, 0x2c, 0xe2, 0xaf, 0xaa, 0xad, 0xb5, - 0xd6, 0x28, 0x46, 0x4a, 0x48, 0x28, 0xde, 0xcc, 0xce, 0xd9, 0xf9, 0x5a, - 0x27, 0x25, 0x25, 0x2e, 0x29, 0x2e, 0x2e, 0x31, 0x31, 0x36, 0x38, 0x3b, - 0x38, 0x3f, 0x3e, 0x44, 0x3e, 0x46, 0x48, 0x46, 0x49, 0x4a, 0x4d, 0x54, - 0x56, 0x5c, 0x60, 0x6c, 0x74, 0x64, 0x66, 0x5c, 0x4f, 0x42, 0x47, 0x48, - 0x54, 0x5f, 0x69, 0x6a, 0x6d, 0x6d, 0x70, 0x68, 0x67, 0x61, 0x64, 0x67, - 0x5d, 0x5f, 0x52, 0x4f, 0x4f, 0x57, 0x5a, 0x55, 0x54, 0x51, 0x50, 0x4d, - 0x51, 0x4d, 0x48, 0x19, 0xbb, 0xa2, 0x9f, 0xa1, 0xa6, 0xb9, 0xb0, 0xab, - 0xab, 0xa9, 0xb1, 0xe4, 0x38, 0x3e, 0x44, 0x33, 0xfa, 0xb4, 0xab, 0xac, - 0xb5, 0xc2, 0x01, 0x00, 0xe7, 0xca, 0xcf, 0xd1, 0xd1, 0xd7, 0x05, 0x52, - 0x22, 0x20, 0x25, 0x28, 0x28, 0x2c, 0x2c, 0x30, 0x31, 0x33, 0x32, 0x35, - 0x3e, 0x3a, 0x3d, 0x3b, 0x3d, 0x40, 0x3c, 0x4b, 0x49, 0x50, 0x4f, 0x56, - 0x54, 0x5b, 0x57, 0x67, 0x69, 0x63, 0x6c, 0x5b, 0x54, 0x46, 0x3e, 0x4b, - 0x4b, 0x6a, 0x75, 0x51, 0x60, 0x69, 0x60, 0x65, 0x61, 0x56, 0x60, 0x5d, - 0x5d, 0x5f, 0x53, 0x51, 0x57, 0x57, 0x5a, 0x4a, 0x57, 0x53, 0x4f, 0x55, - 0x4d, 0x4c, 0x48, 0x3e, 0x0a, 0xb4, 0xa0, 0x9e, 0xa0, 0x9d, 0xa2, 0xa7, - 0xb2, 0xc6, 0xe5, 0xfd, 0x2b, 0x40, 0x45, 0x42, 0x3c, 0x0e, 0xbd, 0xad, - 0xac, 0xb0, 0xb4, 0xbb, 0xb9, 0xb8, 0xbc, 0xc4, 0xd6, 
0xf8, 0x38, 0x51}; -unsigned int _tmp_dump_no_person_3_bmp_len = 9216; diff --git a/pico_sdk_import.cmake b/pico_sdk_import.cmake index f63ee3f..a0721d0 100644 --- a/pico_sdk_import.cmake +++ b/pico_sdk_import.cmake @@ -3,8 +3,6 @@ # This can be dropped into an external project to help locate this SDK # It should be include()ed prior to project() -# todo document - if (DEFINED ENV{PICO_SDK_PATH} AND (NOT PICO_SDK_PATH)) set(PICO_SDK_PATH $ENV{PICO_SDK_PATH}) message("Using PICO_SDK_PATH from environment ('${PICO_SDK_PATH}')") @@ -20,9 +18,20 @@ if (DEFINED ENV{PICO_SDK_FETCH_FROM_GIT_PATH} AND (NOT PICO_SDK_FETCH_FROM_GIT_P message("Using PICO_SDK_FETCH_FROM_GIT_PATH from environment ('${PICO_SDK_FETCH_FROM_GIT_PATH}')") endif () -set(PICO_SDK_PATH "${PICO_SDK_PATH}" CACHE PATH "Path to the PICO SDK") -set(PICO_SDK_FETCH_FROM_GIT "${PICO_SDK_FETCH_FROM_GIT}" CACHE BOOL "Set to ON to fetch copy of PICO SDK from git if not otherwise locatable") +if (DEFINED ENV{PICO_SDK_FETCH_FROM_GIT_TAG} AND (NOT PICO_SDK_FETCH_FROM_GIT_TAG)) + set(PICO_SDK_FETCH_FROM_GIT_TAG $ENV{PICO_SDK_FETCH_FROM_GIT_TAG}) + message("Using PICO_SDK_FETCH_FROM_GIT_TAG from environment ('${PICO_SDK_FETCH_FROM_GIT_TAG}')") +endif () + +if (PICO_SDK_FETCH_FROM_GIT AND NOT PICO_SDK_FETCH_FROM_GIT_TAG) + set(PICO_SDK_FETCH_FROM_GIT_TAG "master") + message("Using master as default value for PICO_SDK_FETCH_FROM_GIT_TAG") +endif() + +set(PICO_SDK_PATH "${PICO_SDK_PATH}" CACHE PATH "Path to the Raspberry Pi Pico SDK") +set(PICO_SDK_FETCH_FROM_GIT "${PICO_SDK_FETCH_FROM_GIT}" CACHE BOOL "Set to ON to fetch copy of SDK from git if not otherwise locatable") set(PICO_SDK_FETCH_FROM_GIT_PATH "${PICO_SDK_FETCH_FROM_GIT_PATH}" CACHE FILEPATH "location to download SDK") +set(PICO_SDK_FETCH_FROM_GIT_TAG "${PICO_SDK_FETCH_FROM_GIT_TAG}" CACHE FILEPATH "release tag for SDK") if (NOT PICO_SDK_PATH) if (PICO_SDK_FETCH_FROM_GIT) @@ -31,20 +40,31 @@ if (NOT PICO_SDK_PATH) if (PICO_SDK_FETCH_FROM_GIT_PATH) 
get_filename_component(FETCHCONTENT_BASE_DIR "${PICO_SDK_FETCH_FROM_GIT_PATH}" REALPATH BASE_DIR "${CMAKE_SOURCE_DIR}") endif () - FetchContent_Declare( - pico_sdk - GIT_REPOSITORY https://github.com/raspberrypi/pico-sdk - GIT_TAG master - ) + # GIT_SUBMODULES_RECURSE was added in 3.17 + if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.17.0") + FetchContent_Declare( + pico_sdk + GIT_REPOSITORY https://github.com/raspberrypi/pico-sdk + GIT_TAG ${PICO_SDK_FETCH_FROM_GIT_TAG} + GIT_SUBMODULES_RECURSE FALSE + ) + else () + FetchContent_Declare( + pico_sdk + GIT_REPOSITORY https://github.com/raspberrypi/pico-sdk + GIT_TAG ${PICO_SDK_FETCH_FROM_GIT_TAG} + ) + endif () + if (NOT pico_sdk) - message("Downloading PICO SDK") + message("Downloading Raspberry Pi Pico SDK") FetchContent_Populate(pico_sdk) set(PICO_SDK_PATH ${pico_sdk_SOURCE_DIR}) endif () set(FETCHCONTENT_BASE_DIR ${FETCHCONTENT_BASE_DIR_SAVE}) else () message(FATAL_ERROR - "PICO SDK location was not specified. Please set PICO_SDK_PATH or set PICO_SDK_FETCH_FROM_GIT to on to fetch from git." + "SDK location was not specified. Please set PICO_SDK_PATH or set PICO_SDK_FETCH_FROM_GIT to on to fetch from git." 
) endif () endif () @@ -56,9 +76,9 @@ endif () set(PICO_SDK_INIT_CMAKE_FILE ${PICO_SDK_PATH}/pico_sdk_init.cmake) if (NOT EXISTS ${PICO_SDK_INIT_CMAKE_FILE}) - message(FATAL_ERROR "Directory '${PICO_SDK_PATH}' does not appear to contain the PICO SDK") + message(FATAL_ERROR "Directory '${PICO_SDK_PATH}' does not appear to contain the Raspberry Pi Pico SDK") endif () -set(PICO_SDK_PATH ${PICO_SDK_PATH} CACHE PATH "Path to the PICO SDK" FORCE) +set(PICO_SDK_PATH ${PICO_SDK_PATH} CACHE PATH "Path to the Raspberry Pi Pico SDK" FORCE) include(${PICO_SDK_INIT_CMAKE_FILE}) diff --git a/src/signal/micro/kernels/delay.cpp b/src/signal/micro/kernels/delay.cpp new file mode 100644 index 0000000..33ef35e --- /dev/null +++ b/src/signal/micro/kernels/delay.cpp @@ -0,0 +1,154 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "signal/src/circular_buffer.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +constexpr int kDelayLengthIndex = 0; // 'delay_length' + +struct TFLMSignalFrontendDelayParams { + int32_t frame_size; + int32_t delay_length; + int32_t outer_dims; + + int8_t** state_buffers; + tflm_signal::CircularBuffer** circular_buffers; +}; + +void* DelayInit(TfLiteContext* context, const char* buffer, size_t length) { + auto* params = static_cast( + context->AllocatePersistentBuffer(context, + sizeof(TFLMSignalFrontendDelayParams))); + + if (params == nullptr) { + return nullptr; + } + + FlexbufferWrapper fbw(reinterpret_cast(buffer), length); + params->delay_length = fbw.ElementAsInt32(kDelayLengthIndex); + return params; +} + +TfLiteStatus DelayPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + 
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + + auto* params = + reinterpret_cast(node->user_data); + + TF_LITE_ENSURE(context, params != nullptr); + + RuntimeShape input_shape = GetTensorShape(input); + int innermost_dim = input_shape.Dims(input_shape.DimensionsCount() - 1); + params->outer_dims = input_shape.FlatSize() / innermost_dim; + params->frame_size = innermost_dim; + + params->state_buffers = + static_cast(context->AllocatePersistentBuffer( + context, params->outer_dims * sizeof(int8_t*))); + params->circular_buffers = static_cast( + context->AllocatePersistentBuffer( + context, params->outer_dims * sizeof(tflm_signal::CircularBuffer*))); + + for (int i = 0; i < params->outer_dims; i++) { + size_t capacity = params->frame_size + params->delay_length; + + size_t state_size = tflm_signal::CircularBufferGetNeededMemory(capacity); + params->state_buffers[i] = + static_cast(context->AllocatePersistentBuffer( + context, state_size * sizeof(int8_t))); + params->circular_buffers[i] = tflm_signal::CircularBufferInit( + capacity, params->state_buffers[i], state_size); + tflm_signal::CircularBufferWriteZeros(params->circular_buffers[i], + params->delay_length); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus DelayEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->user_data); + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + const int16_t* input_data = micro::GetTensorData(input); + int16_t* output_data = micro::GetTensorData(output); + + for (int dim_index = 0, sample_index = 0; dim_index < params->outer_dims; + dim_index++, sample_index += params->frame_size) { + tflm_signal::CircularBufferWrite(params->circular_buffers[dim_index], + &input_data[sample_index], + params->frame_size); + 
tflm_signal::CircularBufferGet(params->circular_buffers[dim_index], + params->frame_size, + &output_data[sample_index]); + tflm_signal::CircularBufferDiscard(params->circular_buffers[dim_index], + params->frame_size); + } + return kTfLiteOk; +} + +void DelayReset(TfLiteContext* context, void* buffer) { + auto* params = static_cast(buffer); + for (int i = 0; i < params->outer_dims; ++i) { + tflm_signal::CircularBufferReset(params->circular_buffers[i]); + tflm_signal::CircularBufferWriteZeros(params->circular_buffers[i], + params->delay_length); + } +} + +} // namespace + +namespace tflm_signal { +TFLMRegistration* Register_DELAY() { + static TFLMRegistration r = micro::RegisterOp(DelayInit, DelayPrepare, + DelayEval, nullptr, DelayReset); + return &r; +} +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/delay_flexbuffers_generated_data.h b/src/signal/micro/kernels/delay_flexbuffers_generated_data.h new file mode 100644 index 0000000..c79273e --- /dev/null +++ b/src/signal/micro/kernels/delay_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_DELAY_FLEXBUFFERS_GENERATED_DATA_H_ +#define SIGNAL_MICRO_KERNELS_DELAY_FLEXBUFFERS_GENERATED_DATA_H_ + +extern const int g_gen_data_size_3_delay; +extern const unsigned char g_gen_data_3_delay[]; + +extern const int g_gen_data_size_5_delay; +extern const unsigned char g_gen_data_5_delay[]; + +#endif // SIGNAL_MICRO_KERNELS_DELAY_FLEXBUFFERS_GENERATED_DATA_H_ diff --git a/src/signal/micro/kernels/energy.cpp b/src/signal/micro/kernels/energy.cpp new file mode 100644 index 0000000..6a86366 --- /dev/null +++ b/src/signal/micro/kernels/energy.cpp @@ -0,0 +1,113 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/energy.h" + +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. 
+constexpr int kEndIndexIndex = 0; // 'end_index' +constexpr int kStartIndexIndex = 1; // 'start_index' + +struct TFLMSignalEnergyParams { + int32_t end_index; + int32_t start_index; +}; + +void* EnergyInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + + auto* data = + static_cast(context->AllocatePersistentBuffer( + context, sizeof(TFLMSignalEnergyParams))); + + if (data == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + data->end_index = fbw.ElementAsInt32(kEndIndexIndex); + data->start_index = fbw.ElementAsInt32(kStartIndexIndex); + return data; +} + +TfLiteStatus EnergyPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus EnergyEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const Complex* input_data = + tflite::micro::GetTensorData>(input); + uint32_t* output_data = 
tflite::micro::GetTensorData(output); + + tflm_signal::SpectrumToEnergy(input_data, params->start_index, + params->end_index, output_data); + return kTfLiteOk; +} + +} // namespace + +namespace tflm_signal { +TFLMRegistration* Register_ENERGY() { + static TFLMRegistration r = + tflite::micro::RegisterOp(EnergyInit, EnergyPrepare, EnergyEval); + return &r; +} +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/energy_flexbuffers_generated_data.h b/src/signal/micro/kernels/energy_flexbuffers_generated_data.h new file mode 100644 index 0000000..f2840f6 --- /dev/null +++ b/src/signal/micro/kernels/energy_flexbuffers_generated_data.h @@ -0,0 +1,28 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_ENERGY_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_ENERGY_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_start_index_2_end_index_4; +extern const unsigned char g_gen_data_start_index_2_end_index_4[]; + +extern const int g_gen_data_size_start_index_0_end_index_4; +extern const unsigned char g_gen_data_start_index_0_end_index_4[]; + +extern const int g_gen_data_size_start_index_4_end_index_8; +extern const unsigned char g_gen_data_start_index_4_end_index_8[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_ENERGY_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/fft_auto_scale_common.cpp b/src/signal/micro/kernels/fft_auto_scale_common.cpp new file mode 100644 index 0000000..8703ac6 --- /dev/null +++ b/src/signal/micro/kernels/fft_auto_scale_common.cpp @@ -0,0 +1,54 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "signal/micro/kernels/fft_auto_scale_kernel.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kScaleBitTensor = 1; + +TfLiteStatus FftAutoScalePrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* scale_bit = + micro_context->AllocateTempOutputTensor(node, kScaleBitTensor); + TF_LITE_ENSURE(context, scale_bit != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(scale_bit), 0); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, scale_bit->type, kTfLiteInt32); + + micro_context->DeallocateTempTfLiteTensor(scale_bit); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/signal/micro/kernels/fft_auto_scale_kernel.cpp b/src/signal/micro/kernels/fft_auto_scale_kernel.cpp new file mode 100644 index 0000000..4946fb3 --- /dev/null +++ b/src/signal/micro/kernels/fft_auto_scale_kernel.cpp @@ -0,0 +1,64 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/micro/kernels/fft_auto_scale_kernel.h" + +#include +#include +#include + +#include "signal/src/fft_auto_scale.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kScaleBitTensor = 1; + +TfLiteStatus FftAutoScaleEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TfLiteEvalTensor* scale_bit = + tflite::micro::GetEvalOutput(context, node, kScaleBitTensor); + + const int16_t* input_data = tflite::micro::GetTensorData(input); + int16_t* output_data = tflite::micro::GetTensorData(output); + int32_t* scale_bit_data = tflite::micro::GetTensorData(scale_bit); + + *scale_bit_data = + tflm_signal::FftAutoScale(input_data, output->dims->data[0], output_data); + return kTfLiteOk; +} + +} // namespace + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +TFLMRegistration* Register_FFT_AUTO_SCALE() { + static TFLMRegistration r = + 
tflite::micro::RegisterOp(nullptr, FftAutoScalePrepare, FftAutoScaleEval); + return &r; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/micro/kernels/fft_auto_scale_kernel.h b/src/signal/micro/kernels/fft_auto_scale_kernel.h new file mode 100644 index 0000000..9461c90 --- /dev/null +++ b/src/signal/micro/kernels/fft_auto_scale_kernel.h @@ -0,0 +1,26 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_ +#define SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteStatus FftAutoScalePrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // SIGNAL_MICRO_KERNELS_FFT_AUTO_SCALE_KERNEL_H_ diff --git a/src/signal/micro/kernels/fft_flexbuffers_generated_data.h b/src/signal/micro/kernels/fft_flexbuffers_generated_data.h new file mode 100644 index 0000000..9471b83 --- /dev/null +++ b/src/signal/micro/kernels/fft_flexbuffers_generated_data.h @@ -0,0 +1,37 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FFT_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FFT_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_fft_length_64_float; +extern const unsigned char g_gen_data_fft_length_64_float[]; + +extern const int g_gen_data_size_fft_length_64_int16; +extern const unsigned char g_gen_data_fft_length_64_int16[]; + +extern const int g_gen_data_size_fft_length_64_int32; +extern const unsigned char g_gen_data_fft_length_64_int32[]; + +extern const int g_gen_data_size_fft_length_512_float; +extern const unsigned char g_gen_data_fft_length_512_float[]; + +extern const int g_gen_data_size_fft_length_512_int16; +extern const unsigned char g_gen_data_fft_length_512_int16[]; + +extern const int g_gen_data_size_fft_length_512_int32; +extern const unsigned char g_gen_data_fft_length_512_int32[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FFT_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/filter_bank.cpp b/src/signal/micro/kernels/filter_bank.cpp new file mode 100644 index 0000000..1cf08d2 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank.cpp @@ -0,0 +1,177 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/filter_bank.h" + +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kWeightTensor = 1; +constexpr int kUnweightTensor = 2; +constexpr int kChFreqStartsTensor = 3; +constexpr int kChWeightStartsTensor = 4; +constexpr int kChannelWidthsTensor = 5; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. 
+constexpr int kNumChannelsIndex = 0; // 'num_channels' + +struct TFLMSignalFilterBankParams { + tflm_signal::FilterbankConfig config; + uint64_t* work_area; +}; + +void* FilterBankInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + + auto* params = static_cast( + context->AllocatePersistentBuffer(context, + sizeof(TFLMSignalFilterBankParams))); + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + params->config.num_channels = fbw.ElementAsInt32(kNumChannelsIndex); + + params->work_area = static_cast(context->AllocatePersistentBuffer( + context, (params->config.num_channels + 1) * sizeof(uint64_t))); + + if (params->work_area == nullptr) { + return nullptr; + } + + return params; +} + +TfLiteStatus FilterBankPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 6); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt32); + micro_context->DeallocateTempTfLiteTensor(input); + + input = micro_context->AllocateTempInputTensor(node, kWeightTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + micro_context->DeallocateTempTfLiteTensor(input); + + input = micro_context->AllocateTempInputTensor(node, kUnweightTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + micro_context->DeallocateTempTfLiteTensor(input); + + input = 
micro_context->AllocateTempInputTensor(node, kChFreqStartsTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + micro_context->DeallocateTempTfLiteTensor(input); + + input = micro_context->AllocateTempInputTensor(node, kChWeightStartsTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + micro_context->DeallocateTempTfLiteTensor(input); + + input = micro_context->AllocateTempInputTensor(node, kChannelWidthsTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + micro_context->DeallocateTempTfLiteTensor(input); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt64); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus FilterBankEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input0 = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kWeightTensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kUnweightTensor); + const TfLiteEvalTensor* input3 = + tflite::micro::GetEvalInput(context, node, kChFreqStartsTensor); + const TfLiteEvalTensor* input4 = + tflite::micro::GetEvalInput(context, node, kChWeightStartsTensor); + const TfLiteEvalTensor* input5 = + tflite::micro::GetEvalInput(context, node, kChannelWidthsTensor); + TfLiteEvalTensor* output = + 
tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + params->config.weights = tflite::micro::GetTensorData(input1); + params->config.unweights = tflite::micro::GetTensorData(input2); + params->config.channel_frequency_starts = + tflite::micro::GetTensorData(input3); + params->config.channel_weight_starts = + tflite::micro::GetTensorData(input4); + params->config.channel_widths = tflite::micro::GetTensorData(input5); + + const uint32_t* input_data = tflite::micro::GetTensorData(input0); + uint64_t* output_data = tflite::micro::GetTensorData(output); + tflm_signal::FilterbankAccumulateChannels(¶ms->config, input_data, + params->work_area); + + size_t output_size; + TfLiteTypeSizeOf(output->type, &output_size); + output_size *= ElementCount(*output->dims); + // Discard channel 0, which is just scratch + memcpy(output_data, params->work_area + 1, output_size); + return kTfLiteOk; +} + +} // namespace + +namespace tflm_signal { + +TFLMRegistration* Register_FILTER_BANK() { + static TFLMRegistration r = tflite::micro::RegisterOp( + FilterBankInit, FilterBankPrepare, FilterBankEval); + return &r; +} + +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h b/src/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h new file mode 100644 index 0000000..59e74e7 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_filter_bank_32_channel; +extern const unsigned char g_gen_data_filter_bank_32_channel[]; + +extern const int g_gen_data_size_filter_bank_16_channel; +extern const unsigned char g_gen_data_filter_bank_16_channel[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/filter_bank_log.cpp b/src/signal/micro/kernels/filter_bank_log.cpp new file mode 100644 index 0000000..3d38e61 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_log.cpp @@ -0,0 +1,114 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/filter_bank_log.h" + +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +constexpr int kInputCorrectionBitsIndex = 0; // 'input_correction_bits' +constexpr int kOutputScaleIndex = 1; // 'output_scale' + +struct TFLMSignalLogParams { + int input_correction_bits; + int output_scale; +}; + +void* FilterBankLogInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + + auto* params = static_cast( + context->AllocatePersistentBuffer(context, sizeof(TFLMSignalLogParams))); + + if (params == nullptr) { + return nullptr; + } + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + + params->input_correction_bits = fbw.ElementAsInt32(kInputCorrectionBitsIndex); + params->output_scale = fbw.ElementAsInt32(kOutputScaleIndex); + return params; +} + +TfLiteStatus FilterBankLogPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, 
input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus FilterBankLogEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const uint32_t* input_data = tflite::micro::GetTensorData(input); + int16_t* output_data = tflite::micro::GetTensorData(output); + int num_channels = input->dims->data[0]; + tflm_signal::FilterbankLog(input_data, num_channels, params->output_scale, + params->input_correction_bits, output_data); + return kTfLiteOk; +} + +} // namespace + +namespace tflm_signal { + +TFLMRegistration* Register_FILTER_BANK_LOG() { + static TFLMRegistration r = tflite::micro::RegisterOp( + FilterBankLogInit, FilterBankLogPrepare, FilterBankLogEval); + return &r; +} + +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h b/src/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h new file mode 100644 index 0000000..dbc3bd9 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h @@ -0,0 +1,27 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_LOG_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_LOG_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_filter_bank_log_scale_1600_correction_bits_3; +extern const unsigned char + g_gen_data_filter_bank_log_scale_1600_correction_bits_3[]; + +extern const int g_gen_data_size_filter_bank_log_scale_32768_correction_bits_5; +extern const unsigned char + g_gen_data_filter_bank_log_scale_32768_correction_bits_5[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_LOG_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/filter_bank_spectral_subtraction.cpp b/src/signal/micro/kernels/filter_bank_spectral_subtraction.cpp new file mode 100644 index 0000000..e069323 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_spectral_subtraction.cpp @@ -0,0 +1,184 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/filter_bank_spectral_subtraction.h" + +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kNoiseEstimateTensor = 1; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +// 'alternate_one_minus_smoothing' +constexpr int kAlternateOneMinusSmoothingIndex = 0; +constexpr int kAlternateSmoothingIndex = 1; // 'alternate_smoothing' +constexpr int kClampingIndex = 2; // 'clamping' +constexpr int kMinSignalRemainingIndex = 3; // 'min_signal_remaining' +constexpr int kNumChannelsIndex = 4; // 'num_channels' +constexpr int kOneMinusSmoothingIndex = 5; // 'one_minus_smoothing' +constexpr int kSmoothingIndex = 6; // 'smoothing' +constexpr int kSmoothingBitsIndex = 7; // 'smoothing_bits' +constexpr int kSpectralSubtractionBitsIndex = 8; // 'spectral_subtraction_bits' + +struct TFLMSignalSpectralSubtractionParams { + tflm_signal::SpectralSubtractionConfig config; + uint32_t* noise_estimate; + size_t noise_estimate_size; +}; + +void FilterBankSpectralSubtractionResetState( + TFLMSignalSpectralSubtractionParams* params) { + memset(params->noise_estimate, 0, + sizeof(uint32_t) * params->config.num_channels); +} + +void* FilterBankSpectralSubtractionInit(TfLiteContext* context, + const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer 
!= nullptr); + + auto* params = static_cast( + context->AllocatePersistentBuffer( + context, sizeof(TFLMSignalSpectralSubtractionParams))); + + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + params->config.alternate_one_minus_smoothing = + fbw.ElementAsInt32(kAlternateOneMinusSmoothingIndex); + params->config.alternate_smoothing = + fbw.ElementAsInt32(kAlternateSmoothingIndex); + params->config.clamping = fbw.ElementAsBool(kClampingIndex); + params->config.min_signal_remaining = + fbw.ElementAsInt32(kMinSignalRemainingIndex); + params->config.num_channels = fbw.ElementAsInt32(kNumChannelsIndex); + params->config.one_minus_smoothing = + fbw.ElementAsInt32(kOneMinusSmoothingIndex); + params->config.one_minus_smoothing = + fbw.ElementAsInt32(kOneMinusSmoothingIndex); + params->config.smoothing = fbw.ElementAsInt32(kSmoothingIndex); + params->config.smoothing_bits = fbw.ElementAsInt32(kSmoothingBitsIndex); + params->config.spectral_subtraction_bits = + fbw.ElementAsInt32(kSpectralSubtractionBitsIndex); + params->noise_estimate = + static_cast(context->AllocatePersistentBuffer( + context, params->config.num_channels * sizeof(uint32_t))); + + if (params->noise_estimate == nullptr) { + return nullptr; + } + + return params; +} + +TfLiteStatus FilterBankSpectralSubtractionPrepare(TfLiteContext* context, + TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TfLiteTensor* noise_estimate = + micro_context->AllocateTempOutputTensor(node, kNoiseEstimateTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE(context, noise_estimate != nullptr); 
+ + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(noise_estimate), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32); + TF_LITE_ENSURE_TYPES_EQ(context, noise_estimate->type, kTfLiteUInt32); + + auto* params = + reinterpret_cast(node->user_data); + TfLiteTypeSizeOf(output->type, ¶ms->noise_estimate_size); + params->noise_estimate_size *= ElementCount(*noise_estimate->dims); + + FilterBankSpectralSubtractionResetState(params); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(noise_estimate); + return kTfLiteOk; +} + +TfLiteStatus FilterBankSpectralSubtractionEval(TfLiteContext* context, + TfLiteNode* node) { + auto* params = + reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TfLiteEvalTensor* noise_estimate = + tflite::micro::GetEvalOutput(context, node, kNoiseEstimateTensor); + + const uint32_t* input_data = tflite::micro::GetTensorData(input); + uint32_t* output_data = tflite::micro::GetTensorData(output); + uint32_t* noise_estimate_data = + tflite::micro::GetTensorData(noise_estimate); + + FilterbankSpectralSubtraction(¶ms->config, input_data, output_data, + params->noise_estimate); + + memcpy(noise_estimate_data, params->noise_estimate, + params->noise_estimate_size); + + return kTfLiteOk; +} + +void FilterBankSpectralSubtractionReset(TfLiteContext* context, void* buffer) { + FilterBankSpectralSubtractionResetState( + static_cast(buffer)); +} + +} // namespace + +namespace tflm_signal { + +TFLMRegistration* Register_FILTER_BANK_SPECTRAL_SUBTRACTION() { + static TFLMRegistration r = 
tflite::micro::RegisterOp( + FilterBankSpectralSubtractionInit, FilterBankSpectralSubtractionPrepare, + FilterBankSpectralSubtractionEval, + /*Free*/ nullptr, FilterBankSpectralSubtractionReset); + return &r; +} + +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h b/src/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h new file mode 100644 index 0000000..175a14e --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h @@ -0,0 +1,26 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_SPECTRAL_SUBTRACTION_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_SPECTRAL_SUBTRACTION_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_filter_bank_spectral_subtraction_32_channel; +extern const unsigned char + g_gen_data_filter_bank_spectral_subtraction_32_channel[]; +extern const int g_gen_data_size_filter_bank_spectral_subtraction_16_channel; +extern const unsigned char + g_gen_data_filter_bank_spectral_subtraction_16_channel[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FILTER_BANK_SPECTRAL_SUBTRACTION_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/filter_bank_square_root.cpp b/src/signal/micro/kernels/filter_bank_square_root.cpp new file mode 100644 index 0000000..bd7eff9 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_square_root.cpp @@ -0,0 +1,65 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/filter_bank_square_root.h" + +#include + +#include "signal/micro/kernels/filter_bank_square_root.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kScaleBitsTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus FilterBankSquareRootEval(TfLiteContext* context, + TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* scale_bits = + tflite::micro::GetEvalInput(context, node, kScaleBitsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const uint64_t* input_data = tflite::micro::GetTensorData(input); + const int32_t* scale_bits_data = + tflite::micro::GetTensorData(scale_bits); + uint32_t* output_data = tflite::micro::GetTensorData(output); + int32_t num_channels = input->dims->data[0]; + tflm_signal::FilterbankSqrt(input_data, num_channels, *scale_bits_data, + output_data); + return kTfLiteOk; +} + +} // namespace + +namespace tflm_signal { + +TFLMRegistration* Register_FILTER_BANK_SQUARE_ROOT() { + static TFLMRegistration r = tflite::micro::RegisterOp( + nullptr, FilterBankSquareRootPrepare, FilterBankSquareRootEval); + return &r; +} + +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/filter_bank_square_root.h b/src/signal/micro/kernels/filter_bank_square_root.h new file mode 100644 index 0000000..25b6779 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_square_root.h @@ -0,0 +1,27 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_ +#define SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteStatus FilterBankSquareRootPrepare(TfLiteContext* context, + TfLiteNode* node); + +} // namespace tflite + +#endif // SIGNAL_MICRO_KERNELS_FILTER_BANK_SQUARE_ROOT_H_ diff --git a/src/signal/micro/kernels/filter_bank_square_root_common.cpp b/src/signal/micro/kernels/filter_bank_square_root_common.cpp new file mode 100644 index 0000000..b430901 --- /dev/null +++ b/src/signal/micro/kernels/filter_bank_square_root_common.cpp @@ -0,0 +1,56 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "signal/micro/kernels/filter_bank_square_root.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +constexpr int kInputTensor = 0; +constexpr int kScaleBitsTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus FilterBankSquareRootPrepare(TfLiteContext* context, + TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* scale_bits = + micro_context->AllocateTempInputTensor(node, kScaleBitsTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, scale_bits != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(scale_bits), 0); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt64); + TF_LITE_ENSURE_TYPES_EQ(context, scale_bits->type, kTfLiteInt32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(scale_bits); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/signal/micro/kernels/framer.cpp b/src/signal/micro/kernels/framer.cpp new file mode 100644 index 0000000..36f189c --- /dev/null +++ b/src/signal/micro/kernels/framer.cpp @@ -0,0 +1,199 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include "signal/src/circular_buffer.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kOutputValidTensor = 1; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. 
+constexpr int kFrameSizeIndex = 0; // 'frame_size' +constexpr int kFrameStepIndex = 1; // 'frame_step' +constexpr int kPrefillIndex = 2; // 'prefill' + +struct TFLMSignalFramerParams { + int32_t frame_size; + int32_t frame_step; + int32_t outer_dims; + int32_t n_frames; + bool prefill; + + int8_t** state_buffers; + tflite::tflm_signal::CircularBuffer** circular_buffers; +}; + +void FramerResetState(TFLMSignalFramerParams* params) { + for (int i = 0; i < params->outer_dims; ++i) { + tflite::tflm_signal::CircularBufferReset(params->circular_buffers[i]); + if (params->prefill) { + tflite::tflm_signal::CircularBufferWriteZeros( + params->circular_buffers[i], params->frame_size - params->frame_step); + } + } +} + +void* FramerInit(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + + auto* params = + static_cast(context->AllocatePersistentBuffer( + context, sizeof(TFLMSignalFramerParams))); + + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(buffer_t, length); + params->frame_size = fbw.ElementAsInt32(kFrameSizeIndex); + params->frame_step = fbw.ElementAsInt32(kFrameStepIndex); + params->prefill = fbw.ElementAsBool(kPrefillIndex); + return params; +} + +TfLiteStatus FramerPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* output_valid = + micro_context->AllocateTempOutputTensor(node, kOutputValidTensor); + TF_LITE_ENSURE(context, output_valid != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input) + 1, NumDimensions(output)); 
+ TF_LITE_ENSURE_EQ(context, NumDimensions(output_valid), 0); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output_valid->type, kTfLiteBool); + + auto* params = reinterpret_cast(node->user_data); + + RuntimeShape input_shape = GetTensorShape(input); + int innermost_dim = input_shape.Dims(input_shape.DimensionsCount() - 1); + TF_LITE_ENSURE(context, innermost_dim >= params->frame_step); + TF_LITE_ENSURE_EQ(context, innermost_dim % params->frame_step, 0); + params->outer_dims = input_shape.FlatSize() / innermost_dim; + params->n_frames = innermost_dim / params->frame_step; + + params->state_buffers = + static_cast(context->AllocatePersistentBuffer( + context, params->outer_dims * sizeof(int8_t*))); + params->circular_buffers = static_cast( + context->AllocatePersistentBuffer( + context, + params->outer_dims * sizeof(tflite::tflm_signal::CircularBuffer*))); + for (int i = 0; i < params->outer_dims; i++) { + // Calculate the capacity of the circular buffer. Round up the frame size to + // a multiple of frame step. Saves memory relative to the simpler frame_size + // + frame_step. For example: step_size = 160, frame_size = 400 capacity = + // 480 vs. 
step_size + frame_size = 560 + size_t capacity = (params->frame_size + params->frame_step - 1) / + params->frame_step * params->frame_step; + + size_t state_size = + tflite::tflm_signal::CircularBufferGetNeededMemory(capacity); + params->state_buffers[i] = + static_cast(context->AllocatePersistentBuffer( + context, state_size * sizeof(int8_t))); + params->circular_buffers[i] = tflite::tflm_signal::CircularBufferInit( + capacity, params->state_buffers[i], state_size); + } + + FramerResetState(params); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(output_valid); + + return kTfLiteOk; +} + +TfLiteStatus FramerEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TfLiteEvalTensor* output_valid = + tflite::micro::GetEvalOutput(context, node, kOutputValidTensor); + + const int16_t* input_data = tflite::micro::GetTensorData(input); + int16_t* output_data = tflite::micro::GetTensorData(output); + bool* output_valid_data = tflite::micro::GetTensorData(output_valid); + *output_valid_data = true; + + for (int i = 0; i < params->outer_dims; i++) { + for (int frame = 0; frame < params->n_frames; frame++) { + int input_idx = (i * params->n_frames + frame) * params->frame_step; + int output_idx = (i * params->n_frames + frame) * params->frame_size; + tflite::tflm_signal::CircularBufferWrite(params->circular_buffers[i], + &input_data[input_idx], + params->frame_step); + + if (tflite::tflm_signal::CircularBufferAvailable( + params->circular_buffers[i]) >= + static_cast(params->frame_size)) { + tflite::tflm_signal::CircularBufferGet(params->circular_buffers[i], + params->frame_size, + &output_data[output_idx]); + 
tflite::tflm_signal::CircularBufferDiscard(params->circular_buffers[i], + params->frame_step); + } else { + *output_valid_data = false; + } + } + } + + return kTfLiteOk; +} + +void FramerReset(TfLiteContext* context, void* buffer) { + FramerResetState(static_cast(buffer)); +} + +} // namespace + +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above +TFLMRegistration* Register_FRAMER() { + static TFLMRegistration r = tflite::micro::RegisterOp( + FramerInit, FramerPrepare, FramerEval, nullptr, FramerReset); + return &r; +} +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/framer_flexbuffers_generated_data.h b/src/signal/micro/kernels/framer_flexbuffers_generated_data.h new file mode 100644 index 0000000..655bfa6 --- /dev/null +++ b/src/signal/micro/kernels/framer_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FRAMER_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FRAMER_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_3_1_0_framer; +extern const unsigned char g_gen_data_3_1_0_framer[]; + +extern const int g_gen_data_size_5_2_1_framer; +extern const unsigned char g_gen_data_5_2_1_framer[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_FRAMER_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/irfft.cpp b/src/signal/micro/kernels/irfft.cpp new file mode 100644 index 0000000..b0d58d5 --- /dev/null +++ b/src/signal/micro/kernels/irfft.cpp @@ -0,0 +1,230 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/irfft.h" + +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/portable_type_to_tflitetype.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. 
+// Elements in the vectors are ordered alphabetically by parameter name. +// 'T' is added implicitly by the TensorFlow framework when the type is resolved +// during graph construction. +// constexpr int kTypeIndex = 0; // 'T' (unused) +constexpr int kFftLengthIndex = 1; // 'fft_length' + +struct TfLiteAudioFrontendIrfftParams { + int32_t fft_length; + int32_t input_size; + int32_t input_length; + int32_t output_length; + TfLiteType fft_type; + int8_t* state; +}; + +template +void* IrfftInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + + auto* params = static_cast( + context->AllocatePersistentBuffer( + context, sizeof(TfLiteAudioFrontendIrfftParams))); + + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + params->fft_length = fbw.ElementAsInt32(kFftLengthIndex); + params->fft_type = typeToTfLiteType(); + + size_t state_size = (*get_needed_memory_func)(params->fft_length); + params->state = reinterpret_cast( + context->AllocatePersistentBuffer(context, state_size * sizeof(int8_t))); + + if (params->state == nullptr) { + return nullptr; + } + + (*init_func)(params->fft_length, params->state, state_size); + return params; +} + +template +TfLiteStatus IrfftPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), NumDimensions(output)); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, TfLiteTypeEnum); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, 
TfLiteTypeEnum); + + auto* params = + reinterpret_cast(node->user_data); + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + // Divide by 2 because input is complex. + params->input_length = + input_shape.Dims(input_shape.DimensionsCount() - 1) / 2; + params->input_size = input_shape.FlatSize() / 2; + params->output_length = output_shape.Dims(output_shape.DimensionsCount() - 1); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template * input, T*)> +TfLiteStatus IrfftEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const Complex* input_data = + tflite::micro::GetTensorData>(input); + T* output_data = tflite::micro::GetTensorData(output); + for (int input_idx = 0, output_idx = 0; input_idx < params->input_size; + input_idx += params->input_length, output_idx += params->output_length) { + (*apply_func)(params->state, &input_data[input_idx], + &output_data[output_idx]); + } + return kTfLiteOk; +} + +void* IrfftInitAll(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + auto tensor_type = static_cast(m["T"].AsInt32()); + + switch (tensor_type) { + case TensorType_INT16: { + return IrfftInit(context, buffer, length); + } + case TensorType_INT32: { + return IrfftInit(context, buffer, length); + } + case TensorType_FLOAT32: { + return IrfftInit(context, buffer, length); + } + default: + return nullptr; + } +} + +TfLiteStatus IrfftPrepareAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->user_data); + + switch 
(params->fft_type) { + case kTfLiteInt16: { + return IrfftPrepare(context, node); + } + case kTfLiteInt32: { + return IrfftPrepare(context, node); + } + case kTfLiteFloat32: { + return IrfftPrepare(context, node); + } + default: + return kTfLiteError; + } +} + +TfLiteStatus IrfftEvalAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->user_data); + + switch (params->fft_type) { + case kTfLiteInt16: { + return IrfftEval(context, node); + } + case kTfLiteInt32: { + return IrfftEval(context, node); + } + case kTfLiteFloat32: { + return IrfftEval(context, node); + } + default: + return kTfLiteError; + } +} + +} // namespace + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +TFLMRegistration* Register_IRFFT() { + static TFLMRegistration r = + tflite::micro::RegisterOp(IrfftInitAll, IrfftPrepareAll, IrfftEvalAll); + return &r; +} + +TFLMRegistration* Register_IRFFT_FLOAT() { + static TFLMRegistration r = tflite::micro::RegisterOp( + IrfftInit, + IrfftPrepare, IrfftEval); + return &r; +} + +TFLMRegistration* Register_IRFFT_INT16() { + static TFLMRegistration r = tflite::micro::RegisterOp( + IrfftInit, + IrfftPrepare, IrfftEval); + return &r; +} + +TFLMRegistration* Register_IRFFT_INT32() { + static TFLMRegistration r = tflite::micro::RegisterOp( + IrfftInit, + IrfftPrepare, IrfftEval); + return &r; +} + +} // namespace tflm_signal +} // namespace tflite \ No newline at end of file diff --git a/src/signal/micro/kernels/irfft.h b/src/signal/micro/kernels/irfft.h new file mode 100644 index 0000000..380bc3e --- /dev/null +++ b/src/signal/micro/kernels/irfft.h @@ -0,0 +1,31 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef SIGNAL_MICRO_KERNELS_IRFFT_H_ +#define SIGNAL_MICRO_KERNELS_IRFFT_H_ + +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { +namespace tflm_signal { + +TFLMRegistration* Register_IRFFT(); +TFLMRegistration* Register_IRFFT_FLOAT(); +TFLMRegistration* Register_IRFFT_INT16(); +TFLMRegistration* Register_IRFFT_INT32(); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_MICRO_KERNELS_IRFFT_H_ diff --git a/src/signal/micro/kernels/overlap_add.cpp b/src/signal/micro/kernels/overlap_add.cpp new file mode 100644 index 0000000..c365cd8 --- /dev/null +++ b/src/signal/micro/kernels/overlap_add.cpp @@ -0,0 +1,244 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/overlap_add.h" + +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/portable_type_to_tflitetype.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +// 'T' is added implicitly by the TensorFlow framework when the type is resolved +// during graph construction. +// constexpr int kTypeIndex = 0; // 'T' (unused) +constexpr int kFrameStepIndex = 1; // 'frame_step' + +template +struct TFLMSignalOverlapAddParams { + int32_t frame_size; + int32_t frame_step; + int32_t outer_dims; + int32_t n_frames; + TfLiteType type; + T** state_buffers; +}; + +template +void OverlapAddResetState(TFLMSignalOverlapAddParams* params) { + for (int i = 0; i < params->outer_dims; i++) { + memset(params->state_buffers[i], 0, sizeof(T) * params->frame_size); + } +} + +template +void* OverlapAddInit(TfLiteContext* context, const char* buffer, + size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + + auto* params = static_cast*>( + context->AllocatePersistentBuffer(context, + sizeof(TFLMSignalOverlapAddParams))); + + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(buffer_t, length); + params->type = typeToTfLiteType(); + params->frame_step = fbw.ElementAsInt32(kFrameStepIndex); + return params; +} + +template +TfLiteStatus OverlapAddPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* 
micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), NumDimensions(output) + 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, TfLiteTypeEnum); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, TfLiteTypeEnum); + + auto* params = + reinterpret_cast*>(node->user_data); + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + TF_LITE_ENSURE(context, input_shape.DimensionsCount() >= 2); + TF_LITE_ENSURE_EQ(context, input_shape.DimensionsCount(), + output_shape.DimensionsCount() + 1); + + params->frame_size = input_shape.Dims(input_shape.DimensionsCount() - 1); + params->n_frames = input_shape.Dims(input_shape.DimensionsCount() - 2); + params->outer_dims = + input_shape.FlatSize() / (params->frame_size * params->n_frames); + params->state_buffers = static_cast(context->AllocatePersistentBuffer( + context, params->outer_dims * sizeof(T*))); + TF_LITE_ENSURE(context, params != nullptr); + + for (int i = 0; i < params->outer_dims; i++) { + params->state_buffers[i] = + static_cast(context->AllocatePersistentBuffer( + context, params->frame_size * sizeof(T))); + } + OverlapAddResetState(params); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +TfLiteStatus OverlapAddEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const T* input_data = tflite::micro::GetTensorData(input); + T* 
output_data = tflite::micro::GetTensorData(output); + for (int i = 0; i < params->outer_dims; i++) { + T* buffer = params->state_buffers[i]; + for (int frame = 0; frame < params->n_frames; frame++) { + int input_index = (i * params->n_frames + frame) * params->frame_size; + int output_index = (i * params->n_frames + frame) * params->frame_step; + tflm_signal::OverlapAdd(&input_data[input_index], buffer, + params->frame_size, &output_data[output_index], + params->frame_step); + } + } + return kTfLiteOk; +} + +template +void OverlapAddReset(TfLiteContext* context, void* buffer) { + OverlapAddResetState(static_cast*>(buffer)); +} + +void* OverlapAddInitAll(TfLiteContext* context, const char* buffer, + size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + auto tensor_type = static_cast(m["T"].AsInt32()); + + switch (tensor_type) { + case TensorType_INT16: { + return OverlapAddInit(context, buffer, length); + } + case TensorType_FLOAT32: { + return OverlapAddInit(context, buffer, length); + } + default: + return nullptr; + } +} + +TfLiteStatus OverlapAddPrepareAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + + switch (params->type) { + case kTfLiteInt16: { + return OverlapAddPrepare(context, node); + } + case kTfLiteFloat32: { + return OverlapAddPrepare(context, node); + } + default: + return kTfLiteError; + } +} + +TfLiteStatus OverlapAddEvalAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + + switch (params->type) { + case kTfLiteInt16: { + return OverlapAddEval(context, node); + } + case kTfLiteFloat32: { + return OverlapAddEval(context, node); + } + default: + return kTfLiteError; + } +} + +void OverlapAddResetAll(TfLiteContext* context, void* buffer) { + auto* params = reinterpret_cast*>(buffer); + + switch (params->type) { + case kTfLiteInt16: { + 
OverlapAddReset(context, buffer); + break; + } + case kTfLiteFloat32: { + OverlapAddReset(context, buffer); + break; + } + default: + break; + } +} + +} // namespace + +namespace tflm_signal { +TFLMRegistration* Register_OVERLAP_ADD() { + static TFLMRegistration r = + tflite::micro::RegisterOp(OverlapAddInitAll, OverlapAddPrepareAll, + OverlapAddEvalAll, nullptr, OverlapAddResetAll); + return &r; +} + +TFLMRegistration* Register_OVERLAP_ADD_FLOAT() { + static TFLMRegistration r = tflite::micro::RegisterOp( + OverlapAddInit, OverlapAddPrepare, + OverlapAddEval, nullptr, OverlapAddReset); + return &r; +} + +TFLMRegistration* Register_OVERLAP_ADD_INT16() { + static TFLMRegistration r = tflite::micro::RegisterOp( + OverlapAddInit, OverlapAddPrepare, + OverlapAddEval, nullptr, OverlapAddReset); + return &r; +} +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h b/src/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h new file mode 100644 index 0000000..adb4fba --- /dev/null +++ b/src/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_OVERLAP_ADD_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_OVERLAP_ADD_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_overlap_add_float; +extern const unsigned char g_gen_data_overlap_add_float[]; + +extern const int g_gen_data_size_overlap_add_int16; +extern const unsigned char g_gen_data_overlap_add_int16[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_OVERLAP_ADD_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/pcan.cpp b/src/signal/micro/kernels/pcan.cpp new file mode 100644 index 0000000..9473e1b --- /dev/null +++ b/src/signal/micro/kernels/pcan.cpp @@ -0,0 +1,135 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "signal/src/pcan_argc_fixed.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +constexpr int kInputTensor = 0; +constexpr int kNoiseEstimateTensor = 1; +constexpr int kGainLutTensor = 2; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +constexpr int kSnrShiftIndex = 0; // 'snr_shift' + +struct TfLitePcanParams { + int snr_shift; +}; + +void* PcanInit(TfLiteContext* context, const char* buffer, size_t length) { + auto* params = static_cast( + context->AllocatePersistentBuffer(context, sizeof(TfLitePcanParams))); + + tflite::FlexbufferWrapper fbw(reinterpret_cast(buffer), + length); + params->snr_shift = fbw.ElementAsInt32(kSnrShiftIndex); + return params; +} + +TfLiteStatus PcanPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* noise_estimate = + micro_context->AllocateTempInputTensor(node, kNoiseEstimateTensor); + TF_LITE_ENSURE(context, noise_estimate != nullptr); + TfLiteTensor* gain_lut = + micro_context->AllocateTempInputTensor(node, kGainLutTensor); + TF_LITE_ENSURE(context, gain_lut != 
nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(noise_estimate), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(gain_lut), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteUInt32); + TF_LITE_ENSURE_TYPES_EQ(context, noise_estimate->type, kTfLiteUInt32); + TF_LITE_ENSURE_TYPES_EQ(context, gain_lut->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt32); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(noise_estimate); + micro_context->DeallocateTempTfLiteTensor(gain_lut); + return kTfLiteOk; +} + +TfLiteStatus PcanEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteEvalTensor* noise_estimate = + tflite::micro::GetEvalInput(context, node, kNoiseEstimateTensor); + TF_LITE_ENSURE(context, noise_estimate != nullptr); + const TfLiteEvalTensor* gain_lut = + tflite::micro::GetEvalInput(context, node, kGainLutTensor); + TF_LITE_ENSURE(context, gain_lut != nullptr); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + const uint32_t* input_data = tflite::micro::GetTensorData(input); + const uint32_t* noise_estimate_data = + tflite::micro::GetTensorData(noise_estimate); + const int16_t* gain_lut_data = + tflite::micro::GetTensorData(gain_lut); + uint32_t* output_data = tflite::micro::GetTensorData(output); + + int num_channels = input->dims->data[0]; + + size_t output_byte_size; + TF_LITE_ENSURE_OK( 
+ context, tflite::TfLiteEvalTensorByteLength(output, &output_byte_size)); + + memcpy(output_data, input_data, output_byte_size); + + tflite::tflm_signal::ApplyPcanAutoGainControlFixed( + gain_lut_data, params->snr_shift, noise_estimate_data, output_data, + num_channels); + return kTfLiteOk; +} + +TFLMRegistration* Register_PCAN() { + static TFLMRegistration r = + tflite::micro::RegisterOp(PcanInit, PcanPrepare, PcanEval); + return &r; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/micro/kernels/pcan_flexbuffers_generated_data.h b/src/signal/micro/kernels/pcan_flexbuffers_generated_data.h new file mode 100644 index 0000000..32b4cd7 --- /dev/null +++ b/src/signal/micro/kernels/pcan_flexbuffers_generated_data.h @@ -0,0 +1,7 @@ +#ifndef SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_ +#define SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_ + +extern const int g_gen_data_size_snr_shift_6_test; +extern const unsigned char g_gen_data_snr_shift_6_test[]; + +#endif // SIGNAL_MICRO_KERNELS_PCAN_FLEXBUFFERS_GENERATED_DATA_H_ diff --git a/src/signal/micro/kernels/rfft.cpp b/src/signal/micro/kernels/rfft.cpp new file mode 100644 index 0000000..c9472b0 --- /dev/null +++ b/src/signal/micro/kernels/rfft.cpp @@ -0,0 +1,239 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/rfft.h" + +#include +#include +#include + +#include "signal/micro/kernels/rfft.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/portable_type_to_tflitetype.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +// 'T' is added implicitly by the TensorFlow framework when the type is resolved +// during graph construction. +// constexpr int kTypeIndex = 0; // 'T' (unused) +constexpr int kFftLengthIndex = 1; // 'fft_length' + +template +struct TfLiteAudioFrontendRfftParams { + int32_t fft_length; + int32_t input_size; + int32_t input_length; + int32_t output_length; + TfLiteType fft_type; + T* work_area; + int scratch_buffer_index; + int8_t* state; +}; + +template +void* RfftInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + + const uint8_t* buffer_t = reinterpret_cast(buffer); + auto* params = static_cast*>( + context->AllocatePersistentBuffer( + context, sizeof(TfLiteAudioFrontendRfftParams))); + + tflite::FlexbufferWrapper fbw(buffer_t, length); + params->fft_length = fbw.ElementAsInt32(kFftLengthIndex); + params->fft_type = typeToTfLiteType(); + + size_t state_size = (*get_needed_memory_func)(params->fft_length); + params->state = static_cast( + context->AllocatePersistentBuffer(context, state_size * sizeof(int8_t))); + (*init_func)(params->fft_length, params->state, state_size); + return params; +} + +template +TfLiteStatus RfftPrepare(TfLiteContext* 
context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), NumDimensions(output)); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, TfLiteTypeEnum); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, TfLiteTypeEnum); + + auto* params = + reinterpret_cast*>(node->user_data); + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + params->input_length = input_shape.Dims(input_shape.DimensionsCount() - 1); + params->input_size = input_shape.FlatSize(); + // Divide by 2 because output is complex. + params->output_length = + output_shape.Dims(output_shape.DimensionsCount() - 1) / 2; + + context->RequestScratchBufferInArena(context, params->fft_length * sizeof(T), + ¶ms->scratch_buffer_index); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template *)> +TfLiteStatus RfftEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + + const T* input_data = tflite::micro::GetTensorData(input); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + Complex* output_data = tflite::micro::GetTensorData>(output); + + T* work_area = static_cast( + context->GetScratchBuffer(context, params->scratch_buffer_index)); + + for (int input_idx = 0, output_idx = 0; input_idx < params->input_size; + input_idx += params->input_length, 
output_idx += params->output_length) { + memcpy(work_area, &input_data[input_idx], sizeof(T) * params->input_length); + // Zero pad input to FFT length + memset(&work_area[params->input_length], 0, + sizeof(T) * (params->fft_length - params->input_length)); + + (*apply_func)(params->state, work_area, &output_data[output_idx]); + } + return kTfLiteOk; +} + +void* RfftInitAll(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + auto tensor_type = static_cast(m["T"].AsInt32()); + + switch (tensor_type) { + case TensorType_INT16: { + return RfftInit(context, buffer, length); + } + case TensorType_INT32: { + return RfftInit(context, buffer, length); + } + case TensorType_FLOAT32: { + return RfftInit(context, buffer, length); + } + default: + return nullptr; + } +} + +TfLiteStatus RfftPrepareAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + + switch (params->fft_type) { + case kTfLiteInt16: { + return RfftPrepare(context, node); + } + case kTfLiteInt32: { + return RfftPrepare(context, node); + } + case kTfLiteFloat32: { + return RfftPrepare(context, node); + } + default: + return kTfLiteError; + } +} + +TfLiteStatus RfftEvalAll(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast*>(node->user_data); + + switch (params->fft_type) { + case kTfLiteInt16: { + return RfftEval(context, node); + } + case kTfLiteInt32: { + return RfftEval(context, node); + } + case kTfLiteFloat32: { + return RfftEval(context, node); + } + default: + return kTfLiteError; + } +} +} // namespace + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +TFLMRegistration* Register_RFFT() { + static TFLMRegistration r = + tflite::micro::RegisterOp(RfftInitAll, RfftPrepareAll, RfftEvalAll); + return &r; +} + +TFLMRegistration* 
Register_RFFT_FLOAT() { + static TFLMRegistration r = tflite::micro::RegisterOp( + RfftInit, + RfftPrepare, + RfftEval); + return &r; +} + +TFLMRegistration* Register_RFFT_INT16() { + static TFLMRegistration r = tflite::micro::RegisterOp( + RfftInit, + RfftPrepare, + RfftEval); + return &r; +} + +TFLMRegistration* Register_RFFT_INT32() { + static TFLMRegistration r = tflite::micro::RegisterOp( + RfftInit, + RfftPrepare, + RfftEval); + return &r; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/micro/kernels/rfft.h b/src/signal/micro/kernels/rfft.h new file mode 100644 index 0000000..f732c6f --- /dev/null +++ b/src/signal/micro/kernels/rfft.h @@ -0,0 +1,31 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef SIGNAL_MICRO_KERNELS_RFFT_H_ +#define SIGNAL_MICRO_KERNELS_RFFT_H_ + +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { +namespace tflm_signal { + +TFLMRegistration* Register_RFFT(); +TFLMRegistration* Register_RFFT_FLOAT(); +TFLMRegistration* Register_RFFT_INT16(); +TFLMRegistration* Register_RFFT_INT32(); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_MICRO_KERNELS_RFFT_H_ diff --git a/src/signal/micro/kernels/stacker.cpp b/src/signal/micro/kernels/stacker.cpp new file mode 100644 index 0000000..fc1a4a3 --- /dev/null +++ b/src/signal/micro/kernels/stacker.cpp @@ -0,0 +1,176 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include "signal/src/circular_buffer.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kOutputValidTensor = 1; + +// Indices into the init flexbuffer's vector. 
+// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +constexpr int kNumChannelsIndex = 0; // 'num_channels' +constexpr int kStackerLeftContextIndex = 1; // 'stacker_left_context' +constexpr int kStackerRightContextIndex = 2; // 'stacker_right_context' +constexpr int kStackerStepIndex = 3; // 'stacker_step' + +struct TFLMSignalStackerParams { + int32_t num_channels; + int32_t stacker_left_context; + int32_t stacker_right_context; + int32_t stacker_step; + + size_t buffer_size; + size_t step_size; + bool stacker_has_first_frame; + + int8_t* state; + tflm_signal::CircularBuffer* circular_buffer; +}; + +void* StackerInit(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + + auto* params = + static_cast(context->AllocatePersistentBuffer( + context, sizeof(TFLMSignalStackerParams))); + if (params == nullptr) { + return nullptr; + } + + tflite::FlexbufferWrapper fbw(buffer_t, length); + params->num_channels = fbw.ElementAsInt32(kNumChannelsIndex); + params->stacker_left_context = fbw.ElementAsInt32(kStackerLeftContextIndex); + params->stacker_right_context = fbw.ElementAsInt32(kStackerRightContextIndex); + params->stacker_step = fbw.ElementAsInt32(kStackerStepIndex); + + params->buffer_size = + params->num_channels * + (params->stacker_left_context + params->stacker_right_context + 1); + params->step_size = params->num_channels * params->stacker_step; + params->stacker_has_first_frame = false; + + size_t state_size = + tflm_signal::CircularBufferGetNeededMemory(params->buffer_size); + params->state = static_cast( + context->AllocatePersistentBuffer(context, sizeof(int8_t) * state_size)); + + if (params->state == nullptr) { + return nullptr; + } + + params->circular_buffer = tflm_signal::CircularBufferInit( + params->buffer_size, params->state, state_size); + return params; +} + +TfLiteStatus StackerPrepare(TfLiteContext* context, 
TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* output_valid = + micro_context->AllocateTempOutputTensor(node, kOutputValidTensor); + TF_LITE_ENSURE(context, output_valid != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(output_valid), 0); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output_valid->type, kTfLiteBool); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(output_valid); + return kTfLiteOk; +} + +TfLiteStatus StackerEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + TF_LITE_ENSURE(context, params != nullptr); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TfLiteEvalTensor* output_valid = + tflite::micro::GetEvalOutput(context, node, kOutputValidTensor); + + const int16_t* input_data = tflite::micro::GetTensorData(input); + + tflm_signal::CircularBufferWrite(params->circular_buffer, input_data, + params->num_channels); + + // The first frame is replicated an extra left_context times to pad. 
+ if (params->stacker_has_first_frame == false) { + tflm_signal::CircularBufferExtend(params->circular_buffer, + params->num_channels, + params->stacker_left_context); + params->stacker_has_first_frame = true; + } + + int16_t* output_data = tflite::micro::GetTensorData(output); + bool* output_valid_data = tflite::micro::GetTensorData(output_valid); + if (tflm_signal::CircularBufferAvailable(params->circular_buffer) >= + params->buffer_size) { + tflm_signal::CircularBufferGet(params->circular_buffer, params->buffer_size, + output_data); + tflm_signal::CircularBufferDiscard(params->circular_buffer, + params->step_size); + *output_valid_data = true; + } else { + *output_valid_data = false; + } + return kTfLiteOk; +} + +void StackerReset(TfLiteContext* context, void* buffer) { + auto* params = static_cast(buffer); + tflm_signal::CircularBufferReset(params->circular_buffer); + params->stacker_has_first_frame = false; +} + +} // namespace + +namespace tflm_signal { +TFLMRegistration* Register_STACKER() { + static TFLMRegistration r = tflite::micro::RegisterOp( + StackerInit, StackerPrepare, StackerEval, /*Free*/ nullptr, StackerReset); + return &r; +} +} // namespace tflm_signal + +} // namespace tflite diff --git a/src/signal/micro/kernels/stacker_flexbuffers_generated_data.h b/src/signal/micro/kernels/stacker_flexbuffers_generated_data.h new file mode 100644 index 0000000..47a3827 --- /dev/null +++ b/src/signal/micro/kernels/stacker_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_STACKER_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_STACKER_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_stacker_3_channels_step_1; +extern const unsigned char g_gen_data_stacker_3_channels_step_1[]; + +extern const int g_gen_data_size_stacker_10_channels_step_2; +extern const unsigned char g_gen_data_stacker_10_channels_step_2[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_STACKER_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/micro/kernels/window.cpp b/src/signal/micro/kernels/window.cpp new file mode 100644 index 0000000..cd9c462 --- /dev/null +++ b/src/signal/micro/kernels/window.cpp @@ -0,0 +1,123 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/window.h" + +#include + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kWeightsTensor = 1; +constexpr int kOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +constexpr int kShiftIndex = 0; // 'shift' + +struct TFLMSignalWindowParams { + int32_t shift; + int32_t input_size; +}; + +void* WindowInit(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + + auto* params = + static_cast(context->AllocatePersistentBuffer( + context, sizeof(TFLMSignalWindowParams))); + + tflite::FlexbufferWrapper fbw(buffer_t, length); + params->shift = fbw.ElementAsInt32(kShiftIndex); + return params; +} + +TfLiteStatus WindowPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* weights = + micro_context->AllocateTempInputTensor(node, kWeightsTensor); + TF_LITE_ENSURE(context, weights != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TF_LITE_ENSURE_EQ(context, 
NumDimensions(weights), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), NumDimensions(output)); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, weights->type, kTfLiteInt16); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt16); + + auto* params = reinterpret_cast(node->user_data); + RuntimeShape input_shape = GetTensorShape(input); + params->input_size = input_shape.FlatSize(); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(weights); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus WindowEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* weights = + tflite::micro::GetEvalInput(context, node, kWeightsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + const int16_t* input_data = tflite::micro::GetTensorData(input); + const int16_t* weight_data = tflite::micro::GetTensorData(weights); + int16_t* output_data = tflite::micro::GetTensorData(output); + int weight_size = weights->dims->data[0]; + + for (int i = 0; i < params->input_size; i += weight_size) { + ::tflm_signal::ApplyWindow(&input_data[i], weight_data, weight_size, + params->shift, &output_data[i]); + } + return kTfLiteOk; +} +} // namespace + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +TFLMRegistration* Register_WINDOW() { + static TFLMRegistration r = + tflite::micro::RegisterOp(WindowInit, WindowPrepare, WindowEval); + return &r; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/micro/kernels/window_flexbuffers_generated_data.h b/src/signal/micro/kernels/window_flexbuffers_generated_data.h new file mode 100644 index 0000000..cd26fc0 --- /dev/null +++ 
b/src/signal/micro/kernels/window_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_WINDOW_FLEXBUFFERS_DATA_H_ +#define SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_WINDOW_FLEXBUFFERS_DATA_H_ + +extern const int g_gen_data_size_window_shift_12; +extern const unsigned char g_gen_data_window_shift_12[]; + +extern const int g_gen_data_size_window_shift_8; +extern const unsigned char g_gen_data_window_shift_8[]; + +#endif // SIGNAL_MICRO_KERNELS_TEST_DATA_GENERATION_GENERATE_WINDOW_FLEXBUFFERS_DATA_H_ diff --git a/src/signal/src/circular_buffer.cpp b/src/signal/src/circular_buffer.cpp new file mode 100644 index 0000000..7638d91 --- /dev/null +++ b/src/signal/src/circular_buffer.cpp @@ -0,0 +1,290 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/circular_buffer.h" + +#include +#include +#include + +#define ASSERT assert + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above +void CircularBufferReset(tflm_signal::CircularBuffer* cb) { + cb->read = 0; + cb->write = 0; + cb->empty = 1; + cb->buffer = (int16_t*)(cb + 1); + memset(cb->buffer, 0, sizeof(cb->buffer[0]) * cb->buffer_size); +} + +size_t CircularBufferGetNeededMemory(size_t capacity) { + return sizeof(CircularBuffer) + sizeof(int16_t) * 2 * capacity; +} + +CircularBuffer* CircularBufferInit(size_t capacity, void* state, + size_t state_size) { + ASSERT(CircularBufferGetNeededMemory(capacity) >= state_size); + CircularBuffer* cb = (CircularBuffer*)state; + cb->buffer_size = 2 * capacity; + cb->capacity = capacity; + CircularBufferReset(cb); + return cb; +} + +size_t CircularBufferCapacity(const tflm_signal::CircularBuffer* cb) { + return cb->capacity; +} + +bool CircularBufferFull(const tflm_signal::CircularBuffer* cb) { + return cb->read == cb->write && cb->empty == 0; +} + +bool CircularBufferEmpty(const tflm_signal::CircularBuffer* cb) { + return cb->empty == 1; +} + +size_t CircularBufferAvailable(const tflm_signal::CircularBuffer* cb) { + const int32_t diff = cb->write - cb->read; + if (diff > 0) { + return diff; + } else if (diff < 0) { + return cb->capacity + diff; + } else if (cb->empty == 1) { + return 0; + } else { + return cb->capacity; + } +} + +size_t CircularBufferCanWrite(const tflm_signal::CircularBuffer* cb) { + return cb->capacity - CircularBufferAvailable(cb); +} + +void CircularBufferAdd(tflm_signal::CircularBuffer* cb, int16_t value) { + ASSERT(!CircularBufferFull(cb)); + cb->buffer[cb->write] = value; + cb->buffer[cb->write + cb->capacity] = value; + if (++cb->write 
== cb->capacity) { + cb->write = 0; + } + cb->empty = 0; +} + +void CircularBufferWrite(tflm_signal::CircularBuffer* cb, const int16_t* values, + size_t n) { + if (n > 0) { + ASSERT(CircularBufferCanWrite(cb) >= n); + size_t write = cb->write; + int16_t* buffer = cb->buffer; + const size_t capacity = cb->capacity; + const size_t end = write + n; + + memcpy(buffer + write, values, n * sizeof(int16_t)); + if (end < capacity) { + memcpy(buffer + capacity + write, values, n * sizeof(int16_t)); + write += n; + } else { + const size_t n1 = capacity - write; + const size_t nbytes1 = n1 * sizeof(int16_t); + memcpy(buffer + capacity + write, values, nbytes1); + const size_t n2 = end - capacity; + if (n2 > 0) { + const size_t nbytes2 = n2 * sizeof(int16_t); + memcpy(buffer, values + n1, nbytes2); + } + write = n2; + } + cb->write = write; + cb->empty = 0; + } +} + +void CircularBufferWriteZeros(tflm_signal::CircularBuffer* cb, size_t n) { + if (n > 0) { + ASSERT(CircularBufferCanWrite(cb) >= n); + size_t write = cb->write; + int16_t* buffer = cb->buffer; + const size_t capacity = cb->capacity; + const size_t end = write + n; + + memset(buffer + write, 0, n * sizeof(int16_t)); + if (end < capacity) { + memset(buffer + capacity + write, 0, n * sizeof(int16_t)); + write += n; + } else { + const size_t n1 = capacity - write; + const size_t nbytes1 = n1 * sizeof(int16_t); + memset(buffer + capacity + write, 0, nbytes1); + const size_t n2 = end - capacity; + if (n2 > 0) { + const size_t nbytes2 = n2 * sizeof(int16_t); + memset(buffer, 0, nbytes2); + } + write = n2; + } + cb->write = write; + cb->empty = 0; + } +} + +int16_t* CircularBufferReserveForWrite(tflm_signal::CircularBuffer* cb, + size_t n) { + ASSERT(cb->write + n <= cb->capacity); + int16_t* write_ptr = cb->buffer + cb->write; + cb->write += n; + if (cb->write == cb->capacity) { + cb->write = 0; + } + cb->empty = cb->empty && n == 0; + return write_ptr; +} + +void CircularBufferExtend(tflm_signal::CircularBuffer* cb, 
size_t count, + int32_t n) { + if (n > 0 && count > 0) { + ASSERT(CircularBufferCanWrite(cb) >= count * n); + ASSERT(CircularBufferAvailable(cb) >= count); + const size_t capacity = cb->capacity; + // start pos of region to copy + const size_t start = + (count > cb->write) ? cb->write + capacity - count : cb->write - count; + const size_t end = start + count; + int i; + if (end <= capacity) { + // the source elements are contiguous + for (i = 0; i < n; ++i) { + CircularBufferWrite(cb, cb->buffer + start, count); + } + } else { + // the source elements wrap around the end of the buffer + for (i = 0; i < n; ++i) { + const size_t n1 = capacity - start; + const size_t n2 = count - n1; + CircularBufferWrite(cb, cb->buffer + start, n1); + CircularBufferWrite(cb, cb->buffer, n2); + } + } + } + // Note: no need to update empty flag +} + +int16_t CircularBufferRemove(tflm_signal::CircularBuffer* cb) { + ASSERT(!CircularBufferEmpty(cb)); + const int16_t result = cb->buffer[cb->read]; + if (++cb->read == cb->capacity) { + cb->read = 0; + } + if (cb->read == cb->write) { + cb->empty = 1; + } + return result; +} + +int16_t CircularBufferPeek(const tflm_signal::CircularBuffer* cb, + size_t index) { + ASSERT(CircularBufferAvailable(cb) > index); + size_t target = cb->read + index; + while (target >= cb->capacity) { + target -= cb->capacity; + } + return cb->buffer[target]; +} + +void CircularBufferRewind(tflm_signal::CircularBuffer* cb, size_t n) { + ASSERT(n <= CircularBufferCanWrite(cb)); + if (n > cb->read) { + // Must add before subtracting because types are unsigned. 
+ cb->read = (cb->read + cb->capacity) - n; + } else { + cb->read -= n; + } + if (n > 0) cb->empty = 0; +} + +const int16_t* CircularBufferPeekDirect(const tflm_signal::CircularBuffer* cb, + size_t index) { + ASSERT(CircularBufferAvailable(cb) > index); + size_t target = cb->read + index; + while (target >= cb->capacity) { + target -= cb->capacity; + } + return cb->buffer + target; +} + +const int16_t* CircularBufferPeekMax(const tflm_signal::CircularBuffer* cb, + size_t* n) { + if (CircularBufferAvailable(cb) > 0) { + *n = (cb->write <= cb->read) ? cb->capacity - cb->read + : cb->write - cb->read; + return cb->buffer + cb->read; + } else { + *n = 0; + return NULL; + } +} + +void CircularBufferGet(tflm_signal::CircularBuffer* cb, size_t n, + int16_t* values) { + ASSERT(CircularBufferAvailable(cb) >= n); + const int16_t* buffer = cb->buffer; + const size_t read = cb->read; + const size_t end = read + n; + const size_t capacity = cb->capacity; + if (end <= capacity) { + memcpy(values, buffer + read, n * sizeof(int16_t)); + } else { + const size_t n1 = capacity - read; + const size_t n2 = end - capacity; + const size_t nbytes1 = n1 * sizeof(int16_t); + const size_t nbytes2 = n2 * sizeof(int16_t); + memcpy(values, buffer + read, nbytes1); + memcpy(values + n1, buffer, nbytes2); + } +} + +void CircularBufferDiscard(tflm_signal::CircularBuffer* cb, size_t n) { + ASSERT(n > 0); + ASSERT(CircularBufferAvailable(cb) >= n); + cb->read += n; + if (cb->read >= cb->capacity) { + cb->read -= cb->capacity; + } + if (cb->read == cb->write) { + cb->empty = 1; + } +} + +void CircularBufferShift(tflm_signal::CircularBuffer* cb, int n) { + if (n < 0) { + ASSERT(-n <= (int)cb->capacity); + if ((int)cb->read < -n) { + // First add then subtract to ensure positivity as types are unsigned. 
+ cb->read += cb->capacity; + } + cb->read += n; + } else { + ASSERT(n <= (int)cb->capacity); + cb->read += n; + if (cb->read >= cb->capacity) { + cb->read -= cb->capacity; + } + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/circular_buffer.h b/src/signal/src/circular_buffer.h new file mode 100644 index 0000000..d175a9b --- /dev/null +++ b/src/signal/src/circular_buffer.h @@ -0,0 +1,118 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_CIRCULAR_BUFFER_H_ +#define SIGNAL_SRC_CIRCULAR_BUFFER_H_ + +#include +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above +struct CircularBuffer { + // Max number of elements, value passed-in to CircularBufferAlloc. + size_t capacity; + // Next position to read. + size_t read; + // Next position to write. + size_t write; + // Flag to indicate emptiness. + int32_t empty; + // Auto-generated size variable + int32_t buffer_size; + // Array of the circular buffer elements (integers). + int16_t* buffer; +}; + +// Returns the size of the memory that the circular buffer needs +// in order to hold `capacity` items. +size_t CircularBufferGetNeededMemory(size_t capacity); + +// Initialize an instance of the circular buffer that holds `capacity` items. 
+// `state` points to a memory allocation of size `state_size`. The size +// should be greater or equal to the value returned by +// CircularBufferGetNeededMemory(capacity). Fails if it isn't. +// On success, returns a pointer to the circular buffer's object. +CircularBuffer* CircularBufferInit(size_t capacity, void* state, + size_t state_size); + +// Reset a circular buffer to its initial empty state +void CircularBufferReset(CircularBuffer* cb); + +size_t CircularBufferCapacity(const CircularBuffer* cb); + +bool CircularBufferFull(const CircularBuffer* cb); + +bool CircularBufferEmpty(const CircularBuffer* cb); + +// Returns the number of elements ready to read +size_t CircularBufferAvailable(const CircularBuffer* cb); + +// Returns the number of elements available to write. +size_t CircularBufferCanWrite(const CircularBuffer* cb); + +// Adds a single `value` to the buffer and advances the write pointer. +void CircularBufferAdd(CircularBuffer* cb, int16_t value); + +// Writes `n` `values` into the buffer and advances the write pointer. +void CircularBufferWrite(CircularBuffer* cb, const int16_t* values, size_t n); + +// Writes `n` zeros into the buffer and advances the write pointer. +void CircularBufferWriteZeros(CircularBuffer* cb, size_t n); + +// Returns a pointer to a buffer where elements can be written, and +// advances the write pointer as though they have already been written. +// Fails if `n` elements are not available contiguously at the current +// write position. +int16_t* CircularBufferReserveForWrite(CircularBuffer* cb, size_t n); + +// Copies the final region (`count` elements) of the buffer `n` times, to +// the end of the buffer. +void CircularBufferExtend(CircularBuffer* cb, size_t count, int32_t n); + +// Reads a single value from the buffer and advances the read pointer +int16_t CircularBufferRemove(CircularBuffer* cb); + +// Reads the value at the given `index`, does not modify the read pointer. 
+int16_t CircularBufferPeek(const CircularBuffer* cb, size_t index); + +// Rewinds to restore the previous `n` values read +void CircularBufferRewind(CircularBuffer* cb, size_t n); + +// Returns a pointer directly into the circular buffer at the given `index`. +// Caller is responsible for not reading past the end. +const int16_t* CircularBufferPeekDirect(const CircularBuffer* cb, size_t index); + +// Returns a pointer into the circular buffer at the current read pointer, +// setting `n` to the number of values available to be read from here. +const int16_t* CircularBufferPeekMax(const CircularBuffer* cb, size_t* n); + +// Copies `n` `values` from the buffer and does not advance the read +// pointer and does not update the empty flag. +void CircularBufferGet(CircularBuffer* cb, size_t n, int16_t* values); + +// Discards the next `n` values by advancing the read index. +// Valid for n > 0. +void CircularBufferDiscard(CircularBuffer* cb, size_t n); + +// Shifts the buffer with `n` values (`n` can be negative) by moving +// the read index. +void CircularBufferShift(CircularBuffer* cb, int n); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_CIRCULAR_BUFFER_H_ diff --git a/src/signal/src/complex.h b/src/signal/src/complex.h new file mode 100644 index 0000000..6f20303 --- /dev/null +++ b/src/signal/src/complex.h @@ -0,0 +1,29 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_SRC_COMPLEX_H_ +#define SIGNAL_SRC_COMPLEX_H_ + +#include + +// We would use the standard complex type in complex.h, but there's +// no guarantee that all architectures will support it. +template +struct Complex { + T real; + T imag; +}; + +#endif // SIGNAL_SRC_COMPLEX_H_ diff --git a/src/signal/src/energy.cpp b/src/signal/src/energy.cpp new file mode 100644 index 0000000..3ea5fc0 --- /dev/null +++ b/src/signal/src/energy.cpp @@ -0,0 +1,33 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "signal/src/energy.h" + +#include "signal/src/complex.h" + +namespace tflite { +namespace tflm_signal { +void SpectrumToEnergy(const Complex* input, int start_index, + int end_index, uint32_t* output) { + for (int i = start_index; i < end_index; i++) { + const int16_t real = input[i].real; // 15 bits + const int16_t imag = input[i].imag; // 15 bits + // 31 bits + output[i] = (static_cast(real) * real) + + (static_cast(imag) * imag); + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/energy.h b/src/signal/src/energy.h new file mode 100644 index 0000000..5a1cf37 --- /dev/null +++ b/src/signal/src/energy.h @@ -0,0 +1,38 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_ENERGY_H_ +#define SIGNAL_ENERGY_H_ + +#include + +#include "signal/src/complex.h" + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Calculates the power spectrum from a DFT output between start and end indices +// +// * `start_index` and `end_index` must valid indices into `input` +// * `output` must be the same size as `input`. Only the values at indices +// `start_index` and `end_index` inclusive should be considered valid. +void SpectrumToEnergy(const Complex* input, int start_index, + int end_index, uint32_t* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_ENERGY_H_ diff --git a/src/signal/src/fft_auto_scale.cpp b/src/signal/src/fft_auto_scale.cpp new file mode 100644 index 0000000..7e78397 --- /dev/null +++ b/src/signal/src/fft_auto_scale.cpp @@ -0,0 +1,42 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/fft_auto_scale.h" + +#include +#include + +#include "signal/src/max_abs.h" +#include "signal/src/msb.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +int FftAutoScale(const int16_t* input, int size, int16_t* output) { + const int16_t max = MaxAbs16(input, size); + int scale_bits = (sizeof(int16_t) * 8) - MostSignificantBit32(max) - 1; + if (scale_bits <= 0) { + scale_bits = 0; + } + for (int i = 0; i < size; i++) { + // (input[i] << scale_bits) is undefined if input[i] is negative. + // Multiply explicitly to make the code portable. + output[i] = input[i] * (1 << scale_bits); + } + return scale_bits; +} +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/fft_auto_scale.h b/src/signal/src/fft_auto_scale.h new file mode 100644 index 0000000..c566a0e --- /dev/null +++ b/src/signal/src/fft_auto_scale.h @@ -0,0 +1,35 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_FFT_AUTO_SCALE_H_ +#define SIGNAL_SRC_FFT_AUTO_SCALE_H_ + +#include +#include + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +// Auto scales `input` and write the result to `output` +// Elements in `input` are left shifted to maximize the amplitude without +// clipping, +// * both `input` and `output` must be of size `size` +int FftAutoScale(const int16_t* input, int size, int16_t* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_FFT_AUTO_SCALE_H_ diff --git a/src/signal/src/filter_bank.cpp b/src/signal/src/filter_bank.cpp new file mode 100644 index 0000000..8517e45 --- /dev/null +++ b/src/signal/src/filter_bank.cpp @@ -0,0 +1,53 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/filter_bank.h" + +namespace tflite { +namespace tflm_signal { + +void FilterbankAccumulateChannels(const FilterbankConfig* config, + const uint32_t* input, uint64_t* output) { + // With a log mel filterbank, the energy at each frequency gets added to + // two adjacent filterbank filters/channels. 
+ // For the first filter bank channel, its energy is first multiplied by + // some weight 'w', then gets accumulated. + // For the subsequent filter bank, its power is first multiplied by 1-'w' + // (called unweight here), then gets accumulated. + // For this reason, we need to calculate (config->num_channels + 1) output + // where element 0 is only used as scratch storage for the unweights of + // element 1 (channel 0). The caller should discard element 0. + // Writing the code like this doesn't save multiplications, but it lends + // itself better to optimization, because input[freq_start + j] only needs + // to be loaded once. + uint64_t weight_accumulator = 0; + uint64_t unweight_accumulator = 0; + for (int i = 0; i < config->num_channels + 1; i++) { + const int16_t freq_start = config->channel_frequency_starts[i]; + const int16_t weight_start = config->channel_weight_starts[i]; + for (int j = 0; j < config->channel_widths[i]; ++j) { + weight_accumulator += config->weights[weight_start + j] * + static_cast(input[freq_start + j]); + unweight_accumulator += config->unweights[weight_start + j] * + static_cast(input[freq_start + j]); + } + output[i] = weight_accumulator; + weight_accumulator = unweight_accumulator; + unweight_accumulator = 0; + } +} + +} // namespace tflm_signal +} // namespace tflite \ No newline at end of file diff --git a/src/signal/src/filter_bank.h b/src/signal/src/filter_bank.h new file mode 100644 index 0000000..95b2168 --- /dev/null +++ b/src/signal/src/filter_bank.h @@ -0,0 +1,69 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_FILTER_BANK_H_ +#define SIGNAL_SRC_FILTER_BANK_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +struct FilterbankConfig { + // Number of filterbank channels + int32_t num_channels; + + // Each of the following three arrays is of size num_channels + 1 + // An extra channel is needed for scratch. See implementation of + // FilterbankAccumulateChannels() for more details + + // For each channel, the index in the input (spectrum) where its band starts + const int16_t* channel_frequency_starts; + // For each channel, the index in the weights/unweights arrays where + // it filter weights start + const int16_t* channel_weight_starts; + // For each channel, the number of bins in the input (spectrum) that span + // its band + const int16_t* channel_widths; + + // The weights array holds the triangular filter weights of all the filters + // in the bank. The output of each filter in the bank is caluclated by + // multiplying the elements in the input spectrum that are in its band + // (see above: channel_frequency_starts, channel_widths) by the filter weights + // then accumulating. Each element in the unweights array holds the 1 minus + // corresponding elements in the weights array and is used to make this + // operation more efficient. 
For more details, see documnetation in + // FilterbankAccumulateChannels() + const int16_t* weights; + const int16_t* unweights; + int32_t output_scale; + + int32_t input_correction_bits; +}; + +// Accumulate the energy spectrum bins in `input` into filter bank channels +// contained in `output`. +// * `input` - Spectral energy array +// * `output` - of size `config.num_channels` + 1. +// Elements [1:num_channels] contain the filter bank channels. +// Element 0 is used as scratch and should be ignored +void FilterbankAccumulateChannels(const FilterbankConfig* config, + const uint32_t* input, uint64_t* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_FILTER_BANK_H_ diff --git a/src/signal/src/filter_bank_log.cpp b/src/signal/src/filter_bank_log.cpp new file mode 100644 index 0000000..c670a7c --- /dev/null +++ b/src/signal/src/filter_bank_log.cpp @@ -0,0 +1,40 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/filter_bank_log.h" + +#include "signal/src/log.h" + +namespace tflite { +namespace tflm_signal { + +void FilterbankLog(const uint32_t* input, int num_channels, + int32_t output_scale, uint32_t correction_bits, + int16_t* output) { + for (int i = 0; i < num_channels; ++i) { + const uint32_t scaled = input[i] << correction_bits; + if (scaled > 1) { + const uint32_t log_value = Log32(scaled, output_scale); + output[i] = ((log_value < static_cast(INT16_MAX)) + ? log_value + : static_cast(INT16_MAX)); + } else { + output[i] = 0; + } + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/filter_bank_log.h b/src/signal/src/filter_bank_log.h new file mode 100644 index 0000000..e8514c7 --- /dev/null +++ b/src/signal/src/filter_bank_log.h @@ -0,0 +1,38 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_FILTER_BANK_LOG_H_ +#define SIGNAL_SRC_FILTER_BANK_LOG_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Apply natural log to each element in array `input` of size `num_channels` +// with pre-shift and post scaling. 
+// The operation is roughly equivalent to: +// `output` = min(Log(`input` << `correction_bits`) * `output_scale`, INT16_MAX) +// Where: +// If (input << `correction_bits`) is 1 or 0, the function returns 0 +void FilterbankLog(const uint32_t* input, int num_channels, + int32_t output_scale, uint32_t correction_bits, + int16_t* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_FILTER_BANK_LOG_H_ diff --git a/src/signal/src/filter_bank_spectral_subtraction.cpp b/src/signal/src/filter_bank_spectral_subtraction.cpp new file mode 100644 index 0000000..bbad86b --- /dev/null +++ b/src/signal/src/filter_bank_spectral_subtraction.cpp @@ -0,0 +1,64 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/filter_bank_spectral_subtraction.h" + +namespace tflite { +namespace tflm_signal { + +void FilterbankSpectralSubtraction(const SpectralSubtractionConfig* config, + const uint32_t* input, uint32_t* output, + uint32_t* noise_estimate) { + const bool data_clamping = config->clamping; + const int smoothing_bits = config->smoothing_bits; + const int num_channels = config->num_channels; + + for (int i = 0; i < num_channels; ++i) { + uint32_t smoothing; + uint32_t one_minus_smoothing; + if ((i & 1) == 0) { + smoothing = config->smoothing; + one_minus_smoothing = config->one_minus_smoothing; + } else { // Use alternate smoothing coefficient on odd-index channels. + smoothing = config->alternate_smoothing; + one_minus_smoothing = config->alternate_one_minus_smoothing; + } + + // Scale up signal[i] for smoothing filter computation. + const uint32_t signal_scaled_up = input[i] << smoothing_bits; + noise_estimate[i] = + ((static_cast(signal_scaled_up) * smoothing) + + (static_cast(noise_estimate[i]) * one_minus_smoothing)) >> + config->spectral_subtraction_bits; + + uint32_t estimate_scaled_up = noise_estimate[i]; + // Make sure that we can't get a negative value for the signal - estimate. + if (estimate_scaled_up > signal_scaled_up) { + estimate_scaled_up = signal_scaled_up; + if (data_clamping) { + noise_estimate[i] = estimate_scaled_up; + } + } + const uint32_t floor = + (static_cast(input[i]) * config->min_signal_remaining) >> + config->spectral_subtraction_bits; + const uint32_t subtracted = + (signal_scaled_up - estimate_scaled_up) >> smoothing_bits; + output[i] = subtracted > floor ? 
subtracted : floor; + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/filter_bank_spectral_subtraction.h b/src/signal/src/filter_bank_spectral_subtraction.h new file mode 100644 index 0000000..e862d77 --- /dev/null +++ b/src/signal/src/filter_bank_spectral_subtraction.h @@ -0,0 +1,73 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_FILTER_BANK_SPECTRAL_SUBTRACTION_H_ +#define SIGNAL_SRC_FILTER_BANK_SPECTRAL_SUBTRACTION_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +struct SpectralSubtractionConfig { + // Number of filterbank channels in input and output + int32_t num_channels; + // The constant used for the lowpass filter for finding the noise. + // Higher values correspond to more aggressively adapting estimates + // of the noise. + // Scale is 1 << spectral_subtraction_bits + uint32_t smoothing; + // One minus smoothing constant for low pass filter. + // Scale is 1 << spectral_subtraction_bits + uint32_t one_minus_smoothing; + // The maximum cap to subtract away from the signal (ie, if this is + // 0.2, then the result of spectral subtraction will not go below + // 0.2 * signal). 
+ // Scale is 1 << spectral_subtraction_bits + uint32_t min_signal_remaining; + // If positive, specifies the filter coefficient for odd-index + // channels, while 'smoothing' is used as the coefficient for even- + // index channels. Otherwise, the same filter coefficient is + // used on all channels. + // Scale is 1 << spectral_subtraction_bits + uint32_t alternate_smoothing; + // Alternate One minus smoothing constant for low pass filter. + // Scale is 1 << spectral_subtraction_bits + uint32_t alternate_one_minus_smoothing; + // Extra fractional bits for the noise_estimate smoothing filter. + uint32_t smoothing_bits; + // Scaling bits for some members of this struct + uint32_t spectral_subtraction_bits; + // If true, when the filterbank level drops below the output, + // the noise estimate will be forced down to the new noise level. + // If false, the noise estimate will remain above the current + // filterbank output (but the subtraction will still keep the + // output non negative). + bool clamping; +}; + +// Apply spectral subtraction to each element in `input`, then write the result +// to `output` and `noise_estimate`. `input`, `output` and `noise estimate` +// must all be of size `config.num_channels`. `config` holds the +// parameters of the spectral subtraction algorithm. +void FilterbankSpectralSubtraction(const SpectralSubtractionConfig* config, + const uint32_t* input, uint32_t* output, + uint32_t* noise_estimate); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_FILTER_BANK_SPECTRAL_SUBTRACTION_H_ diff --git a/src/signal/src/filter_bank_square_root.cpp b/src/signal/src/filter_bank_square_root.cpp new file mode 100644 index 0000000..25e8b23 --- /dev/null +++ b/src/signal/src/filter_bank_square_root.cpp @@ -0,0 +1,31 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/filter_bank_square_root.h" + +#include "signal/src/square_root.h" + +namespace tflite { +namespace tflm_signal { + +void FilterbankSqrt(const uint64_t* input, int num_channels, + int scale_down_bits, uint32_t* output) { + for (int i = 0; i < num_channels; ++i) { + output[i] = Sqrt64(input[i]) >> scale_down_bits; + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/filter_bank_square_root.h b/src/signal/src/filter_bank_square_root.h new file mode 100644 index 0000000..7d484b9 --- /dev/null +++ b/src/signal/src/filter_bank_square_root.h @@ -0,0 +1,34 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_SRC_FILTER_BANK_SQUARE_ROOT_H_ +#define SIGNAL_SRC_FILTER_BANK_SQUARE_ROOT_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Apply square root to each element in `input`, then shift right by +// `scale_down_bits` before writing the result to `output`, +// `input` and `output` must both be of size `num_channels` +void FilterbankSqrt(const uint64_t* input, int num_channels, + int scale_down_bits, uint32_t* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_FILTER_BANK_SQUARE_ROOT_H_ diff --git a/src/signal/src/irfft.h b/src/signal/src/irfft.h new file mode 100644 index 0000000..c2b54d7 --- /dev/null +++ b/src/signal/src/irfft.h @@ -0,0 +1,84 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_IRFFT_H_ +#define SIGNAL_SRC_IRFFT_H_ + +#include +#include + +#include "signal/src/complex.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +// IRFFT (Inverse Real Fast Fourier Transform) +// IFFT for real valued time domain outputs. 
+ +// 16-bit Integer input/output + +// Returns the size of the memory that an IRFFT of `fft_length` needs +size_t IrfftInt16GetNeededMemory(int32_t fft_length); + +// Initialize the state of an IRFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// IrfftGetNeededMemory(fft_length). Fails if it isn't. +void* IrfftInt16Init(int32_t fft_length, void* state, size_t state_size); + +// Applies IRFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see IRfftInit) +// * `output` must be of size output +void IrfftInt16Apply(void* state, const Complex* input, + int16_t* output); + +// 32-bit Integer input/output + +// Returns the size of the memory that an IRFFT of `fft_length` needs +size_t IrfftInt32GetNeededMemory(int32_t fft_length); + +// Initialize the state of an IRFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// IrfftGetNeededMemory(fft_length). Fails if it isn't. +void* IrfftInt32Init(int32_t fft_length, void* state, size_t state_size); + +// Applies IRFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see IRfftInit) +// * `output` must be of size output +void IrfftInt32Apply(void* state, const Complex* input, + int32_t* output); + +// Floating point input/output + +// Returns the size of the memory that an IRFFT of `fft_length` needs +size_t IrfftFloatGetNeededMemory(int32_t fft_length); + +// Initialize the state of an IRFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// IrfftGetNeededMemory(fft_length). Fails if it isn't. 
+void* IrfftFloatInit(int32_t fft_length, void* state, size_t state_size); + +// Applies IRFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see IRfftInit) +// * `output` must be of size output +void IrfftFloatApply(void* state, const Complex* input, float* output); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_IRFFT_H_ \ No newline at end of file diff --git a/src/signal/src/irfft_float.cpp b/src/signal/src/irfft_float.cpp new file mode 100644 index 0000000..53ce0fe --- /dev/null +++ b/src/signal/src/irfft_float.cpp @@ -0,0 +1,64 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/irfft.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_float.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +struct IrfftFloatState { + int32_t fft_length; + kiss_fft_float::kiss_fftr_cfg cfg; +}; + +size_t IrfftFloatGetNeededMemory(int32_t fft_length) { + size_t cfg_size = 0; + kiss_fft_float::kiss_fftr_alloc(fft_length, 1, nullptr, &cfg_size); + return sizeof(IrfftFloatState) + cfg_size; +} + +void* IrfftFloatInit(int32_t fft_length, void* state, size_t state_size) { + IrfftFloatState* irfft_float_state = static_cast(state); + irfft_float_state->cfg = + reinterpret_cast(irfft_float_state + 1); + irfft_float_state->fft_length = fft_length; + size_t cfg_size = state_size - sizeof(IrfftFloatState); + return kiss_fft_float::kiss_fftr_alloc(fft_length, 1, irfft_float_state->cfg, + &cfg_size); +} + +void IrfftFloatApply(void* state, const Complex* input, float* output) { + IrfftFloatState* irfft_float_state = static_cast(state); + kiss_fft_float::kiss_fftri( + static_cast(irfft_float_state->cfg), + reinterpret_cast(input), + reinterpret_cast(output)); + // KissFFT scales the IRFFT output by the FFT length. + // KissFFT's nfft is the complex FFT length, which is half the real FFT's + // length. Compensate. + const int fft_length = irfft_float_state->fft_length; + for (int i = 0; i < fft_length; i++) { + output[i] /= fft_length; + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/irfft_int16.cpp b/src/signal/src/irfft_int16.cpp new file mode 100644 index 0000000..f92b3c5 --- /dev/null +++ b/src/signal/src/irfft_int16.cpp @@ -0,0 +1,46 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/irfft.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_int16.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +size_t IrfftInt16GetNeededMemory(int32_t fft_length) { + size_t state_size = 0; + kiss_fft_fixed16::kiss_fftr_alloc(fft_length, 1, nullptr, &state_size); + return state_size; +} + +void* IrfftInt16Init(int32_t fft_length, void* state, size_t state_size) { + return kiss_fft_fixed16::kiss_fftr_alloc(fft_length, 1, state, &state_size); +} + +void IrfftInt16Apply(void* state, const Complex* input, + int16_t* output) { + kiss_fft_fixed16::kiss_fftri( + static_cast(state), + reinterpret_cast(input), + reinterpret_cast(output)); +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/irfft_int32.cpp b/src/signal/src/irfft_int32.cpp new file mode 100644 index 0000000..d4802a6 --- /dev/null +++ b/src/signal/src/irfft_int32.cpp @@ -0,0 +1,46 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/irfft.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_int32.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +size_t IrfftInt32GetNeededMemory(int32_t fft_length) { + size_t state_size = 0; + kiss_fft_fixed32::kiss_fftr_alloc(fft_length, 1, nullptr, &state_size); + return state_size; +} + +void* IrfftInt32Init(int32_t fft_length, void* state, size_t state_size) { + return kiss_fft_fixed32::kiss_fftr_alloc(fft_length, 1, state, &state_size); +} + +void IrfftInt32Apply(void* state, const Complex* input, + int32_t* output) { + kiss_fft_fixed32::kiss_fftri( + static_cast(state), + reinterpret_cast(input), + reinterpret_cast(output)); +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_common.h b/src/signal/src/kiss_fft_wrappers/kiss_fft_common.h new file mode 100644 index 0000000..b09d9af --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_common.h @@ -0,0 +1,49 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_COMMON_H_ +#define SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_COMMON_H_ + +// This header file should be included in all variants of kiss_fft_$type.{h,cc} +// so that their sub-included source files do not mistakenly wrap libc header +// files within their kissfft_$type namespaces. +// E.g., This header avoids kissfft_int16.h containing: +// namespace kiss_fft_int16 { +// #include "third_party/kissfft/kiss_fft.h" +// } +// where kiss_fft_.h contains: +// #include +// +// TRICK: By including the following header files here, their preprocessor +// header guards prevent them being re-defined inside of the kiss_fft_$type +// namespaces declared within the kiss_fft_$type.{h,cc} sources. +// Note that the original kiss_fft*.h files are untouched since they +// may be used in libraries that include them directly. + +#include +#include +#include +#include +#include + +#ifdef FIXED_POINT +#include +#endif + +#ifdef USE_SIMD +#include +#endif + +#endif // SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_COMMON_H_ diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_float.cpp b/src/signal/src/kiss_fft_wrappers/kiss_fft_float.cpp new file mode 100644 index 0000000..1e16574 --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_float.cpp @@ -0,0 +1,22 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" + +#undef FIXED_POINT +namespace kiss_fft_float { +#include "kiss_fft.c" +#include "tools/kiss_fftr.c" +} // namespace kiss_fft_float diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_float.h b/src/signal/src/kiss_fft_wrappers/kiss_fft_float.h new file mode 100644 index 0000000..d91df66 --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_float.h @@ -0,0 +1,31 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_FLOAT_H_ +#define SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_FLOAT_H_ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" + +// Wrap floating point kiss fft in its own namespace. Enables us to link an +// application with different kiss fft resolutions +// (16/32 bit integer, float, double) without getting a linker error. 
+#undef FIXED_POINT +namespace kiss_fft_float { +#include "third_party/kissfft/kiss_fft.h" +#include "third_party/kissfft/tools/kiss_fftr.h" +} // namespace kiss_fft_float +#undef FIXED_POINT + +#endif // SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_FLOAT_H_ diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.cpp b/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.cpp new file mode 100644 index 0000000..33a229f --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.cpp @@ -0,0 +1,23 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" + +#define FIXED_POINT 16 +namespace kiss_fft_fixed16 { +#include "kiss_fft.c" +#include "tools/kiss_fftr.c" +} // namespace kiss_fft_fixed16 +#undef FIXED_POINT diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.h b/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.h new file mode 100644 index 0000000..731d006 --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_int16.h @@ -0,0 +1,30 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT16_H_ +#define SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT16_H_ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" +// Wrap floating point kiss fft in its own namespace. Enables us to link an +// application with different kiss fft resolutions +// (16/32 bit integer, float, double) without getting a linker error. +#define FIXED_POINT 16 +namespace kiss_fft_fixed16 { +#include "third_party/kissfft/kiss_fft.h" +#include "third_party/kissfft/tools/kiss_fftr.h" +} // namespace kiss_fft_fixed16 +#undef FIXED_POINT + +#endif // SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT16_H_ diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.cpp b/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.cpp new file mode 100644 index 0000000..38e18f0 --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.cpp @@ -0,0 +1,23 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" + +#define FIXED_POINT 32 +namespace kiss_fft_fixed32 { +#include "kiss_fft.c" +#include "tools/kiss_fftr.c" +} // namespace kiss_fft_fixed32 +#undef FIXED_POINT diff --git a/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.h b/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.h new file mode 100644 index 0000000..f88ae23 --- /dev/null +++ b/src/signal/src/kiss_fft_wrappers/kiss_fft_int32.h @@ -0,0 +1,31 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT32_H_ +#define SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT32_H_ + +#include "signal/src/kiss_fft_wrappers/kiss_fft_common.h" + +// Wrap floating point kiss fft in its own namespace. Enables us to link an +// application with different kiss fft resolutions +// (16/32 bit integer, float, double) without getting a linker error. 
+#define FIXED_POINT 32 +namespace kiss_fft_fixed32 { +#include "third_party/kissfft/kiss_fft.h" +#include "third_party/kissfft/tools/kiss_fftr.h" +} // namespace kiss_fft_fixed32 +#undef FIXED_POINT + +#endif // SIGNAL_SRC_KISS_FFT_WRAPPERS_KISS_FFT_INT32_H_ diff --git a/src/signal/src/log.cpp b/src/signal/src/log.cpp new file mode 100644 index 0000000..86f8e72 --- /dev/null +++ b/src/signal/src/log.cpp @@ -0,0 +1,84 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/log.h" + +#include "signal/src/msb.h" + +namespace tflite { +namespace tflm_signal { +namespace { + +const uint16_t kLogLut[] = { + 0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163, + 2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848, + 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934, + 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507, + 5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633, + 5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370, + 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762, + 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848, + 3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659, + 2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224, + 1094, 963, 830, 695, 559, 421, 282, 142, 0, 0}; + +// Number of segments in the log lookup table. The table will be kLogSegments+1 +// in length (with some padding). +// constexpr int kLogSegments = 128; +constexpr int kLogSegmentsLog2 = 7; + +// Scale used by lookup table. 
+constexpr int kLogScale = 65536; +constexpr int kLogScaleLog2 = 16; +constexpr int kLogCoeff = 45426; + +uint32_t Log2FractionPart32(uint32_t x, uint32_t log2x) { + // Part 1 + int32_t frac = x - (1LL << log2x); + if (log2x < kLogScaleLog2) { + frac <<= kLogScaleLog2 - log2x; + } else { + frac >>= log2x - kLogScaleLog2; + } + // Part 2 + const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2); + const uint32_t seg_unit = (UINT32_C(1) << kLogScaleLog2) >> kLogSegmentsLog2; + + // ASSERT(base_seg < kLogSegments); + const int32_t c0 = kLogLut[base_seg]; + const int32_t c1 = kLogLut[base_seg + 1]; + const int32_t seg_base = seg_unit * base_seg; + const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2; + return frac + c0 + rel_pos; +} + +} // namespace + +// Calculate integer logarithm, 32 Bit version +uint32_t Log32(uint32_t x, uint32_t out_scale) { + // ASSERT(x != 0); + const uint32_t integer = MostSignificantBit32(x) - 1; + const uint32_t fraction = Log2FractionPart32(x, integer); + const uint32_t log2 = (integer << kLogScaleLog2) + fraction; + const uint32_t round = kLogScale / 2; + const uint32_t loge = + ((static_cast(kLogCoeff)) * log2 + round) >> kLogScaleLog2; + // Finally scale to our output scale + const uint32_t loge_scaled = (out_scale * loge + round) >> kLogScaleLog2; + return loge_scaled; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/log.h b/src/signal/src/log.h new file mode 100644 index 0000000..13045f4 --- /dev/null +++ b/src/signal/src/log.h @@ -0,0 +1,30 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_LOG_H_ +#define SIGNAL_SRC_LOG_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Natural logarithm of an integer. The result is multiplied by out_scale +uint32_t Log32(uint32_t x, uint32_t out_scale); + +} // namespace tflm_signal +} // namespace tflite +#endif // SIGNAL_SRC_LOG_H_ diff --git a/src/signal/src/max_abs.cpp b/src/signal/src/max_abs.cpp new file mode 100644 index 0000000..0ad117a --- /dev/null +++ b/src/signal/src/max_abs.cpp @@ -0,0 +1,84 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/max_abs.h" + +#if defined(XTENSA) +#include +#include +#endif +#if XCHAL_HAVE_HIFI3 +#include +static inline ae_p24x2s MaxAbs16Single(ae_p24x2s max, ae_p24x2s current) { + return AE_MAXABSSP24S(max, current); +} +#elif XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP +#include +static inline ae_p24x2s MaxAbs16Single(ae_p24x2s max, ae_p24x2s current) { + current = AE_ABSSP24S(current); + return AE_MAXP24S(max, current); +} +#endif + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +#if XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP || \ + XCHAL_HAVE_HIFI3 +int16_t XtensaMaxAbs16(const int16_t* input, int size) { + int i; + ae_p24x2s current_24x2; + // AE_LP16X2F_IU() effectively pre-increments the address in input_16x2 by 4 + // bytes before loading, so we need to initialize it accordingly. + const ae_p16x2s* input_16x2 = (const ae_p16x2s*)(input - 2); + ae_p24x2s max = AE_ZEROP48(); + const int num_iterations = size / 2; + for (i = 0; i < num_iterations; i++) { + // Advancing the pointer by 2 X 16-bits. + AE_LP16X2F_IU(current_24x2, input_16x2, 4); + max = MaxAbs16Single(max, current_24x2); + } + if (size & 1) { // n is odd + // Advancing the pointer by 2 X 16-bits. + current_24x2 = AE_LP16F_I((ae_p16s*)input_16x2, 4); + max = MaxAbs16Single(max, current_24x2); + } + const int max_L = AE_TRUNCA16P24S_L(max); + const int max_H = AE_TRUNCA16P24S_H(max); + return (max_L >= max_H) ? 
max_L : max_H; +} +#endif + +int16_t MaxAbs16(const int16_t* input, int size) { +#if XCHAL_HAVE_HIFI_MINI || XCHAL_HAVE_HIFI2 || XCHAL_HAVE_HIFI_EP || \ + XCHAL_HAVE_HIFI3 + return XtensaMaxAbs16(input, size); +#else + int16_t max = 0; + for (int i = 0; i < size; i++) { + const int16_t value = input[i]; + if (value > max) { + max = value; + } else if (-value > max) { + max = -value; + } + } + return max; +#endif +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/max_abs.h b/src/signal/src/max_abs.h new file mode 100644 index 0000000..538f796 --- /dev/null +++ b/src/signal/src/max_abs.h @@ -0,0 +1,31 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_MAX_ABS_H_ +#define SIGNAL_SRC_MAX_ABS_H_ + +#include + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflite { +namespace tflm_signal { + +// Returns the maximum absolute value of the `size` elements in `input` +int16_t MaxAbs16(const int16_t* input, int size); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_MAX_ABS_H_ diff --git a/src/signal/src/msb.h b/src/signal/src/msb.h new file mode 100644 index 0000000..2bdcb47 --- /dev/null +++ b/src/signal/src/msb.h @@ -0,0 +1,32 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_MSB_H_ +#define SIGNAL_SRC_MSB_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Index of the most significant bit +uint32_t MostSignificantBit32(uint32_t x); +uint32_t MostSignificantBit64(uint64_t x); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_MSB_H_ diff --git a/src/signal/src/msb_32.cpp b/src/signal/src/msb_32.cpp new file mode 100644 index 0000000..67f2664 --- /dev/null +++ b/src/signal/src/msb_32.cpp @@ -0,0 +1,48 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/msb.h" + +#if defined(XTENSA) +#include +#endif + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// TODO(b/291167350): can allow __builtin_clz to be used in more cases here +uint32_t MostSignificantBit32(uint32_t x) { +#if defined(XTENSA) + // XT_NSAU returns the number of left shifts needed to put the MSB in the + // leftmost position. Returns 32 if the argument is 0. + return 32 - XT_NSAU(x); +#elif defined(__GNUC__) + if (x) { + return 32 - __builtin_clz(x); + } + return 32; +#else + uint32_t temp = 0; + while (x) { + x = x >> 1; + ++temp; + } + return temp; +#endif +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/msb_64.cpp b/src/signal/src/msb_64.cpp new file mode 100644 index 0000000..7416438 --- /dev/null +++ b/src/signal/src/msb_64.cpp @@ -0,0 +1,52 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "signal/src/msb.h" + +#if defined(XTENSA) +#include +#endif + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +uint32_t MostSignificantBit64(uint64_t x) { +#if defined(XTENSA) + // XT_NSAU returns the number of left shifts needed to put the MSB in the + // leftmost position. Returns 32 if the argument is 0. + uint32_t upper = 64 - XT_NSAU((uint32_t)(x >> 32)); + if (upper != 32) { + return upper; + } + // Only if the upper bits are all clear do we want to look at the lower bits. + return 32 - XT_NSAU((uint32_t)x); +#elif defined(__GNUC__) + if (x) { + return 64 - __builtin_clzll(x); + } + return 64; +#else + uint32_t temp = 0; + while (x) { + x = x >> 1; + ++temp; + } + return temp; +#endif +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/overlap_add.cpp b/src/signal/src/overlap_add.cpp new file mode 100644 index 0000000..fe6e664 --- /dev/null +++ b/src/signal/src/overlap_add.cpp @@ -0,0 +1,51 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "signal/src/overlap_add.h" + +#include +#include + +namespace tflm_signal { + +void OverlapAdd(const int16_t* input, int16_t* buffer, int input_size, + int16_t* output, int output_size) { + for (int i = 0; i < input_size; ++i) { + int32_t overlap_added_sample = input[i] + buffer[i]; + if (overlap_added_sample < INT16_MIN) { + buffer[i] = INT16_MIN; + } else if (overlap_added_sample > INT16_MAX) { + buffer[i] = INT16_MAX; + } else { + buffer[i] = (int16_t)overlap_added_sample; + } + } + memcpy(output, buffer, output_size * sizeof(output[0])); + memmove(buffer, &buffer[output_size], + (input_size - output_size) * sizeof(buffer[0])); + memset(&buffer[input_size - output_size], 0, output_size * sizeof(buffer[0])); +} + +void OverlapAdd(const float* input, float* buffer, int input_size, + float* output, int output_size) { + for (int i = 0; i < input_size; ++i) { + buffer[i] += input[i]; + } + memcpy(output, buffer, output_size * sizeof(output[0])); + memmove(buffer, &buffer[output_size], + (input_size - output_size) * sizeof(buffer[0])); + memset(&buffer[input_size - output_size], 0, output_size * sizeof(buffer[0])); +} + +} // namespace tflm_signal diff --git a/src/signal/src/overlap_add.h b/src/signal/src/overlap_add.h new file mode 100644 index 0000000..6189809 --- /dev/null +++ b/src/signal/src/overlap_add.h @@ -0,0 +1,46 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef SIGNAL_SRC_OVERLAP_ADD_H_ +#define SIGNAL_SRC_OVERLAP_ADD_H_ + +#include +#include + +namespace tflm_signal { +// Adds (with saturation) the contents of `input` to the contents of `buffer`, +// both of size `input_size`, then copies the first `output_size` elements of +// `buffer` to `output`, shifts the last `input_size`-`output_size` elements of +// `buffer` to the beginning of `buffer` and fills the trailing `output_size` +// samples in `buffer` with zeros. +// input: {input[0] ... input[input_size-1]} +// buffer: {buffer[0] ... buffer[input_size-1]} +// After invocation: +// output: {saturate(input[0] + buffer[0]), +// ... , +// saturate(input[output_size-1] + buffer[output_size-1])} +// buffer: {saturate(input[output_size] + buffer[output_size]), +// ... +// saturate( input[input_size-output_size-1] +// + buffer[input_size-output_size-1]), +// zeros(output_size)} +void OverlapAdd(const int16_t* input, int16_t* buffer, int input_size, + int16_t* output, int output_size); + +// The same as the int16_t variant above, but without saturation +void OverlapAdd(const float* input, float* buffer, int input_size, + float* output, int output_size); + +} // namespace tflm_signal +#endif // SIGNAL_SRC_OVERLAP_ADD_H_ diff --git a/src/signal/src/pcan_argc_fixed.cpp b/src/signal/src/pcan_argc_fixed.cpp new file mode 100644 index 0000000..2700b28 --- /dev/null +++ b/src/signal/src/pcan_argc_fixed.cpp @@ -0,0 +1,75 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "pcan_argc_fixed.h" + +namespace tflite { +namespace tflm_signal { + +int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) { + if (x <= 2) { + return lut[x]; + } + + const int16_t interval = MostSignificantBit32(x); + lut += 4 * interval - 6; + + const int16_t frac = + ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) & + 0x3FF; + + int32_t result = ((int32_t)lut[2] * frac) >> 5; + result += (int32_t)((uint32_t)lut[1] << 5); + result *= frac; + result = (result + (1 << 14)) >> 15; + result += lut[0]; + return (int16_t)result; +} + +// Evaluate the piecewise polynomial "shrink" function defined by +// shrink(x) = x^2 / 4 for x < 2, +// shrink(x) = x - 1 for x >= 2. +// The input x has kPcanSnrBits fractional bits, and the output has +// kPcanOutputBits fractional bits. +uint32_t PcanShrink(const uint32_t x) { + TFLITE_DCHECK(kPcanSnrBits >= kPcanOutputBits); + if (x < (2 << kPcanSnrBits)) { + // Compute x^2 / 4. + return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits); + } else { + // Compute x - 1. + return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits); + } +} + +void ApplyPcanAutoGainControlFixed(const int16_t* gain_lut, int32_t snr_shift, + const uint32_t* noise_estimate, + uint32_t* filterbank_output, + int num_channels) { + int i; + for (i = 0; i < num_channels; ++i) { + // The gain has gain_bits fractional bits, and filterbank_output[i] has + // -input_correction_bits fractional bits. 
The product is shifted so that + // the resulting snr has kPcanSnrBits fractional bits. + const uint32_t gain = WideDynamicFunction(noise_estimate[i], gain_lut); + const uint32_t snr = ((uint64_t)filterbank_output[i] * gain) >> snr_shift; + // Result has kPcanOutputBits fractional bits. + // NOTE: This assumes filterbank_output_scale = 1 << kPcanOutputBits. + filterbank_output[i] = PcanShrink(snr); + } +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/pcan_argc_fixed.h b/src/signal/src/pcan_argc_fixed.h new file mode 100644 index 0000000..36eaf3d --- /dev/null +++ b/src/signal/src/pcan_argc_fixed.h @@ -0,0 +1,41 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H +#define SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H +#include <stdint.h> + +#include "msb.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" + +namespace tflite { +namespace tflm_signal { + +#define kPcanSnrBits 12 +#define kPcanOutputBits 6 + +int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut); + +uint32_t PcanShrink(const uint32_t x); + +void ApplyPcanAutoGainControlFixed(const int16_t* gain_lut, int32_t snr_shift, + const uint32_t* noise_estimate, + uint32_t* filterbank_output, + int num_channels); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_MICRO_KERNELS__SRC_PCAN_AGC_FIXED_H diff --git a/src/signal/src/rfft.h b/src/signal/src/rfft.h new file mode 100644 index 0000000..3305af6 --- /dev/null +++ b/src/signal/src/rfft.h @@ -0,0 +1,85 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+ +// 16-bit Integer input/output + +// Returns the size of the memory that an RFFT of `fft_length` needs +size_t RfftInt16GetNeededMemory(int32_t fft_length); + +// Initialize the state of an RFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// RfftGetNeededMemory(fft_length). +// Return the value of `state` on success or nullptr on failure +void* RfftInt16Init(int32_t fft_length, void* state, size_t state_size); + +// Applies RFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see RfftInit) +// * `output` must be of size (`fft_length` * 2) + 1 elements +void RfftInt16Apply(void* state, const int16_t* input, + Complex* output); + +// 32-bit Integer input/output + +// Returns the size of the memory that an RFFT of `fft_length` needs +size_t RfftInt32GetNeededMemory(int32_t fft_length); + +// Initialize the state of an RFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// RfftGetNeededMemory(fft_length). +// Return the value of `state` on success or nullptr on failure +void* RfftInt32Init(int32_t fft_length, void* state, size_t state_size); + +// Applies RFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see RfftInit) +// * `output` must be of size (`fft_length` * 2) + 1 elements +void RfftInt32Apply(void* state, const int32_t* input, + Complex* output); + +// Floating point input/output + +// Returns the size of the memory that an RFFT of `fft_length` needs +size_t RfftFloatGetNeededMemory(int32_t fft_length); + +// Initialize the state of an RFFT of `fft_length` +// `state` points to an opaque state of size `state_size`, which +// must be greater or equal to the value returned by +// RfftGetNeededMemory(fft_length). 
+// Return the value of `state` on success or nullptr on failure +void* RfftFloatInit(int32_t fft_length, void* state, size_t state_size); + +// Applies RFFT to `input` and writes the result to `output` +// * `input` must be of size `fft_length` elements (see RfftInit) +// * `output` must be of size (`fft_length` * 2) + 1 elements +void RfftFloatApply(void* state, const float* input, Complex* output); + +} // namespace tflm_signal + +#endif // SIGNAL_SRC_RFFT_H_ diff --git a/src/signal/src/rfft_float.cpp b/src/signal/src/rfft_float.cpp new file mode 100644 index 0000000..e42c757 --- /dev/null +++ b/src/signal/src/rfft_float.cpp @@ -0,0 +1,43 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_float.h" +#include "signal/src/rfft.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +size_t RfftFloatGetNeededMemory(int32_t fft_length) { + size_t state_size = 0; + kiss_fft_float::kiss_fftr_alloc(fft_length, 0, nullptr, &state_size); + return state_size; +} + +void* RfftFloatInit(int32_t fft_length, void* state, size_t state_size) { + return kiss_fft_float::kiss_fftr_alloc(fft_length, 0, state, &state_size); +} + +void RfftFloatApply(void* state, const float* input, Complex* output) { + kiss_fft_float::kiss_fftr( + static_cast(state), + reinterpret_cast(input), + reinterpret_cast(output)); +} + +} // namespace tflm_signal diff --git a/src/signal/src/rfft_int16.cpp b/src/signal/src/rfft_int16.cpp new file mode 100644 index 0000000..2d26fa1 --- /dev/null +++ b/src/signal/src/rfft_int16.cpp @@ -0,0 +1,44 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_int16.h" +#include "signal/src/rfft.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +size_t RfftInt16GetNeededMemory(int32_t fft_length) { + size_t state_size = 0; + kiss_fft_fixed16::kiss_fftr_alloc(fft_length, 0, nullptr, &state_size); + return state_size; +} + +void* RfftInt16Init(int32_t fft_length, void* state, size_t state_size) { + return kiss_fft_fixed16::kiss_fftr_alloc(fft_length, 0, state, &state_size); +} + +void RfftInt16Apply(void* state, const int16_t* input, + Complex* output) { + kiss_fft_fixed16::kiss_fftr( + static_cast(state), + reinterpret_cast(input), + reinterpret_cast(output)); +} + +} // namespace tflm_signal diff --git a/src/signal/src/rfft_int32.cpp b/src/signal/src/rfft_int32.cpp new file mode 100644 index 0000000..572494e --- /dev/null +++ b/src/signal/src/rfft_int32.cpp @@ -0,0 +1,44 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "signal/src/complex.h" +#include "signal/src/kiss_fft_wrappers/kiss_fft_int32.h" +#include "signal/src/rfft.h" + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +size_t RfftInt32GetNeededMemory(int32_t fft_length) { + size_t state_size = 0; + kiss_fft_fixed32::kiss_fftr_alloc(fft_length, 0, nullptr, &state_size); + return state_size; +} + +void* RfftInt32Init(int32_t fft_length, void* state, size_t state_size) { + return kiss_fft_fixed32::kiss_fftr_alloc(fft_length, 0, state, &state_size); +} + +void RfftInt32Apply(void* state, const int32_t* input, + Complex* output) { + kiss_fft_fixed32::kiss_fftr( + static_cast(state), + reinterpret_cast(input), + reinterpret_cast(output)); +} + +} // namespace tflm_signal diff --git a/src/signal/src/square_root.h b/src/signal/src/square_root.h new file mode 100644 index 0000000..b32855c --- /dev/null +++ b/src/signal/src/square_root.h @@ -0,0 +1,32 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef SIGNAL_SRC_SQUARE_ROOT_H_ +#define SIGNAL_SRC_SQUARE_ROOT_H_ + +#include + +namespace tflite { +namespace tflm_signal { +// TODO(b/286250473): remove namespace once de-duped libraries above + +// Square root +uint16_t Sqrt32(uint32_t num); +uint32_t Sqrt64(uint64_t num); + +} // namespace tflm_signal +} // namespace tflite + +#endif // SIGNAL_SRC_SQUARE_ROOT_H_ diff --git a/src/signal/src/square_root_32.cpp b/src/signal/src/square_root_32.cpp new file mode 100644 index 0000000..3ca11ef --- /dev/null +++ b/src/signal/src/square_root_32.cpp @@ -0,0 +1,46 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/msb.h" +#include "signal/src/square_root.h" + +namespace tflite { +namespace tflm_signal { + +uint16_t Sqrt32(uint32_t num) { + if (num == 0) { + return 0; + }; + uint32_t res = 0; + int max_bit_number = 32 - MostSignificantBit32(num); + max_bit_number |= 1; + uint32_t bit = 1u << (31 - max_bit_number); + int iterations = (31 - max_bit_number) / 2 + 1; + while (iterations--) { + if (num >= res + bit) { + num -= res + bit; + res = (res >> 1U) + bit; + } else { + res >>= 1U; + } + bit >>= 2U; + } + // Do rounding - if we have the bits. 
+ if (num > res && res != 0xFFFF) ++res; + return res; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/square_root_64.cpp b/src/signal/src/square_root_64.cpp new file mode 100644 index 0000000..54e8439 --- /dev/null +++ b/src/signal/src/square_root_64.cpp @@ -0,0 +1,49 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/msb.h" +#include "signal/src/square_root.h" + +namespace tflite { +namespace tflm_signal { + +uint32_t Sqrt64(uint64_t num) { + // Take a shortcut and just use 32 bit operations if the upper word is all + // clear. This will cause a slight off by one issue for numbers close to 2^32, + // but it probably isn't going to matter (and gives us a big performance win). + if ((num >> 32) == 0) { + return Sqrt32(static_cast(num)); + } + uint64_t res = 0; + int max_bit_number = 64 - MostSignificantBit64(num); + max_bit_number |= 1; + uint64_t bit = UINT64_C(1) << (63 - max_bit_number); + int iterations = (63 - max_bit_number) / 2 + 1; + while (iterations--) { + if (num >= res + bit) { + num -= res + bit; + res = (res >> 1U) + bit; + } else { + res >>= 1U; + } + bit >>= 2U; + } + // Do rounding - if we have the bits. 
+ if (num > res && res != 0xFFFFFFFFLL) ++res; + return res; +} + +} // namespace tflm_signal +} // namespace tflite diff --git a/src/signal/src/window.cpp b/src/signal/src/window.cpp new file mode 100644 index 0000000..c61282b --- /dev/null +++ b/src/signal/src/window.cpp @@ -0,0 +1,36 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "signal/src/window.h" + +#include + +// TODO(b/286250473): remove namespace once de-duped libraries +namespace tflm_signal { + +void ApplyWindow(const int16_t* input, const int16_t* window, int size, + int shift, int16_t* output) { + for (int i = 0; i < size; ++i) { + int32_t raw = (static_cast(input[i]) * window[i]) >> shift; + if (raw < INT16_MIN) { + output[i] = INT16_MIN; + } else if (raw > INT16_MAX) { + output[i] = INT16_MAX; + } else { + output[i] = static_cast(raw); + } + } +} +} // namespace tflm_signal diff --git a/src/signal/src/window.h b/src/signal/src/window.h new file mode 100644 index 0000000..88e8d11 --- /dev/null +++ b/src/signal/src/window.h @@ -0,0 +1,31 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef SIGNAL_SRC_WINDOW_H_ +#define SIGNAL_SRC_WINDOW_H_ + +#include + +namespace tflm_signal { + +// Applies a window function to an input signal +// +// * `input` and `window` must be both of size `size` elements and are +// multiplied element-by element. +// * `shift` is a right shift to apply before writing the result to `output`. +void ApplyWindow(const int16_t* input, const int16_t* window, int size, + int shift, int16_t* output); +} // namespace tflm_signal +#endif // SIGNAL_SRC_WINDOW_H_ diff --git a/src/tensorflow/lite/core/api/error_reporter.cpp b/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.cpp similarity index 94% rename from src/tensorflow/lite/core/api/error_reporter.cpp rename to src/tensorflow/compiler/mlir/lite/core/api/error_reporter.cpp index 7070eaa..96f7561 100644 --- a/src/tensorflow/lite/core/api/error_reporter.cpp +++ b/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.cpp @@ -12,7 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h" + #include namespace tflite { diff --git a/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.h b/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.h new file mode 100644 index 0000000..79c9fc9 --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/core/api/error_reporter.h @@ -0,0 +1,72 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_API_ERROR_REPORTER_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_API_ERROR_REPORTER_H_ + +#include + +namespace tflite { + +/// A functor that reports error to supporting system. Invoked similar to +/// printf. +/// +/// Usage: +/// ErrorReporter foo; +/// foo.Report("test %d", 5); +/// or +/// va_list args; +/// foo.Report("test %d", args); // where args is va_list +/// +/// Subclass ErrorReporter to provide another reporting destination. +/// For example, if you have a GUI program, you might redirect to a buffer +/// that drives a GUI error log box. +class ErrorReporter { + public: + virtual ~ErrorReporter() = default; + /// Converts `args` to character equivalents according to `format` string, + /// constructs the error string and report it. 
+ /// Returns number of characters written or zero on success, and negative + /// number on error. + virtual int Report(const char* format, va_list args) = 0; + + /// Converts arguments to character equivalents according to `format` string, + /// constructs the error string and report it. + /// Returns number of characters written or zero on success, and negative + /// number on error. + int Report(const char* format, ...); + + /// Equivalent to `Report` above. The additional `void*` parameter is unused. + /// This method is for compatibility with macros that takes `TfLiteContext`, + /// like TF_LITE_ENSURE and related macros. + int ReportError(void*, const char* format, ...); +}; + +} // namespace tflite + +// You should not make bare calls to the error reporter, instead use the +// TF_LITE_REPORT_ERROR macro, since this allows message strings to be +// stripped when the binary size has to be optimized. If you are looking to +// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and +// every call will be stubbed out, taking no memory. +#ifndef TF_LITE_STRIP_ERROR_STRINGS +#define TF_LITE_REPORT_ERROR(reporter, ...) \ + do { \ + static_cast<::tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \ + } while (false) +#else // TF_LITE_STRIP_ERROR_STRINGS +#define TF_LITE_REPORT_ERROR(reporter, ...) +#endif // TF_LITE_STRIP_ERROR_STRINGS + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_CORE_API_ERROR_REPORTER_H_ diff --git a/src/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h b/src/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h new file mode 100644 index 0000000..1327162 --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h @@ -0,0 +1,670 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/builtin_op_data.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_ + +#include // IWYU pragma: keep +#include +#include + +#include "tensorflow/compiler/mlir/lite/core/c/tflite_types.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible +// number of dimensions. +#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8 +#define TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT 8 +#define TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT 8 +#define TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT 8 +#define TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT 8 +#define TFLITE_STABLEHLO_CASE_PARAMS_MAX_BRANCHES_COUNT 20 + +// TODO(aselle): Consider using "if this then that" for testing. + +// Useful placeholder to put in otherwise empty structs to avoid size warnings. +typedef struct { + char dummy; +} EmptyStructPlaceholder; + +// IMPORTANT: All new members of structs must be added at the end to ensure +// backwards compatibility. 
+ +// Possible padding types (for convolutions) +typedef enum { + kTfLitePaddingUnknown = 0, + kTfLitePaddingSame, + kTfLitePaddingValid, +} TfLitePadding; + +typedef enum { + kTfLiteMirrorPaddingUnknown = 0, + kTfLiteMirrorPaddingReflect, + kTfLiteMirrorPaddingSymmetric, +} TfLiteMirrorPaddingMode; + +// TODO(b/130259536): We should move this out of builtin_op_data. +typedef struct { + int width; + int height; + int width_offset; + int height_offset; +} TfLitePaddingValues; + +typedef struct { + TfLiteMirrorPaddingMode mode; +} TfLiteMirrorPaddingParams; + +// Possible fused activation functions. +typedef enum { + kTfLiteActNone = 0, + kTfLiteActRelu, + kTfLiteActReluN1To1, // min(max(-1, x), 1) + kTfLiteActRelu6, // min(max(0, x), 6) + kTfLiteActTanh, + kTfLiteActSignBit, + kTfLiteActSigmoid, +} TfLiteFusedActivation; + +typedef struct { + // Parameters for CONV_2D version 1. + TfLitePadding padding; + int stride_width; + int stride_height; + TfLiteFusedActivation activation; + + // Parameters for CONV_2D version 2. + // Note: Version 2 supports dilation values not equal to 1. + int dilation_width_factor; + int dilation_height_factor; + + // Parameters for CONV_2D version 7 or above. + // Used to determine the default value for the quantized bias. + TfLiteType quantized_bias_type; +} TfLiteConvParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width_factor; + int dilation_height_factor; + int dilation_depth_factor; + TfLiteFusedActivation activation; +} TfLiteConv3DParams; + +typedef TfLiteConv3DParams TfLiteConv3DTransposeParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int filter_width; + int filter_height; + TfLiteFusedActivation activation; + struct { + TfLitePaddingValues padding; + } computed; +} TfLitePoolParams; + +typedef struct { + // Parameters for DepthwiseConv version 1 or above. 
+ TfLitePadding padding; + int stride_width; + int stride_height; + // `depth_multiplier` is redundant. It's used by CPU kernels in + // TensorFlow 2.0 or below, but ignored in versions above. + // + // The information can be deduced from the shape of input and the shape of + // weights. Since the TFLiteConverter toolchain doesn't support partially + // specified shapes, relying on `depth_multiplier` stops us from supporting + // graphs with dynamic shape tensors. + // + // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this + // field. + int depth_multiplier; + TfLiteFusedActivation activation; + // Parameters for DepthwiseConv version 2 or above. + int dilation_width_factor; + int dilation_height_factor; +} TfLiteDepthwiseConvParams; + +typedef struct { + int rank; + TfLiteFusedActivation activation; + + // Parameter for SVDF version 4. + bool asymmetric_quantize_inputs; +} TfLiteSVDFParams; + +typedef struct { + TfLiteFusedActivation activation; + + // Parameter for RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + + // Parameter for Sequence RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteSequenceRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + bool merge_outputs; + + // Parameter for Bidirectional RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceRNNParams; + +typedef enum { + kTfLiteFullyConnectedWeightsFormatDefault = 0, + kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, +} TfLiteFullyConnectedWeightsFormat; + +typedef struct { + // Parameters for FullyConnected version 1 or above. + TfLiteFusedActivation activation; + + // Parameters for FullyConnected version 2 or above. + TfLiteFullyConnectedWeightsFormat weights_format; + + // Parameters for FullyConnected version 5 or above. 
+ // If set to true, then the number of dimensions in the input and the output + // tensors are the same. Furthermore, all but the last dimension of the input + // and output shapes will be equal. + bool keep_num_dims; + + // Parameters for FullyConnected version 7 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. + bool asymmetric_quantize_inputs; + + // Parameters for FullyConnected version 10 or above. + // Used to determine the default value for the quantized bias. + TfLiteType quantized_bias_type; +} TfLiteFullyConnectedParams; + +typedef enum { + kTfLiteLshProjectionUnknown = 0, + kTfLiteLshProjectionSparse = 1, + kTfLiteLshProjectionDense = 2, +} TfLiteLSHProjectionType; + +typedef struct { + TfLiteLSHProjectionType type; +} TfLiteLSHProjectionParams; + +typedef struct { + float beta; +} TfLiteSoftmaxParams; + +typedef struct { + int axis; + TfLiteFusedActivation activation; +} TfLiteConcatenationParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 4. + bool pot_scale_int16; +} TfLiteAddParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteSpaceToBatchNDParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteBatchToSpaceNDParams; + +typedef struct { + bool adj_x; + bool adj_y; + // Parameters for BatchMatMul version 4 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. + bool asymmetric_quantize_inputs; +} TfLiteBatchMatMulParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteMulParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 5. 
+ bool pot_scale_int16; +} TfLiteSubParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteDivParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteL2NormParams; + +typedef struct { + int radius; + float bias; + float alpha; + float beta; +} TfLiteLocalResponseNormParams; + +typedef enum { + kTfLiteLSTMFullKernel = 0, + kTfLiteLSTMBasicKernel +} TfLiteLSTMKernelType; + +typedef struct { + // Parameters for LSTM version 1. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // Parameters for LSTM version 2. + // kTfLiteLSTMBasicKernel is only supported in version 2 or above. + TfLiteLSTMKernelType kernel_type; + + // Parameters for LSTM version 4. + bool asymmetric_quantize_inputs; +} TfLiteLSTMParams; + +typedef struct { + // Parameters needed for the underlying LSTM. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameter for unidirectional sequence RNN version 3. + bool asymmetric_quantize_inputs; + + // Parameter for unidirectional sequence RNN version 4. + bool diagonal_recurrent_tensors; +} TfLiteUnidirectionalSequenceLSTMParams; + +typedef struct { + // Parameters supported by version 1: + // Parameters inherited for the LSTM kernel. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If true, store the outputs of both directions in the first output. + bool merge_outputs; + + // Parameters supported by version 2: + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameters supported by version 3: + // If set to true, then hybrid ops use asymmetric quantization for inputs. 
+ bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceLSTMParams; + +typedef struct { + bool align_corners; + // half_pixel_centers assumes pixels are of half the actual dimensions, and + // yields more accurate resizes. Corresponds to the same argument for the + // original TensorFlow op in TF2.0. + bool half_pixel_centers; +} TfLiteResizeBilinearParams; + +typedef struct { + bool align_corners; + bool half_pixel_centers; +} TfLiteResizeNearestNeighborParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadV2Params; + +typedef struct { + // These fields are only used in old models for backward compatibility. + // In the current implementation, we use the 2nd input of the op as the shape, + // and these fields are unused. + int32_t shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; + int num_dimensions; +} TfLiteReshapeParams; + +typedef struct { + int ngram_size; + int max_skip_size; + bool include_all_ngrams; +} TfLiteSkipGramParams; + +typedef struct { + int block_size; +} TfLiteSpaceToDepthParams; + +typedef struct { + int block_size; +} TfLiteDepthToSpaceParams; + +typedef struct { + TfLiteType in_data_type; + TfLiteType out_data_type; +} TfLiteCastParams; + +typedef enum { + kTfLiteCombinerTypeSum = 0, + kTfLiteCombinerTypeMean = 1, + kTfLiteCombinerTypeSqrtn = 2, +} TfLiteCombinerType; + +typedef struct { + TfLiteCombinerType combiner; +} TfLiteEmbeddingLookupSparseParams; + +typedef struct { + int axis; + int batch_dims; +} TfLiteGatherParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteTransposeParams; + +typedef struct { + bool keep_dims; +} TfLiteReducerParams; + +typedef struct { + int num_splits; +} TfLiteSplitParams; + +typedef struct { + int num_splits; +} TfLiteSplitVParams; + +typedef struct { + // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. 
+ // For now we will fix the maximum possible number of dimensions. + int32_t squeeze_dims[8]; + int num_squeeze_dims; +} TfLiteSqueezeParams; + +typedef struct { + int begin_mask; + int end_mask; + int ellipsis_mask; + int new_axis_mask; + int shrink_axis_mask; + + // Parameters supported by version 8: + // If true, then the end tensor is an offset of the begin tensor. + bool offset; +} TfLiteStridedSliceParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMaxParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMinParams; + +typedef struct { + // Parameters supported by version 1: + TfLitePadding padding; + int stride_width; + int stride_height; + + // Parameters supported by version 4: + TfLiteFusedActivation activation; + + // Parameters for TransposeConv version 5 or above. + // Used to determine the default value for the quantized bias. + TfLiteType quantized_bias_type; +} TfLiteTransposeConvParams; + +typedef struct { + bool validate_indices; +} TfLiteSparseToDenseParams; + +typedef struct { + TfLiteType out_type; +} TfLiteShapeParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteRankParams; + +typedef struct { + // Parameters supported by version 1: + float min; + float max; + int num_bits; + + // Parameters supported by version 2: + bool narrow_range; +} TfLiteFakeQuantParams; + +typedef struct { + int values_count; + int axis; +} TfLitePackParams; + +typedef struct { + int axis; +} TfLiteOneHotParams; + +typedef struct { + int num; + int axis; +} TfLiteUnpackParams; + +typedef struct { + float alpha; +} TfLiteLeakyReluParams; + +typedef struct { + TfLiteType index_out_type; +} TfLiteUniqueParams; + +typedef struct { + int seq_dim; + int batch_dim; +} TfLiteReverseSequenceParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixDiagParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixSetDiagParams; + +typedef struct { + int then_subgraph_index; + int 
else_subgraph_index; +} TfLiteIfParams; + +typedef struct { + int cond_subgraph_index; + int body_subgraph_index; +} TfLiteWhileParams; + +typedef struct { + bool exclusive; + bool reverse; +} TfLiteCumsumParams; + +typedef struct { + int init_subgraph_index; +} TfLiteCallOnceParams; + +typedef struct { + int table_id; + TfLiteType key_dtype; + TfLiteType value_dtype; +} TfLiteHashtableParams; + +typedef struct { + const char* container; + const char* shared_name; +} TfLiteVarHandleParams; + +typedef struct { + int seed; + int seed2; +} TfLiteRandomParams; + +typedef struct { + int num_boundaries; + // This points to the memory stored in the model (flatbuffer), + // and is not owned. + const float* boundaries; +} TfLiteBucketizeParams; + +typedef struct { + bool approximate; +} TfLiteGeluParams; + +typedef struct { + int64_t dimension; +} TfLiteStablehloConcatenateParams; + +typedef struct { + // See the stablehlo spec for the explanation of the attributes: + // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#scatter + bool indices_are_sorted; + int64_t + update_window_dims[TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT]; + int num_update_window_dims; + int64_t + inserted_window_dims[TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT]; + int num_inserted_window_dims; + int64_t scatter_dims_to_operand_dims + [TFLITE_STABLEHLO_SCATTER_PARAMS_MAX_DIMENSION_COUNT]; + int num_scatter_dims_to_operand_dims; + int64_t index_vector_dim; + bool unique_indices; + int update_computation_subgraph_index; +} TfLiteStablehloScatterParams; + +typedef enum { + kTfLiteRngAlgorithmUnknown = 0, + // An algorithm auto-selected by the system according to device type. 
+ kTfLiteRngAlgorithmDefault, + // The Philox algorithm, as described in paper + // ['Parallel Random Numbers: As Easy as 1, 2, 3'] + // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf) + kTfLiteRngAlgorithmPhilox, + // The ThreeFry algorithm, as described in paper + // ['Parallel Random Numbers: As Easy as 1, 2, 3'] + // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf) + kTfLiteRngAlgorithmThreefry, +} TfLiteRngAlgorithm; + +typedef struct { + TfLiteRngAlgorithm algorithm; +} TfLiteStablehloRngBitGeneratorParams; + +typedef struct { + // See the stablehlo spec for the explanation of the attributes: + // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#gather + int64_t offset_dims[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT]; + int num_offset_dims; + int64_t + collapsed_slice_dims[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT]; + int num_collapsed_slice_dims; + int64_t start_index_map[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT]; + int num_start_index_map; + int64_t index_vector_dim; + int64_t slice_sizes[TFLITE_STABLEHLO_GATHER_PARAMS_MAX_DIMENSION_COUNT]; + int num_slice_sizes; + bool indices_are_sorted; +} TfLiteStablehloGatherParams; + +typedef struct { + // See the stablehlo spec for the explanation of the attributes: + // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce_window + int64_t window_dimensions + [TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT]; + int64_t + window_strides[TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT]; + int64_t + base_dilations[TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT]; + int64_t window_dilations + [TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT]; + int64_t + padding[2 * TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT]; + int body_subgraph_index; +} TfLiteStablehloReduceWindowParams; + +enum TfLiteReduceWindowFunction { + 
TfLiteReduceWindowFunctionUnsupported, + TfLiteReduceWindowFunctionAdd, + TfLiteReduceWindowFunctionMul, + TfLiteReduceWindowFunctionMin, + TfLiteReduceWindowFunctionMax, + TfLiteReduceWindowFunctionAll, + TfLiteReduceWindowFunctionAny +}; + +typedef struct { + enum TfLiteReduceWindowFunction reduce_function; +} TfLiteReduceWindowParams; + +typedef struct { + // See the stablehlo spec for the explanation of the attributes: + // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad + int64_t edge_padding_low[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT]; + int64_t edge_padding_high[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT]; + int64_t interior_padding[TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT]; +} TfLiteStablehloPadParams; + +typedef struct { + const char* name; + int32_t subgraph_index; + int32_t version; + const uint8_t* attributes; + size_t attributes_size; +} TfLiteStablehloCompositeParams; + +typedef struct { + // See the stablehlo spec for the explanation of the attributes: + // https://github.com/openxla/stablehlo/blob/main/docs/spec.md#case + int32_t + branch_subgraph_indices[TFLITE_STABLEHLO_CASE_PARAMS_MAX_BRANCHES_COUNT]; + uint32_t num_branches; +} TfLiteStablehloCaseParams; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_ diff --git a/src/tensorflow/compiler/mlir/lite/core/c/tflite_types.h b/src/tensorflow/compiler/mlir/lite/core/c/tflite_types.h new file mode 100644 index 0000000..068facb --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/core/c/tflite_types.h @@ -0,0 +1,90 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// This file hosts data structures that are needed both for LiteRT and +// Compiler. + +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/c_api_types.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. + +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/// \note Users of TensorFlow Lite should use +/// \code +/// #include "tensorflow/lite/c/c_api_types.h" +/// \endcode +/// to access the APIs documented on this page. +// NOLINTEND(whitespace/line_length) +// clang-format on + +// IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h" + +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_TFLITE_TYPES_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_TFLITE_TYPES_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// Types supported by tensor +// LINT.IfChange +typedef enum { + kTfLiteNoType = 0, + kTfLiteFloat32 = 1, + kTfLiteInt32 = 2, + kTfLiteUInt8 = 3, + kTfLiteInt64 = 4, + kTfLiteString = 5, + kTfLiteBool = 6, + kTfLiteInt16 = 7, + kTfLiteComplex64 = 8, + kTfLiteInt8 = 9, + kTfLiteFloat16 = 10, + kTfLiteFloat64 = 11, + kTfLiteComplex128 = 12, + kTfLiteUInt64 = 13, + kTfLiteResource = 14, + kTfLiteVariant = 15, + kTfLiteUInt32 = 16, + kTfLiteUInt16 = 17, + kTfLiteInt4 = 18, + kTfLiteBFloat16 = 19, +} TfLiteType; +// LINT.ThenChange(//tensorflow/lite/profiling/proto/model_runtime_info.proto:EdgeDataType) + +/// Legacy. 
Will be deprecated in favor of `TfLiteAffineQuantization`. +/// If per-layer quantization is specified this field will still be populated in +/// addition to `TfLiteAffineQuantization`. +/// Parameters for asymmetric quantization. Quantized values can be converted +/// back to float using: `real_value = scale * (quantized_value - zero_point)` +typedef struct TfLiteQuantizationParams { + float scale; + int32_t zero_point; +} TfLiteQuantizationParams; + +/// Storage format of each dimension in a sparse tensor. +typedef enum TfLiteDimensionType { + kTfLiteDimDense = 0, + kTfLiteDimSparseCSR, +} TfLiteDimensionType; + +#ifdef __cplusplus +} // extern C +#endif + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_TFLITE_TYPES_H_ diff --git a/src/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h b/src/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h new file mode 100644 index 0000000..17de9fb --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h @@ -0,0 +1,21 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_ + +#include "tensorflow/lite/kernels/internal/compatibility.h" + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATABILITY_MACROS_H_ diff --git a/src/tensorflow/compiler/mlir/lite/schema/schema_generated.h b/src/tensorflow/compiler/mlir/lite/schema/schema_generated.h new file mode 100644 index 0000000..87fed47 --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/schema/schema_generated.h @@ -0,0 +1,22 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_ + +// This file should only be used by the make build to redirect schema_utils.cc +// usage of the generated schema to the proper location. 
+#include "tensorflow/lite/schema/schema_generated.h" + +#endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/src/tensorflow/lite/schema/schema_utils.cpp b/src/tensorflow/compiler/mlir/lite/schema/schema_utils.cpp similarity index 89% rename from src/tensorflow/lite/schema/schema_utils.cpp rename to src/tensorflow/compiler/mlir/lite/schema/schema_utils.cpp index fc19290..a173380 100644 --- a/src/tensorflow/lite/schema/schema_utils.cpp +++ b/src/tensorflow/compiler/mlir/lite/schema/schema_utils.cpp @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/schema/schema_utils.h" +#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" #include -#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h" namespace tflite { // The following GetBuiltinCode methods are the utility methods for reading -// builtin operatore code, ensuring compatibility issues between v3 and v3a +// builtin operator code, ensuring compatibility issues between v3 and v3a // schema. Always the maximum value of the two fields always will be the correct // value as follows: // @@ -29,7 +29,7 @@ namespace tflite { // // The `builtin_code` field is not available in the v3 models. Flatbuffer // library will feed zero value, which is the default value in the v3a schema. -// The actual builtin operatore code value will exist in the +// The actual builtin operator code value will exist in the // `deprecated_builtin_code` field. At the same time, it implies that // `deprecated_builtin_code` >= `builtin_code` and the maximum value of the two // fields will be same with `deprecated_builtin_code'. 
diff --git a/src/tensorflow/compiler/mlir/lite/schema/schema_utils.h b/src/tensorflow/compiler/mlir/lite/schema/schema_utils.h new file mode 100644 index 0000000..90a3ae0 --- /dev/null +++ b/src/tensorflow/compiler/mlir/lite/schema/schema_utils.h @@ -0,0 +1,33 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" + +namespace tflite { + +// The following methods are introduced to resolve op builtin code shortage +// problem. The new builtin operator will be assigned to the extended builtin +// code field in the flatbuffer schema. Those methods helps to hide builtin code +// details. +BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); + +BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); + +} // namespace tflite + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/src/tensorflow/core/public/version.h b/src/tensorflow/core/public/version.h deleted file mode 100644 index 3903025..0000000 --- a/src/tensorflow/core/public/version.h +++ /dev/null @@ -1,139 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_CORE_PUBLIC_VERSION_H_ -#define TENSORFLOW_CORE_PUBLIC_VERSION_H_ - -// TensorFlow uses semantic versioning, see http://semver.org/. - -// Also update tensorflow/tensorflow.bzl and -// tensorflow/tools/pip_package/setup.py -#define TF_MAJOR_VERSION 2 -#define TF_MINOR_VERSION 5 -#define TF_PATCH_VERSION 0 - -// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", -// "-beta", "-rc", "-rc.1") -#define TF_VERSION_SUFFIX "" - -#define TF_STR_HELPER(x) #x -#define TF_STR(x) TF_STR_HELPER(x) - -// e.g. "0.5.0" or "0.6.0-alpha". -#define TF_VERSION_STRING \ - (TF_STR(TF_MAJOR_VERSION) "." TF_STR(TF_MINOR_VERSION) "." TF_STR( \ - TF_PATCH_VERSION) TF_VERSION_SUFFIX) - -// GraphDef compatibility versions (the versions field in graph.proto). -// -// Each graph has producer and min_consumer versions, and each -// consumer has its own version and a min_producer. In addition, graphs can -// mark specific consumer versions as bad (to prevent bugs from executing). -// A consumer will execute a graph if the consumer's version is at least the -// graph's min_consumer, the graph's producer version is at least the consumer's -// min_producer, and the consumer version isn't specifically disallowed by the -// graph. 
-// -// By default, newly created graphs have producer version TF_GRAPH_DEF_VERSION -// min_consumer TF_GRAPH_DEF_MIN_CONSUMER, and no other bad consumer versions. -// -// Version history: -// -// 0. Graphs created before GraphDef versioning -// 1. First real version (2dec2015) -// 2. adjust_contrast only takes float, doesn't perform clamping (11dec2015) -// 3. Remove TileGrad, since it was equivalent to reduce_sum (30dec2015) -// 4. When support for this version is removed, we can safely make AttrValue -// parsing more strict with respect to empty list values (see -// 111635679, 7jan2016). -// 5. Graphs are wholly-validated during Session::Create() (7jan2016). -// 6. TensorFlow is scalar strict within Google (27jan2016). -// 7. Remove TopK in favor of TopKV2 (5feb2016). -// 8. Replace RandomCrop from C++ with pure Python (5feb2016). -// 9. Deprecate batch_norm_with_global_normalization (16feb2016). -// 10. Deprecate conv3d_backprop_{filter,input} (10jun2016). -// 11. Deprecate {batch}_self_adjoint_eig (3aug2016). -// 12. Graph consumers understand the node_def field of FunctionDef (22aug2016). -// 13. Deprecate multiple batch linear algebra ops (9sep2016). -// 14. Deprecate batch_matrix_* ops. (10sep2016). -// 15. Deprecate batch_fft_* ops. (14sep2016). -// 16. Deprecate tensor_array (v1) ops in favor of v2 (10nov2016). -// 17. Deprecate inv (11nov2016). -// 17. Expose reverse_v2 (10nov2016) -// 18. Add VariableV2 (30nov2016) -// 19. Deprecated ops created by models moved out of core SkipGram, NegTrain. -// (08dec2016) -// 20. Catch all version 1.0 changes to Python API generation. SplitV is now -// used for tf.split, ReverseV2 is now used by tf.reverse, ConcatV2 is -// now used by tf.concat. Graphs use flooring -// division and mod semantics. TensorArrayV3. (12dec2016) -// Also considered the version for when it is required for reduction -// ops' indices to be scalar or vector, and not higher rank. -// Some earlier graph def versions allowed this. -// 21. 
Dropped FunctionDef.Node support, switched to node_def introduced -// in version 12. (11jan2017) -// 22. Placeholder now can specify and enforce scalar and partial -// shapes, particularly when restoring a graph from GraphDef -// produced at version 22 or later. (04/10/2016) -// 23. Remove NonMaxSuppression in favor of NonMaxSuppressionV2. -// 24. Deprecate lookup ops (v1) ops in favor of v2 (30may2017) -// 25. Deprecate stack (v1) ops in favor of v2 (2017/6/15). -// 25. Deprecate RandomPoisson (v1) ops in favor of v2 (2017/10/25). -// 26. Add a bool 'stripped_default_attrs' to MetaInfoDef indicating -// whether default-valued attrs have been stripped from the nodes in the -// GraphDef. (7dec2017) -// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops -// deprecated in favor of V2 ops. (2018/01/23) -// 28. Deprecate MatrixExponential op in favor of Python implementation. -// (2018/08/21). -// (2019/02/15). Added `control_ret` field to FunctionDef proto, and -// `control_output` field to OpDef proto. -// 29. Deprecate StatefulStandardNormal op in favor of StatefulStandardNormalV2. -// (2019/03/25). -// (2019/04/17). Added `arg_attr` field to FunctionDefProto. -// 30. (2019/05/09) First date based GraphDef version. GraphDef -// versions advance by 1 each day after this point. - -#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 -#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 607 // Updated: 2020/12/6 - -// Checkpoint compatibility versions (the versions field in SavedSliceMeta). -// -// The checkpoint versions have the same semantics as GraphDef versions, but the -// numbering scheme is separate. We have no plans to ever deprecate checkpoint -// versions, but it's good to have this in place in case we ever need to. -// -// Version history: -// -// 0. Checkpoints saved before checkpoint versioning. -// 1. First real version (10feb2015). 
-#define TF_CHECKPOINT_VERSION_MIN_PRODUCER 0 -#define TF_CHECKPOINT_VERSION_MIN_CONSUMER 0 -#define TF_CHECKPOINT_VERSION 1 - -/// Version query functions (defined in generated version_info.cc) - -// Host compiler version (declared elsewhere to be __VERSION__) -extern const char* tf_compiler_version(); -// The git commit designator when tensorflow was built -// If no git repository, this will be "internal". -extern const char* tf_git_version(); -// Value of the _GLIBCXX_USE_CXX11_ABI flag, or 0 if it's not set. -extern int tf_cxx11_abi_flag(); -// Returns 1 if build is monolithic, or 0 otherwise. -extern int tf_monolithic_build(); - -#endif // TENSORFLOW_CORE_PUBLIC_VERSION_H_ diff --git a/src/tensorflow/lite/builtin_op_data.h b/src/tensorflow/lite/builtin_op_data.h new file mode 100644 index 0000000..161801c --- /dev/null +++ b/src/tensorflow/lite/builtin_op_data.h @@ -0,0 +1,22 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// Compatibility shim for new location of interface definitions. 
+ +#ifndef TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ +#define TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ + +#include "tensorflow/lite/core/c/builtin_op_data.h" + +#endif // TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ diff --git a/src/tensorflow/lite/builtin_ops.h b/src/tensorflow/lite/builtin_ops.h new file mode 100644 index 0000000..21c59eb --- /dev/null +++ b/src/tensorflow/lite/builtin_ops.h @@ -0,0 +1,245 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_ +#define TENSORFLOW_LITE_BUILTIN_OPS_H_ + +// DO NOT EDIT MANUALLY: This file is automatically generated by +// `schema/builtin_ops_header/generator.cc`. + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// The enum for builtin operators. +// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special +// ops which are not real built-in ops. 
+typedef enum { + kTfLiteBuiltinAdd = 0, + kTfLiteBuiltinAveragePool2d = 1, + kTfLiteBuiltinConcatenation = 2, + kTfLiteBuiltinConv2d = 3, + kTfLiteBuiltinDepthwiseConv2d = 4, + kTfLiteBuiltinDepthToSpace = 5, + kTfLiteBuiltinDequantize = 6, + kTfLiteBuiltinEmbeddingLookup = 7, + kTfLiteBuiltinFloor = 8, + kTfLiteBuiltinFullyConnected = 9, + kTfLiteBuiltinHashtableLookup = 10, + kTfLiteBuiltinL2Normalization = 11, + kTfLiteBuiltinL2Pool2d = 12, + kTfLiteBuiltinLocalResponseNormalization = 13, + kTfLiteBuiltinLogistic = 14, + kTfLiteBuiltinLshProjection = 15, + kTfLiteBuiltinLstm = 16, + kTfLiteBuiltinMaxPool2d = 17, + kTfLiteBuiltinMul = 18, + kTfLiteBuiltinRelu = 19, + kTfLiteBuiltinReluN1To1 = 20, + kTfLiteBuiltinRelu6 = 21, + kTfLiteBuiltinReshape = 22, + kTfLiteBuiltinResizeBilinear = 23, + kTfLiteBuiltinRnn = 24, + kTfLiteBuiltinSoftmax = 25, + kTfLiteBuiltinSpaceToDepth = 26, + kTfLiteBuiltinSvdf = 27, + kTfLiteBuiltinTanh = 28, + kTfLiteBuiltinConcatEmbeddings = 29, + kTfLiteBuiltinSkipGram = 30, + kTfLiteBuiltinCall = 31, + kTfLiteBuiltinCustom = 32, + kTfLiteBuiltinEmbeddingLookupSparse = 33, + kTfLiteBuiltinPad = 34, + kTfLiteBuiltinUnidirectionalSequenceRnn = 35, + kTfLiteBuiltinGather = 36, + kTfLiteBuiltinBatchToSpaceNd = 37, + kTfLiteBuiltinSpaceToBatchNd = 38, + kTfLiteBuiltinTranspose = 39, + kTfLiteBuiltinMean = 40, + kTfLiteBuiltinSub = 41, + kTfLiteBuiltinDiv = 42, + kTfLiteBuiltinSqueeze = 43, + kTfLiteBuiltinUnidirectionalSequenceLstm = 44, + kTfLiteBuiltinStridedSlice = 45, + kTfLiteBuiltinBidirectionalSequenceRnn = 46, + kTfLiteBuiltinExp = 47, + kTfLiteBuiltinTopkV2 = 48, + kTfLiteBuiltinSplit = 49, + kTfLiteBuiltinLogSoftmax = 50, + kTfLiteBuiltinDelegate = 51, + kTfLiteBuiltinBidirectionalSequenceLstm = 52, + kTfLiteBuiltinCast = 53, + kTfLiteBuiltinPrelu = 54, + kTfLiteBuiltinMaximum = 55, + kTfLiteBuiltinArgMax = 56, + kTfLiteBuiltinMinimum = 57, + kTfLiteBuiltinLess = 58, + kTfLiteBuiltinNeg = 59, + kTfLiteBuiltinPadv2 = 60, + 
kTfLiteBuiltinGreater = 61, + kTfLiteBuiltinGreaterEqual = 62, + kTfLiteBuiltinLessEqual = 63, + kTfLiteBuiltinSelect = 64, + kTfLiteBuiltinSlice = 65, + kTfLiteBuiltinSin = 66, + kTfLiteBuiltinTransposeConv = 67, + kTfLiteBuiltinSparseToDense = 68, + kTfLiteBuiltinTile = 69, + kTfLiteBuiltinExpandDims = 70, + kTfLiteBuiltinEqual = 71, + kTfLiteBuiltinNotEqual = 72, + kTfLiteBuiltinLog = 73, + kTfLiteBuiltinSum = 74, + kTfLiteBuiltinSqrt = 75, + kTfLiteBuiltinRsqrt = 76, + kTfLiteBuiltinShape = 77, + kTfLiteBuiltinPow = 78, + kTfLiteBuiltinArgMin = 79, + kTfLiteBuiltinFakeQuant = 80, + kTfLiteBuiltinReduceProd = 81, + kTfLiteBuiltinReduceMax = 82, + kTfLiteBuiltinPack = 83, + kTfLiteBuiltinLogicalOr = 84, + kTfLiteBuiltinOneHot = 85, + kTfLiteBuiltinLogicalAnd = 86, + kTfLiteBuiltinLogicalNot = 87, + kTfLiteBuiltinUnpack = 88, + kTfLiteBuiltinReduceMin = 89, + kTfLiteBuiltinFloorDiv = 90, + kTfLiteBuiltinReduceAny = 91, + kTfLiteBuiltinSquare = 92, + kTfLiteBuiltinZerosLike = 93, + kTfLiteBuiltinFill = 94, + kTfLiteBuiltinFloorMod = 95, + kTfLiteBuiltinRange = 96, + kTfLiteBuiltinResizeNearestNeighbor = 97, + kTfLiteBuiltinLeakyRelu = 98, + kTfLiteBuiltinSquaredDifference = 99, + kTfLiteBuiltinMirrorPad = 100, + kTfLiteBuiltinAbs = 101, + kTfLiteBuiltinSplitV = 102, + kTfLiteBuiltinUnique = 103, + kTfLiteBuiltinCeil = 104, + kTfLiteBuiltinReverseV2 = 105, + kTfLiteBuiltinAddN = 106, + kTfLiteBuiltinGatherNd = 107, + kTfLiteBuiltinCos = 108, + kTfLiteBuiltinWhere = 109, + kTfLiteBuiltinRank = 110, + kTfLiteBuiltinElu = 111, + kTfLiteBuiltinReverseSequence = 112, + kTfLiteBuiltinMatrixDiag = 113, + kTfLiteBuiltinQuantize = 114, + kTfLiteBuiltinMatrixSetDiag = 115, + kTfLiteBuiltinRound = 116, + kTfLiteBuiltinHardSwish = 117, + kTfLiteBuiltinIf = 118, + kTfLiteBuiltinWhile = 119, + kTfLiteBuiltinNonMaxSuppressionV4 = 120, + kTfLiteBuiltinNonMaxSuppressionV5 = 121, + kTfLiteBuiltinScatterNd = 122, + kTfLiteBuiltinSelectV2 = 123, + kTfLiteBuiltinDensify = 124, + 
kTfLiteBuiltinSegmentSum = 125, + kTfLiteBuiltinBatchMatmul = 126, + kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, + kTfLiteBuiltinCumsum = 128, + kTfLiteBuiltinCallOnce = 129, + kTfLiteBuiltinBroadcastTo = 130, + kTfLiteBuiltinRfft2d = 131, + kTfLiteBuiltinConv3d = 132, + kTfLiteBuiltinImag = 133, + kTfLiteBuiltinReal = 134, + kTfLiteBuiltinComplexAbs = 135, + kTfLiteBuiltinHashtable = 136, + kTfLiteBuiltinHashtableFind = 137, + kTfLiteBuiltinHashtableImport = 138, + kTfLiteBuiltinHashtableSize = 139, + kTfLiteBuiltinReduceAll = 140, + kTfLiteBuiltinConv3dTranspose = 141, + kTfLiteBuiltinVarHandle = 142, + kTfLiteBuiltinReadVariable = 143, + kTfLiteBuiltinAssignVariable = 144, + kTfLiteBuiltinBroadcastArgs = 145, + kTfLiteBuiltinRandomStandardNormal = 146, + kTfLiteBuiltinBucketize = 147, + kTfLiteBuiltinRandomUniform = 148, + kTfLiteBuiltinMultinomial = 149, + kTfLiteBuiltinGelu = 150, + kTfLiteBuiltinDynamicUpdateSlice = 151, + kTfLiteBuiltinRelu0To1 = 152, + kTfLiteBuiltinUnsortedSegmentProd = 153, + kTfLiteBuiltinUnsortedSegmentMax = 154, + kTfLiteBuiltinUnsortedSegmentSum = 155, + kTfLiteBuiltinAtan2 = 156, + kTfLiteBuiltinUnsortedSegmentMin = 157, + kTfLiteBuiltinSign = 158, + kTfLiteBuiltinBitcast = 159, + kTfLiteBuiltinBitwiseXor = 160, + kTfLiteBuiltinRightShift = 161, + kTfLiteBuiltinStablehloLogistic = 162, + kTfLiteBuiltinStablehloAdd = 163, + kTfLiteBuiltinStablehloDivide = 164, + kTfLiteBuiltinStablehloMultiply = 165, + kTfLiteBuiltinStablehloMaximum = 166, + kTfLiteBuiltinStablehloReshape = 167, + kTfLiteBuiltinStablehloClamp = 168, + kTfLiteBuiltinStablehloConcatenate = 169, + kTfLiteBuiltinStablehloBroadcastInDim = 170, + kTfLiteBuiltinStablehloConvolution = 171, + kTfLiteBuiltinStablehloSlice = 172, + kTfLiteBuiltinStablehloCustomCall = 173, + kTfLiteBuiltinStablehloReduce = 174, + kTfLiteBuiltinStablehloAbs = 175, + kTfLiteBuiltinStablehloAnd = 176, + kTfLiteBuiltinStablehloCosine = 177, + kTfLiteBuiltinStablehloExponential = 178, + 
kTfLiteBuiltinStablehloFloor = 179, + kTfLiteBuiltinStablehloLog = 180, + kTfLiteBuiltinStablehloMinimum = 181, + kTfLiteBuiltinStablehloNegate = 182, + kTfLiteBuiltinStablehloOr = 183, + kTfLiteBuiltinStablehloPower = 184, + kTfLiteBuiltinStablehloRemainder = 185, + kTfLiteBuiltinStablehloRsqrt = 186, + kTfLiteBuiltinStablehloSelect = 187, + kTfLiteBuiltinStablehloSubtract = 188, + kTfLiteBuiltinStablehloTanh = 189, + kTfLiteBuiltinStablehloScatter = 190, + kTfLiteBuiltinStablehloCompare = 191, + kTfLiteBuiltinStablehloConvert = 192, + kTfLiteBuiltinStablehloDynamicSlice = 193, + kTfLiteBuiltinStablehloDynamicUpdateSlice = 194, + kTfLiteBuiltinStablehloPad = 195, + kTfLiteBuiltinStablehloIota = 196, + kTfLiteBuiltinStablehloDotGeneral = 197, + kTfLiteBuiltinStablehloReduceWindow = 198, + kTfLiteBuiltinStablehloSort = 199, + kTfLiteBuiltinStablehloWhile = 200, + kTfLiteBuiltinStablehloGather = 201, + kTfLiteBuiltinStablehloTranspose = 202, + kTfLiteBuiltinDilate = 203, + kTfLiteBuiltinStablehloRngBitGenerator = 204, + kTfLiteBuiltinReduceWindow = 205, + kTfLiteBuiltinStablehloComposite = 206, + kTfLiteBuiltinStablehloShiftLeft = 207, + kTfLiteBuiltinStablehloCbrt = 208, + kTfLiteBuiltinStablehloCase = 209, +} TfLiteBuiltinOperator; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus +#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_ diff --git a/src/tensorflow/lite/c/builtin_op_data.h b/src/tensorflow/lite/c/builtin_op_data.h index 31d792a..0606819 100644 --- a/src/tensorflow/lite/c/builtin_op_data.h +++ b/src/tensorflow/lite/c/builtin_op_data.h @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,470 +15,9 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ #define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ -#include +/// For documentation, see +/// third_party/tensorflow/lite/core/c/builtin_op_data.h -#include "tensorflow/lite/c/common.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible -// number of dimensions. -#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8 - -// TODO(aselle): Consider using "if this then that" for testing. - -// Useful placeholder to put in otherwise empty structs to avoid size warnings. -typedef struct { - char dummy; -} EmptyStructPlaceholder; - -// IMPORTANT: All new members of structs must be added at the end to ensure -// backwards compatibility. - -// Possible padding types (for convolutions) -typedef enum { - kTfLitePaddingUnknown = 0, - kTfLitePaddingSame, - kTfLitePaddingValid, -} TfLitePadding; - -typedef enum { - kTfLiteMirrorPaddingUnknown = 0, - kTfLiteMirrorPaddingReflect, - kTfLiteMirrorPaddingSymmetric, -} TfLiteMirrorPaddingMode; - -// TODO(b/130259536): We should move this out of builtin_op_data. -typedef struct { - int width; - int height; - int width_offset; - int height_offset; -} TfLitePaddingValues; - -typedef struct { - TfLiteMirrorPaddingMode mode; -} TfLiteMirrorPaddingParams; - -// Possible fused activation functions. -// TODO(aselle): rename to TfLiteActivation -typedef enum { - kTfLiteActNone = 0, - kTfLiteActRelu, - kTfLiteActReluN1To1, // min(max(-1, x), 1) - kTfLiteActRelu6, // min(max(0, x), 6) - kTfLiteActTanh, - kTfLiteActSignBit, - kTfLiteActSigmoid, -} TfLiteFusedActivation; - -typedef struct { - // Parameters for CONV_2D version 1. - TfLitePadding padding; - int stride_width; - int stride_height; - TfLiteFusedActivation activation; - - // Parameters for CONV_2D version 2. - // Note: Version 2 supports dilation values not equal to 1. 
- int dilation_width_factor; - int dilation_height_factor; -} TfLiteConvParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; - int filter_width; - int filter_height; - TfLiteFusedActivation activation; - struct { - TfLitePaddingValues padding; - } computed; -} TfLitePoolParams; - -typedef struct { - // Parameters for DepthwiseConv version 1 or above. - TfLitePadding padding; - int stride_width; - int stride_height; - // `depth_multiplier` is redundant. It's used by CPU kernels in - // TensorFlow 2.0 or below, but ignored in versions above. - // - // The information can be deduced from the shape of input and the shape of - // weights. Since the TFLiteConverter toolchain doesn't support partially - // specified shapes, relying on `depth_multiplier` stops us from supporting - // graphs with dynamic shape tensors. - // - // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this - // field. - int depth_multiplier; - TfLiteFusedActivation activation; - // Parameters for DepthwiseConv version 2 or above. - int dilation_width_factor; - int dilation_height_factor; -} TfLiteDepthwiseConvParams; - -typedef struct { - int rank; - TfLiteFusedActivation activation; - - // Parameter for SVDF version 4. - bool asymmetric_quantize_inputs; -} TfLiteSVDFParams; - -typedef struct { - TfLiteFusedActivation activation; - - // Parameter for RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - - // Parameter for Sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteSequenceRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - bool merge_outputs; - - // Parameter for Bidirectional RNN verison 3. 
- bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceRNNParams; - -typedef enum { - kTfLiteFullyConnectedWeightsFormatDefault = 0, - kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, -} TfLiteFullyConnectedWeightsFormat; - -typedef struct { - // Parameters for FullyConnected version 1 or above. - TfLiteFusedActivation activation; - - // Parameters for FullyConnected version 2 or above. - TfLiteFullyConnectedWeightsFormat weights_format; - - // Parameters for FullyConnected version 5 or above. - // If set to true, then the number of dimensions in the input and the output - // tensors are the same. Furthermore, all but the last dimension of the input - // and output shapes will be equal. - bool keep_num_dims; - - // Parameters for FullyConnected version 7 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. - bool asymmetric_quantize_inputs; -} TfLiteFullyConnectedParams; - -typedef enum { - kTfLiteLshProjectionUnknown = 0, - kTfLiteLshProjectionSparse = 1, - kTfLiteLshProjectionDense = 2, -} TfLiteLSHProjectionType; - -typedef struct { - TfLiteLSHProjectionType type; -} TfLiteLSHProjectionParams; - -typedef struct { - float beta; -} TfLiteSoftmaxParams; - -typedef struct { - int axis; - TfLiteFusedActivation activation; -} TfLiteConcatenationParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 4. - bool pot_scale_int16; -} TfLiteAddParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteSpaceToBatchNDParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteBatchToSpaceNDParams; - -typedef struct { - bool adj_x; - bool adj_y; - // Parameters for BatchMatMul version 4 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. 
- bool asymmetric_quantize_inputs; -} TfLiteBatchMatMulParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteMulParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 5. - bool pot_scale_int16; -} TfLiteSubParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteDivParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteL2NormParams; - -typedef struct { - int radius; - float bias; - float alpha; - float beta; -} TfLiteLocalResponseNormParams; - -typedef enum { - kTfLiteLSTMFullKernel = 0, - kTfLiteLSTMBasicKernel -} TfLiteLSTMKernelType; - -typedef struct { - // Parameters for LSTM version 1. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // Parameters for LSTM version 2. - // kTfLiteLSTMBasicKernel is only supported in version 2 or above. - TfLiteLSTMKernelType kernel_type; - - // Parameters for LSTM version 4. - bool asymmetric_quantize_inputs; -} TfLiteLSTMParams; - -typedef struct { - // Parameters needed for the underlying LSTM. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameter for unidirectional sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteUnidirectionalSequenceLSTMParams; - -typedef struct { - // Parameters supported by version 1: - // Parameters inherited for the LSTM kernel. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If true, store the outputs of both directions in the first output. - bool merge_outputs; - - // Parameters supported by version 2: - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameters supported by version 4: - // If set to true, then hybrid ops use asymmetric quantization for inputs. 
- bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceLSTMParams; - -typedef struct { - bool align_corners; - // half_pixel_centers assumes pixels are of half the actual dimensions, and - // yields more accurate resizes. Corresponds to the same argument for the - // original TensorFlow op in TF2.0. - bool half_pixel_centers; -} TfLiteResizeBilinearParams; - -typedef struct { - bool align_corners; - bool half_pixel_centers; -} TfLiteResizeNearestNeighborParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadV2Params; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. - int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; - int num_dimensions; -} TfLiteReshapeParams; - -typedef struct { - int ngram_size; - int max_skip_size; - bool include_all_ngrams; -} TfLiteSkipGramParams; - -typedef struct { - int block_size; -} TfLiteSpaceToDepthParams; - -typedef struct { - int block_size; -} TfLiteDepthToSpaceParams; - -typedef struct { - TfLiteType in_data_type; - TfLiteType out_data_type; -} TfLiteCastParams; - -typedef enum { - kTfLiteCombinerTypeSum = 0, - kTfLiteCombinerTypeMean = 1, - kTfLiteCombinerTypeSqrtn = 2, -} TfLiteCombinerType; - -typedef struct { - TfLiteCombinerType combiner; -} TfLiteEmbeddingLookupSparseParams; - -typedef struct { - int axis; -} TfLiteGatherParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteTransposeParams; - -typedef struct { - bool keep_dims; -} TfLiteReducerParams; - -typedef struct { - int num_splits; -} TfLiteSplitParams; - -typedef struct { - int num_splits; -} TfLiteSplitVParams; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. 
- int squeeze_dims[8]; - int num_squeeze_dims; -} TfLiteSqueezeParams; - -typedef struct { - int begin_mask; - int end_mask; - int ellipsis_mask; - int new_axis_mask; - int shrink_axis_mask; -} TfLiteStridedSliceParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMaxParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMinParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; -} TfLiteTransposeConvParams; - -typedef struct { - bool validate_indices; -} TfLiteSparseToDenseParams; - -typedef struct { - TfLiteType out_type; -} TfLiteShapeParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteRankParams; - -typedef struct { - // Parameters supported by version 1: - float min; - float max; - int num_bits; - - // Parameters supported by version 2: - bool narrow_range; -} TfLiteFakeQuantParams; - -typedef struct { - int values_count; - int axis; -} TfLitePackParams; - -typedef struct { - int axis; -} TfLiteOneHotParams; - -typedef struct { - int num; - int axis; -} TfLiteUnpackParams; - -typedef struct { - float alpha; -} TfLiteLeakyReluParams; - -typedef struct { - TfLiteType index_out_type; -} TfLiteUniqueParams; - -typedef struct { - int seq_dim; - int batch_dim; -} TfLiteReverseSequenceParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixDiagParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixSetDiagParams; - -typedef struct { - int then_subgraph_index; - int else_subgraph_index; -} TfLiteIfParams; - -typedef struct { - int cond_subgraph_index; - int body_subgraph_index; -} TfLiteWhileParams; - -typedef struct { - bool exclusive; - bool reverse; -} TfLiteCumsumParams; - -typedef struct { - int init_subgraph_index; -} TfLiteCallOnceParams; - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus +#include "tensorflow/lite/core/c/builtin_op_data.h" #endif // TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ diff --git 
a/src/tensorflow/lite/c/c_api_types.h b/src/tensorflow/lite/c/c_api_types.h new file mode 100644 index 0000000..05cda07 --- /dev/null +++ b/src/tensorflow/lite/c/c_api_types.h @@ -0,0 +1,26 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_ +#define TENSORFLOW_LITE_C_C_API_TYPES_H_ + +/// \file +/// +/// C API types for TensorFlow Lite. +/// +/// For documentation, see tensorflow/lite/core/c/c_api_types.h + +#include "tensorflow/lite/core/c/c_api_types.h" + +#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_ diff --git a/src/tensorflow/lite/c/common.c b/src/tensorflow/lite/c/common.c deleted file mode 100644 index e43984e..0000000 --- a/src/tensorflow/lite/c/common.c +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/common.h" -#ifndef TF_LITE_STATIC_MEMORY -#include -#include -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteIntArrayGetSizeInBytes(int size) { - static TfLiteIntArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { - if (a == b) return 1; - if (a == NULL || b == NULL) return 0; - return TfLiteIntArrayEqualsArray(a, b->size, b->data); -} - -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]) { - if (a == NULL) return (b_size == 0); - if (a->size != b_size) return 0; - int i = 0; - for (; i < a->size; i++) - if (a->data[i] != b_data[i]) return 0; - return 1; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteIntArray* TfLiteIntArrayCreate(int size) { - TfLiteIntArray* ret = - (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { - if (!src) return NULL; - TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size); - if (ret) { - memcpy(ret->data, src->data, src->size * sizeof(int)); - } - return ret; -} - -void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); } - -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteFloatArrayGetSizeInBytes(int size) { - static TfLiteFloatArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { - TfLiteFloatArray* ret = - (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); } - -void TfLiteTensorDataFree(TfLiteTensor* t) { - if (t->allocation_type == kTfLiteDynamic || - t->allocation_type == kTfLitePersistentRo) { - free(t->data.raw); - } - t->data.raw = NULL; -} - -void 
TfLiteQuantizationFree(TfLiteQuantization* quantization) { - if (quantization->type == kTfLiteAffineQuantization) { - TfLiteAffineQuantization* q_params = - (TfLiteAffineQuantization*)(quantization->params); - if (q_params->scale) { - TfLiteFloatArrayFree(q_params->scale); - q_params->scale = NULL; - } - if (q_params->zero_point) { - TfLiteIntArrayFree(q_params->zero_point); - q_params->zero_point = NULL; - } - free(q_params); - } - quantization->params = NULL; - quantization->type = kTfLiteNoQuantization; -} - -void TfLiteSparsityFree(TfLiteSparsity* sparsity) { - if (sparsity == NULL) { - return; - } - - if (sparsity->traversal_order) { - TfLiteIntArrayFree(sparsity->traversal_order); - sparsity->traversal_order = NULL; - } - - if (sparsity->block_map) { - TfLiteIntArrayFree(sparsity->block_map); - sparsity->block_map = NULL; - } - - if (sparsity->dim_metadata) { - int i = 0; - for (; i < sparsity->dim_metadata_size; i++) { - TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; - if (metadata.format == kTfLiteDimSparseCSR) { - TfLiteIntArrayFree(metadata.array_segments); - metadata.array_segments = NULL; - TfLiteIntArrayFree(metadata.array_indices); - metadata.array_indices = NULL; - } - } - free(sparsity->dim_metadata); - sparsity->dim_metadata = NULL; - } - - free(sparsity); -} - -void TfLiteTensorFree(TfLiteTensor* t) { - TfLiteTensorDataFree(t); - if (t->dims) TfLiteIntArrayFree(t->dims); - t->dims = NULL; - - if (t->dims_signature) { - TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature); - } - t->dims_signature = NULL; - - TfLiteQuantizationFree(&t->quantization); - TfLiteSparsityFree(t->sparsity); - t->sparsity = NULL; -} - -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor) { - TfLiteTensorFree(tensor); - tensor->type = type; - tensor->name 
= name; - tensor->dims = dims; - tensor->params = quantization; - tensor->data.raw = buffer; - tensor->bytes = size; - tensor->allocation_type = allocation_type; - tensor->allocation = allocation; - tensor->is_variable = is_variable; - - tensor->quantization.type = kTfLiteNoQuantization; - tensor->quantization.params = NULL; -} - -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLiteDynamic && - tensor->allocation_type != kTfLitePersistentRo) { - return; - } - // TODO(b/145340303): Tensor data should be aligned. - if (!tensor->data.raw) { - tensor->data.raw = malloc(num_bytes); - } else if (num_bytes > tensor->bytes) { - tensor->data.raw = realloc(tensor->data.raw, num_bytes); - } - tensor->bytes = num_bytes; -} -#endif // TF_LITE_STATIC_MEMORY - -const char* TfLiteTypeGetName(TfLiteType type) { - switch (type) { - case kTfLiteNoType: - return "NOTYPE"; - case kTfLiteFloat32: - return "FLOAT32"; - case kTfLiteInt16: - return "INT16"; - case kTfLiteInt32: - return "INT32"; - case kTfLiteUInt8: - return "UINT8"; - case kTfLiteInt8: - return "INT8"; - case kTfLiteInt64: - return "INT64"; - case kTfLiteUInt64: - return "UINT64"; - case kTfLiteBool: - return "BOOL"; - case kTfLiteComplex64: - return "COMPLEX64"; - case kTfLiteComplex128: - return "COMPLEX128"; - case kTfLiteString: - return "STRING"; - case kTfLiteFloat16: - return "FLOAT16"; - case kTfLiteFloat64: - return "FLOAT64"; - } - return "Unknown type"; -} - -TfLiteDelegate TfLiteDelegateCreate() { - TfLiteDelegate d = { - .data_ = NULL, - .Prepare = NULL, - .CopyFromBufferHandle = NULL, - .CopyToBufferHandle = NULL, - .FreeBufferHandle = NULL, - .flags = kTfLiteDelegateFlagsNone, - }; - return d; -} diff --git a/src/tensorflow/lite/c/common.h b/src/tensorflow/lite/c/common.h index 923a0fa..8a8b513 100644 --- a/src/tensorflow/lite/c/common.h +++ b/src/tensorflow/lite/c/common.h @@ -13,959 +13,21 @@ See the License for the specific language governing 
permissions and limitations under the License. ==============================================================================*/ -// This file defines common C types and APIs for implementing operations, -// delegates and other constructs in TensorFlow Lite. The actual operations and -// delegates can be defined using C++, but the interface between the interpreter -// and the operations are C. -// -// Summary of abstractions -// TF_LITE_ENSURE - Self-sufficient error checking -// TfLiteStatus - Status reporting -// TfLiteIntArray - stores tensor shapes (dims), -// TfLiteContext - allows an op to access the tensors -// TfLiteTensor - tensor (a multidimensional array) -// TfLiteNode - a single node or operation -// TfLiteRegistration - the implementation of a conceptual operation. -// TfLiteDelegate - allows delegation of nodes to alternative backends. -// -// Some abstractions in this file are created and managed by Interpreter. -// -// NOTE: The order of values in these structs are "semi-ABI stable". New values -// should be added only to the end of structs and never reordered. +/// \file +/// +/// This file defines common C types and APIs for implementing operations, +/// delegates and other constructs in TensorFlow Lite. The actual operations and +/// delegates can be defined using C++, but the interface between the +/// interpreter and the operations are C. +/// +/// For documentation, see tensorflow/lite/core/c/common.h. +/// +/// See also c_api_opaque.h which has more ABI-stable variants of some of these +/// APIs. #ifndef TENSORFLOW_LITE_C_COMMON_H_ #define TENSORFLOW_LITE_C_COMMON_H_ -#include -#include -#include +#include "tensorflow/lite/core/c/common.h" -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -typedef enum TfLiteStatus { - kTfLiteOk = 0, - - // Generally referring to an error in the runtime (i.e. interpreter) - kTfLiteError = 1, - - // Generally referring to an error from a TfLiteDelegate itself. 
- kTfLiteDelegateError = 2, - - // Generally referring to an error in applying a delegate due to - // incompatibility between runtime and delegate, e.g., this error is returned - // when trying to apply a TfLite delegate onto a model graph that's already - // immutable. - kTfLiteApplicationError = 3 -} TfLiteStatus; - -// The list of external context types known to TF Lite. This list exists solely -// to avoid conflicts and to ensure ops can share the external contexts they -// need. Access to the external contexts is controlled by one of the -// corresponding support files. -typedef enum TfLiteExternalContextType { - kTfLiteEigenContext = 0, // include eigen_support.h to use. - kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. - kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. - kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. - kTfLiteMaxExternalContexts = 4 -} TfLiteExternalContextType; - -// Forward declare so dependent structs and methods can reference these types -// prior to the struct definitions. -struct TfLiteContext; -struct TfLiteDelegate; -struct TfLiteRegistration; - -// An external context is a collection of information unrelated to the TF Lite -// framework, but useful to a subset of the ops. TF Lite knows very little -// about the actual contexts, but it keeps a list of them, and is able to -// refresh them if configurations like the number of recommended threads -// change. -typedef struct TfLiteExternalContext { - TfLiteExternalContextType type; - TfLiteStatus (*Refresh)(struct TfLiteContext* context); -} TfLiteExternalContext; - -#define kTfLiteOptionalTensor (-1) - -// Fixed size list of integers. 
Used for dimensions and inputs/outputs tensor -// indices -typedef struct TfLiteIntArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1) - int data[0]; -#else - int data[]; -#endif -} TfLiteIntArray; - -// Given the size (number of elements) in a TfLiteIntArray, calculate its size -// in bytes. -int TfLiteIntArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteIntArrayFree(). -TfLiteIntArray* TfLiteIntArrayCreate(int size); -#endif - -// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); - -// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteIntArrayFree -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); - -// Free memory of array `a`. -void TfLiteIntArrayFree(TfLiteIntArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Fixed size list of floats. Used for per-channel quantization. -typedef struct TfLiteFloatArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -// This also applies to the toolchain used for Qualcomm Hexagon DSPs. 
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1 - float data[0]; -#else - float data[]; -#endif -} TfLiteFloatArray; - -// Given the size (number of elements) in a TfLiteFloatArray, calculate its size -// in bytes. -int TfLiteFloatArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteFloatArrayFree(). -TfLiteFloatArray* TfLiteFloatArrayCreate(int size); - -// Free memory of array `a`. -void TfLiteFloatArrayFree(TfLiteFloatArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Since we must not depend on any libraries, define a minimal subset of -// error macros while avoiding names that have pre-conceived meanings like -// assert and check. - -// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than -// calling the context->ReportError function directly, so that message strings -// can be stripped out if the binary size needs to be severely optimized. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) \ - do { \ - (context)->ReportError((context), __VA_ARGS__); \ - } while (false) - -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ - do { \ - if ((context) != nullptr) { \ - (context)->ReportError((context), __VA_ARGS__); \ - } \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) -#endif // TF_LITE_STRIP_ERROR_STRINGS - -// Check whether value is true, and if not return kTfLiteError from -// the current function (and report the error string msg). 
-#define TF_LITE_ENSURE_MSG(context, value, msg) \ - do { \ - if (!(value)) { \ - TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ - return kTfLiteError; \ - } \ - } while (0) - -// Check whether the value `a` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -#define TF_LITE_ENSURE(context, a) \ - do { \ - if (!(a)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ - __LINE__, #a); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_STATUS(a) \ - do { \ - const TfLiteStatus s = (a); \ - if (s != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Check whether the value `a == b` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// `a` and `b` may be evaluated more than once, so no side effects or -// extremely expensive computations should be done. -// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. -#define TF_LITE_ENSURE_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ - __LINE__, #a, #b, (a), (b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ - __LINE__, #a, #b, TfLiteTypeGetName(a), \ - TfLiteTypeGetName(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ - do { \ - auto delta = ((a) > (b)) ? 
((a) - (b)) : ((b) - (a)); \ - if (delta > epsilon) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ - __FILE__, __LINE__, #a, #b, static_cast(a), \ - static_cast(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_OK(context, status) \ - do { \ - const TfLiteStatus s = (status); \ - if ((s) != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Define TFL_CAPI_EXPORT macro to export a function properly with a shared -// library. -#ifdef SWIG -#define TFL_CAPI_EXPORT -#else -#if defined(_WIN32) -#ifdef TFL_COMPILE_LIBRARY -#define TFL_CAPI_EXPORT __declspec(dllexport) -#else -#define TFL_CAPI_EXPORT __declspec(dllimport) -#endif // TFL_COMPILE_LIBRARY -#else -#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) -#endif // _WIN32 -#endif // SWIG - -// Single-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex64 { - float re, im; // real and imaginary parts, respectively. -} TfLiteComplex64; - -// Double-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex128 { - double re, im; // real and imaginary parts, respectively. -} TfLiteComplex128; - -// Half precision data type compatible with the C99 definition. -typedef struct TfLiteFloat16 { - uint16_t data; -} TfLiteFloat16; - -// Types supported by tensor -typedef enum { - kTfLiteNoType = 0, - kTfLiteFloat32 = 1, - kTfLiteInt32 = 2, - kTfLiteUInt8 = 3, - kTfLiteInt64 = 4, - kTfLiteString = 5, - kTfLiteBool = 6, - kTfLiteInt16 = 7, - kTfLiteComplex64 = 8, - kTfLiteInt8 = 9, - kTfLiteFloat16 = 10, - kTfLiteFloat64 = 11, - kTfLiteComplex128 = 12, - kTfLiteUInt64 = 13, -} TfLiteType; - -// Return the name of a given type, for error reporting purposes. -const char* TfLiteTypeGetName(TfLiteType type); - -// SupportedQuantizationTypes. -typedef enum TfLiteQuantizationType { - // No quantization. 
- kTfLiteNoQuantization = 0, - // Affine quantization (with support for per-channel quantization). - // Corresponds to TfLiteAffineQuantization. - kTfLiteAffineQuantization = 1, -} TfLiteQuantizationType; - -// Structure specifying the quantization used by the tensor, if-any. -typedef struct TfLiteQuantization { - // The type of quantization held by params. - TfLiteQuantizationType type; - // Holds a reference to one of the quantization param structures specified - // below. - void* params; -} TfLiteQuantization; - -// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. -// If per-layer quantization is specified this field will still be populated in -// addition to TfLiteAffineQuantization. -// Parameters for asymmetric quantization. Quantized values can be converted -// back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteQuantizationParams { - float scale; - int32_t zero_point; -} TfLiteQuantizationParams; - -// Parameters for asymmetric quantization across a dimension (i.e per output -// channel quantization). -// quantized_dimension specifies which dimension the scales and zero_points -// correspond to. -// For a particular value in quantized_dimension, quantized values can be -// converted back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteAffineQuantization { - TfLiteFloatArray* scale; - TfLiteIntArray* zero_point; - int32_t quantized_dimension; -} TfLiteAffineQuantization; - -/* A union of pointers that points to memory for a given tensor. */ -typedef union TfLitePtrUnion { - /* Do not access these members directly, if possible, use - * GetTensorData(tensor) instead, otherwise only access .data, as other - * members are deprecated. 
*/ - int32_t* i32; - int64_t* i64; - uint64_t* u64; - float* f; - TfLiteFloat16* f16; - double* f64; - char* raw; - const char* raw_const; - uint8_t* uint8; - bool* b; - int16_t* i16; - TfLiteComplex64* c64; - TfLiteComplex128* c128; - int8_t* int8; - /* Only use this member. */ - void* data; -} TfLitePtrUnion; - -// Memory allocation strategies. -// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. -// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, -// and available during eval. -// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and -// only available during eval. -// * kTfLiteDynamic: Allocated during eval, or for string tensors. -// * kTfLitePersistentRo: Allocated and populated during prepare. This is -// useful for tensors that can be computed during prepare and treated -// as constant inputs for downstream ops (also in prepare). -// * kTfLiteCustom: Custom memory allocation provided by the user. See -// TfLiteCustomAllocation below. -typedef enum TfLiteAllocationType { - kTfLiteMemNone = 0, - kTfLiteMmapRo, - kTfLiteArenaRw, - kTfLiteArenaRwPersistent, - kTfLiteDynamic, - kTfLitePersistentRo, - kTfLiteCustom, -} TfLiteAllocationType; - -// The delegates should use zero or positive integers to represent handles. -// -1 is reserved from unallocated status. -typedef int TfLiteBufferHandle; -enum { - kTfLiteNullBufferHandle = -1, -}; - -// Storage format of each dimension in a sparse tensor. -typedef enum TfLiteDimensionType { - kTfLiteDimDense = 0, - kTfLiteDimSparseCSR, -} TfLiteDimensionType; - -// Metadata to encode each dimension in a sparse tensor. -typedef struct TfLiteDimensionMetadata { - TfLiteDimensionType format; - int dense_size; - TfLiteIntArray* array_segments; - TfLiteIntArray* array_indices; -} TfLiteDimensionMetadata; - -// Parameters used to encode a sparse tensor. For detailed explanation of each -// field please refer to lite/schema/schema.fbs. 
-typedef struct TfLiteSparsity { - TfLiteIntArray* traversal_order; - TfLiteIntArray* block_map; - TfLiteDimensionMetadata* dim_metadata; - int dim_metadata_size; -} TfLiteSparsity; - -// Defines a custom memory allocation not owned by the runtime. -// `data` should be aligned to kDefaultTensorAlignment defined in -// lite/util.h. (Currently 64 bytes) -// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. -typedef struct TfLiteCustomAllocation { - void* data; - size_t bytes; -} TfLiteCustomAllocation; - -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). -#ifndef TF_LITE_STATIC_MEMORY -typedef struct TfLiteTensor { - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - // Quantization information. - TfLiteQuantizationParams params; - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // An opaque pointer to a tflite::MMapAllocation - const void* allocation; - - // Null-terminated name of this tensor. 
- const char* name; - - // The delegate which knows how to handle `buffer_handle`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; - - // An integer buffer handle that can be handled by `delegate`. - // The value is valid only when delegate is not null. - // WARNING: This is an experimental interface that is subject to change. - TfLiteBufferHandle buffer_handle; - - // If the delegate uses its own buffer (e.g. GPU memory), the delegate is - // responsible to set data_is_stale to true. - // `delegate->CopyFromBufferHandle` can be called to copy the data from - // delegate buffer. - // WARNING: This is an // experimental interface that is subject to change. - bool data_is_stale; - - // True if the tensor is a variable. - bool is_variable; - - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Parameters used to encode a sparse tensor. - // This is optional. The field is NULL if a tensor is dense. - // WARNING: This is an experimental interface that is subject to change. - TfLiteSparsity* sparsity; - - // Optional. Encodes shapes with unknown dimensions with -1. This field is - // only populated when unknown dimensions exist in a read-write tensor (i.e. - // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). - const TfLiteIntArray* dims_signature; -} TfLiteTensor; - -// A structure representing an instance of a node. -// This structure only exhibits the inputs, outputs and user defined data, not -// other features like the type. -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. 
- TfLiteIntArray* intermediates; - - // Temporary tensors uses during the computations. This usually contains no - // tensors, but ops are allowed to change that if they need scratch space of - // any sort. - TfLiteIntArray* temporaries; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; - - // The pointer to the delegate. This is non-null only when the node is - // created by calling `interpreter.ModifyGraphWithDelegate`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; -} TfLiteNode; -#else // defined(TF_LITE_STATIC_MEMORY)? -// NOTE: This flag is opt-in only at compile time. -// -// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct -// contains only the minimum fields required to initialize and prepare a micro -// inference graph. The fields in this struct have been ordered from -// largest-to-smallest for optimal struct sizeof. -// -// This struct does not use: -// - allocation -// - buffer_handle -// - data_is_stale -// - delegate -// - dims_signature -// - name -// - sparsity -typedef struct TfLiteTensor { - // TODO(b/155784997): Consider consolidating these quantization fields: - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Quantization information. - TfLiteQuantizationParams params; - - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. 
- TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - - // True if the tensor is a variable. - bool is_variable; -} TfLiteTensor; - -// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains -// only the minimum fields required to represent a node. -// -// This struct does not use: -// - delegate -// - intermediates -// - temporaries -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. 
- const void* custom_initial_data; - int custom_initial_data_size; -} TfLiteNode; -#endif // TF_LITE_STATIC_MEMORY - -// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount -// of information required for a kernel to run during TfLiteRegistration::Eval. -// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM -// builds with this flag by default internally. -typedef struct TfLiteEvalTensor { - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. - TfLiteIntArray* dims; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; -} TfLiteEvalTensor; - -#ifndef TF_LITE_STATIC_MEMORY -// Free data memory of tensor `t`. -void TfLiteTensorDataFree(TfLiteTensor* t); - -// Free quantization data. -void TfLiteQuantizationFree(TfLiteQuantization* quantization); - -// Free sparsity parameters. -void TfLiteSparsityFree(TfLiteSparsity* sparsity); - -// Free memory of tensor `t`. -void TfLiteTensorFree(TfLiteTensor* t); - -// Set all of a tensor's fields (and free any previously allocated data). -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor); - -// Resize the allocated data of a (dynamic) tensor. Tensors with allocation -// types other than kTfLiteDynamic will be ignored. -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); -#endif // TF_LITE_STATIC_MEMORY - -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteDelegateParams has to be allocated in a way that it's -// trivially destructable. 
It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateDelegateParams` function in `interpreter.cc` details. -typedef struct TfLiteDelegateParams { - struct TfLiteDelegate* delegate; - TfLiteIntArray* nodes_to_replace; - TfLiteIntArray* input_tensors; - TfLiteIntArray* output_tensors; -} TfLiteDelegateParams; - -typedef struct TfLiteContext { - // Number of tensors in the context. - size_t tensors_size; - - // The execution plan contains a list of the node indices in execution - // order. execution_plan->size is the current number of nodes. And, - // execution_plan->data[0] is the first node that needs to be run. - // TfLiteDelegates can traverse the current execution plan by iterating - // through each member of this array and using GetNodeAndRegistration() to - // access details about a node. i.e. - // TfLiteIntArray* execution_plan; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); - // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { - // int node_index = execution_plan->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, ®); - // } - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context, - TfLiteIntArray** execution_plan); - - // An array of tensors in the interpreter context (of length `tensors_size`) - TfLiteTensor* tensors; - - // opaque full context ptr (an opaque c++ data structure) - void* impl_; - - // Request memory pointer be resized. Updates dimensions on the tensor. - // NOTE: ResizeTensor takes ownership of newSize. - TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, - TfLiteIntArray* new_size); - // Request that an error be reported with format string msg. 
- void (*ReportError)(struct TfLiteContext*, const char* msg, ...); - - // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If - // non-null, the value pointed to by `first_new_tensor_index` will be set to - // the index of the first new tensor. - TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, - int* first_new_tensor_index); - - // Get a Tensor node by node_index. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetNodeAndRegistration)( - struct TfLiteContext*, int node_index, TfLiteNode** node, - struct TfLiteRegistration** registration); - - // Replace ops with one or more stub delegate operations. This function - // does not take ownership of `nodes_to_replace`. - TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( - struct TfLiteContext*, struct TfLiteRegistration registration, - const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); - - // Number of threads that are recommended to subsystems like gemmlowp and - // eigen. - int recommended_num_threads; - - // Access external contexts by type. - // WARNING: This is an experimental interface that is subject to change. - TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, - TfLiteExternalContextType); - // Set the value of a external context. Does not take ownership of the - // pointer. - // WARNING: This is an experimental interface that is subject to change. - void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, - TfLiteExternalContext*); - - // Flag for allowing float16 precision for FP32 calculation. - // default: false. - // WARNING: This is an experimental API and subject to change. - bool allow_fp32_relax_to_fp16; - - // Pointer to the op-level profiler, if set; nullptr otherwise. - void* profiler; - - // Allocate persistent buffer which has the same life time as the interpreter. - // Returns nullptr on failure. 
- // The memory is allocated from heap for TFL, and from tail in TFLM. - // This method is only available in Init or Prepare stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); - - // Allocate a buffer which will be deallocated right after invoke phase. - // The memory is allocated from heap in TFL, and from volatile arena in TFLM. - // This method is only available in invoke stage. - // NOTE: If possible use RequestScratchBufferInArena method to avoid memory - // allocation during inference time. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, - void** ptr); - - // Request a scratch buffer in the arena through static memory planning. - // This method is only available in Prepare stage and the buffer is allocated - // by the interpreter between Prepare and Eval stage. In Eval stage, - // GetScratchBuffer API can be used to fetch the address. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, - size_t bytes, int* buffer_idx); - - // Get the scratch buffer pointer. - // This method is only available in Eval stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); - - // Resize the memory pointer of the `tensor`. This method behaves the same as - // `ResizeTensor`, except that it makes a copy of the shape array internally - // so the shape array could be deallocated right afterwards. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, - TfLiteTensor* tensor, int dims, - const int* shape); - - // This method provides a preview of post-delegation partitioning. 
Each - // TfLiteDelegateParams in the referenced array corresponds to one instance of - // the delegate kernel. - // Example usage: - // - // TfLiteIntArray* nodes_to_replace = ...; - // TfLiteDelegateParams* params_array; - // int num_partitions = 0; - // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( - // context, delegate, nodes_to_replace, ¶ms_array, &num_partitions)); - // for (int idx = 0; idx < num_partitions; idx++) { - // const auto& partition_params = params_array[idx]; - // ... - // } - // - // NOTE: The context owns the memory referenced by partition_params_array. It - // will be cleared with another call to PreviewDelegateParitioning, or after - // TfLiteDelegateParams::Prepare returns. - // - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*PreviewDelegatePartitioning)( - struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, - TfLiteDelegateParams** partition_params_array, int* num_partitions); - - // Returns a TfLiteTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, - int tensor_idx); - - // Returns a TfLiteEvalTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, - int tensor_idx); -} TfLiteContext; - -typedef struct TfLiteRegistration { - // Initializes the op from serialized data. - // If a built-in op: - // `buffer` is the op's params data (TfLiteLSTMParams*). - // `length` is zero. - // If custom op: - // `buffer` is the op's `custom_options`. - // `length` is the size of the buffer. - // - // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer - // or an instance of a struct). 
- // - // The returned pointer will be stored with the node in the `user_data` field, - // accessible within prepare and invoke functions below. - // NOTE: if the data is already in the desired format, simply implement this - // function to return `nullptr` and implement the free function to be a no-op. - void* (*init)(TfLiteContext* context, const char* buffer, size_t length); - - // The pointer `buffer` is the data previously returned by an init invocation. - void (*free)(TfLiteContext* context, void* buffer); - - // prepare is called when the inputs this node depends on have been resized. - // context->ResizeTensor() can be called to request output tensors to be - // resized. - // - // Returns kTfLiteOk on success. - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); - - // Execute the node (should read node->inputs and output to node->outputs). - // Returns kTfLiteOk on success. - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); - - // profiling_string is called during summarization of profiling information - // in order to group executions together. Providing a value here will cause a - // given op to appear multiple times is the profiling report. This is - // particularly useful for custom ops that can perform significantly - // different calculations depending on their `user-data`. - const char* (*profiling_string)(const TfLiteContext* context, - const TfLiteNode* node); - - // Builtin codes. If this kernel refers to a builtin this is the code - // of the builtin. This is so we can do marshaling to other frameworks like - // NN API. - // Note: It is the responsibility of the registration binder to set this - // properly. - int32_t builtin_code; - - // Custom op name. If the op is a builtin, this will be null. - // Note: It is the responsibility of the registration binder to set this - // properly. - // WARNING: This is an experimental interface that is subject to change. 
- const char* custom_name; - - // The version of the op. - // Note: It is the responsibility of the registration binder to set this - // properly. - int version; -} TfLiteRegistration; - -// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the -// values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteDelegateFlags { - kTfLiteDelegateFlagsNone = 0, - // The flag is set if the delegate can handle dynamic sized tensors. - // For example, the output shape of a `Resize` op with non-constant shape - // can only be inferred when the op is invoked. - // In this case, the Delegate is responsible for calling - // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling - // `ResizeTensor` when invoking the op. - // - // If the delegate isn't capable to handle dynamic tensors, this flag need - // to be set to false. - kTfLiteDelegateFlagsAllowDynamicTensors = 1, - - // This flag can be used by delegates (that allow dynamic tensors) to ensure - // applicable tensor shapes are automatically propagated in the case of tensor - // resizing. - // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors - // of a delegate kernel will have correct shapes before its Prepare() method - // is called. The runtime leverages TFLite builtin ops in the original - // execution plan to propagate shapes. - // - // A few points to note: - // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is - // false, this one is redundant since the delegate kernels are re-initialized - // every time tensors are resized. - // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra - // work is required to prepare the original execution plan. - // 3. This flag requires that the original execution plan only have ops with - // valid registrations (and not 'dummy' custom ops like with Flex). - // WARNING: This feature is experimental and subject to change. 
- kTfLiteDelegateFlagsRequirePropagatedShapes = 2 -} TfLiteDelegateFlags; - -// WARNING: This is an experimental interface that is subject to change. -typedef struct TfLiteDelegate { - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for doing this when it is destroyed. - void* data_; - - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. - TfLiteStatus (*Prepare)(TfLiteContext* context, - struct TfLiteDelegate* delegate); - - // Copy the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. - TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Copy the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. - TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Free the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. - void (*FreeBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle* handle); - - // Bitmask flags. 
See the comments in `TfLiteDelegateFlags`. - int64_t flags; -} TfLiteDelegate; - -// Build a 'null' delegate, with all the fields properly set to their default -// values. -TfLiteDelegate TfLiteDelegateCreate(); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus #endif // TENSORFLOW_LITE_C_COMMON_H_ diff --git a/src/tensorflow/lite/context_util.h b/src/tensorflow/lite/context_util.h new file mode 100644 index 0000000..cbbe9f1 --- /dev/null +++ b/src/tensorflow/lite/context_util.h @@ -0,0 +1,54 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// \file +/// +/// This provides a few C++ helpers that are useful for manipulating C +/// structures in C++. +#ifndef TENSORFLOW_LITE_CONTEXT_UTIL_H_ +#define TENSORFLOW_LITE_CONTEXT_UTIL_H_ + +#include + +#include "tensorflow/lite/core/c/common.h" + +namespace tflite { + +/// Provides a range iterable wrapper for TfLiteIntArray* (C lists) that TfLite +/// C api uses. +// Can't use the google array_view, since we can't depend on even +// absl for embedded device reasons. +class TfLiteIntArrayView { + public: + /// Construct a view of a TfLiteIntArray*. Note, `int_array` should be + /// non-null and this view does not take ownership of it. 
+ explicit TfLiteIntArrayView(const TfLiteIntArray* int_array) + : int_array_(int_array) {} + + TfLiteIntArrayView(const TfLiteIntArrayView&) = default; + TfLiteIntArrayView& operator=(const TfLiteIntArrayView& rhs) = default; + + typedef const int* const_iterator; + const_iterator begin() const { return int_array_->data; } + const_iterator end() const { return &int_array_->data[int_array_->size]; } + size_t size() const { return end() - begin(); } + int operator[](size_t pos) const { return int_array_->data[pos]; } + + private: + const TfLiteIntArray* int_array_; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_CONTEXT_UTIL_H_ diff --git a/src/tensorflow/lite/core/api/error_reporter.h b/src/tensorflow/lite/core/api/error_reporter.h index 05839a6..5a98691 100644 --- a/src/tensorflow/lite/core/api/error_reporter.h +++ b/src/tensorflow/lite/core/api/error_reporter.h @@ -15,45 +15,6 @@ limitations under the License. #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ -#include - -namespace tflite { - -/// A functor that reports error to supporting system. Invoked similar to -/// printf. -/// -/// Usage: -/// ErrorReporter foo; -/// foo.Report("test %d", 5); -/// or -/// va_list args; -/// foo.Report("test %d", args); // where args is va_list -/// -/// Subclass ErrorReporter to provide another reporting destination. -/// For example, if you have a GUI program, you might redirect to a buffer -/// that drives a GUI error log box. -class ErrorReporter { - public: - virtual ~ErrorReporter() {} - virtual int Report(const char* format, va_list args) = 0; - int Report(const char* format, ...); - int ReportError(void*, const char* format, ...); -}; - -} // namespace tflite - -// You should not make bare calls to the error reporter, instead use the -// TF_LITE_REPORT_ERROR macro, since this allows message strings to be -// stripped when the binary size has to be optimized. 
If you are looking to -// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and -// every call will be stubbed out, taking no memory. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_REPORT_ERROR(reporter, ...) \ - do { \ - static_cast(reporter)->Report(__VA_ARGS__); \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_REPORT_ERROR(reporter, ...) -#endif // TF_LITE_STRIP_ERROR_STRINGS +#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h" #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ diff --git a/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp b/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp index a4bfc77..8b3236c 100644 --- a/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp +++ b/src/tensorflow/lite/core/api/flatbuffer_conversions.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,17 +15,21 @@ limitations under the License. #include "tensorflow/lite/core/api/flatbuffer_conversions.h" +#include #include #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" +#include "third_party/flatbuffers/include/flatbuffers/vector.h" +#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h" +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/schema/schema_generated.h" +// TODO(sosagarcia): Rework all function implementations to wrap around the +// compiler flatbuffer_conversions. 
+// LINT.IfChange namespace tflite { namespace { @@ -76,9 +80,10 @@ void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter, // Copies the contents from the flatbuffer int vector `flatbuffer` into the // int array `buffer`. `flat_vector` and `buffer` represent the same // configuration operation for a given operation. -TfLiteStatus FlatBufferIntVectorToArray( - int max_size_of_buffer, const flatbuffers::Vector* flat_vector, - int* buffer, ErrorReporter* error_reporter, const char* op_name) { +template +static TfLiteStatus FlatBufferIntVectorToArray( + int max_size_of_buffer, const flatbuffers::Vector* flat_vector, + DataType* buffer, ErrorReporter* error_reporter, const char* op_name) { if (!flat_vector) { TF_LITE_REPORT_ERROR(error_reporter, "Input array not provided for operation '%s'.\n", @@ -86,7 +91,7 @@ TfLiteStatus FlatBufferIntVectorToArray( return kTfLiteError; } else { size_t num_dimensions = flat_vector->size(); - if (num_dimensions > max_size_of_buffer / sizeof(int)) { + if (num_dimensions > max_size_of_buffer / sizeof(DataType)) { TF_LITE_REPORT_ERROR( error_reporter, "Found too many dimensions in the input array of operation '%s'.\n", @@ -131,7 +136,30 @@ TfLitePadding ConvertPadding(Padding padding) { return kTfLitePaddingUnknown; } -#ifndef TF_LITE_STATIC_MEMORY +// Converts the flatbuffer mirror padding enum to what is used at runtime. 
+TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) { + switch (padding) { + case MirrorPadMode_REFLECT: + return kTfLiteMirrorPaddingReflect; + case MirrorPadMode_SYMMETRIC: + return kTfLiteMirrorPaddingSymmetric; + } + return kTfLiteMirrorPaddingUnknown; +} + +TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) { + switch (algorithm) { + case RngAlgorithm_THREEFRY: + return kTfLiteRngAlgorithmThreefry; + case RngAlgorithm_PHILOX: + return kTfLiteRngAlgorithmPhilox; + case RngAlgorithm_DEFAULT: + return kTfLiteRngAlgorithmDefault; + } + return kTfLiteRngAlgorithmUnknown; +} + +#ifndef ARDUINO TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -169,6 +197,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseAdd(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_ADD_N: { + return ParseAddN(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_ARG_MAX: { return ParseArgMax(op, error_reporter, allocator, builtin_data); } @@ -177,10 +209,34 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseArgMin(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_ASSIGN_VARIABLE: { + return ParseAssignVariable(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_AVERAGE_POOL_2D: { return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_BATCH_MATMUL: { + return ParseBatchMatMul(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BATCH_TO_SPACE_ND: { + return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BROADCAST_ARGS: { + return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BROADCAST_TO: { + return ParseBroadcastTo(op, error_reporter, allocator, builtin_data); 
+ } + + case BuiltinOperator_CALL_ONCE: { + return ParseCallOnce(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_CEIL: { return ParseCeil(op, error_reporter, allocator, builtin_data); } @@ -193,6 +249,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseConv2D(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_CUMSUM: { + return ParseCumsum(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_DEPTH_TO_SPACE: { + return ParseDepthToSpace(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_DEPTHWISE_CONV_2D: { return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data); } @@ -201,6 +265,26 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseDequantize(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_DIV: { + return ParseDiv(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_ELU: { + return ParseElu(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_EMBEDDING_LOOKUP: { + return ParseEmbeddingLookup(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_EXP: { + return ParseExp(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_EXPAND_DIMS: { + return ParseExpandDims(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_FILL: { return ParseFill(op, error_reporter, allocator, builtin_data); } @@ -209,10 +293,22 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseFloor(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_FLOOR_DIV: { + return ParseFloorDiv(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_FLOOR_MOD: { + return ParseFloorMod(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_FULLY_CONNECTED: { return ParseFullyConnected(op, error_reporter, 
allocator, builtin_data); } + case BuiltinOperator_GATHER_ND: { + return ParseGatherNd(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_GREATER: { return ParseGreater(op, error_reporter, allocator, builtin_data); } @@ -233,6 +329,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_LEAKY_RELU: { + return ParseLeakyRelu(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_LESS: { return ParseLess(op, error_reporter, allocator, builtin_data); } @@ -261,6 +361,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseLogistic(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_LOG_SOFTMAX: { + return ParseLogSoftmax(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_LSTM: { + return ParseLSTM(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_MAXIMUM: { return ParseMaximum(op, error_reporter, allocator, builtin_data); } @@ -269,6 +377,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_MIRROR_PAD: { + return ParseMirrorPad(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_MEAN: { return ParseReducer(op, error_reporter, allocator, builtin_data); } @@ -301,6 +413,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePadV2(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_POW: { + return ParsePow(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_PRELU: { return ParsePrelu(op, error_reporter, allocator, builtin_data); } @@ -309,10 +425,18 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseQuantize(op, error_reporter, allocator, builtin_data); } + case 
BuiltinOperator_READ_VARIABLE: { + return ParseReadVariable(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_REDUCE_ANY: { return ParseReducer(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_REDUCE_ALL: { + return ParseReducer(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_REDUCE_MAX: { return ParseReducer(op, error_reporter, allocator, builtin_data); } @@ -354,6 +478,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseRsqrt(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SELECT_V2: { + return ParseSelectV2(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_SHAPE: { return ParseShape(op, error_reporter, allocator, builtin_data); } @@ -366,6 +494,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseSoftmax(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SPACE_TO_BATCH_ND: { + return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_SPACE_TO_DEPTH: { + return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_SPLIT: { return ParseSplit(op, error_reporter, allocator, builtin_data); } @@ -382,6 +518,15 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseSquare(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SQUARED_DIFFERENCE: { + return ParseSquaredDifference(op, error_reporter, allocator, + builtin_data); + } + + case BuiltinOperator_SQUEEZE: { + return ParseSqueeze(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_STRIDED_SLICE: { return ParseStridedSlice(op, error_reporter, allocator, builtin_data); } @@ -402,23 +547,32 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseTanh(op, error_reporter, allocator, builtin_data); } + case 
BuiltinOperator_TRANSPOSE_CONV: { + return ParseTransposeConv(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_UNPACK: { return ParseUnpack(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_VAR_HANDLE: { + return ParseVarHandle(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_ZEROS_LIKE: { + return ParseZerosLike(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BITWISE_XOR: { + return ParseBitwiseXor(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_RIGHT_SHIFT: { + return ParseRightShift(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_CAST: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_CastOptions()) { - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->in_data_type(), - ¶ms->in_data_type, - error_reporter)); - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), - ¶ms->out_data_type, - error_reporter)); - } - *builtin_data = params.release(); - return kTfLiteOk; + return ParseCast(op, error_reporter, allocator, builtin_data); } case BuiltinOperator_LSH_PROJECTION: { auto params = safe_allocator.Allocate(); @@ -487,16 +641,7 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, case BuiltinOperator_HASHTABLE_LOOKUP: // no-op. 
return kTfLiteOk; - case BuiltinOperator_DIV: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_DivOptions()) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } + case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -510,53 +655,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_LSTM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) { - params->activation = - ConvertActivation(lstm_params->fused_activation_function()); - params->cell_clip = lstm_params->cell_clip(); - params->proj_clip = lstm_params->proj_clip(); - switch (lstm_params->kernel_type()) { - case LSTMKernelType_FULL: - params->kernel_type = kTfLiteLSTMFullKernel; - break; - case LSTMKernelType_BASIC: - params->kernel_type = kTfLiteLSTMBasicKernel; - break; - default: - TF_LITE_REPORT_ERROR(error_reporter, - "Unhandled LSTM kernel type: %d", - lstm_params->kernel_type()); - return kTfLiteError; - } - params->asymmetric_quantize_inputs = - lstm_params->asymmetric_quantize_inputs(); - } else { - TF_LITE_REPORT_ERROR(error_reporter, - "No valid LSTM builtin options exist"); - return kTfLiteError; - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* seq_lstm_params = - op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) { - params->activation = - 
ConvertActivation(seq_lstm_params->fused_activation_function()); - params->cell_clip = seq_lstm_params->cell_clip(); - params->proj_clip = seq_lstm_params->proj_clip(); - params->time_major = seq_lstm_params->time_major(); - params->asymmetric_quantize_inputs = - seq_lstm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; + return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator, + builtin_data); } case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: { auto params = @@ -588,66 +689,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_SPACE_TO_DEPTH: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_SpaceToDepthOptions()) { - params->block_size = schema_params->block_size(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_DEPTH_TO_SPACE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_DepthToSpaceOptions()) { - params->block_size = schema_params->block_size(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_GATHER: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - params->axis = 0; - if (const auto* gather_params = op->builtin_options_as_GatherOptions()) { - params->axis = gather_params->axis(); - } - - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_SQUEEZE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) { - const auto* squeeze_dims = schema_params->squeeze_dims(); - if (squeeze_dims != nullptr) { - 
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( - sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims, - error_reporter, "squeeze")); - params->num_squeeze_dims = squeeze_dims->size(); - } else { - params->num_squeeze_dims = 0; - } - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_TRANSPOSE_CONV: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* transpose_conv_params = - op->builtin_options_as_TransposeConvOptions()) { - params->padding = ConvertPadding(transpose_conv_params->padding()); - params->stride_width = transpose_conv_params->stride_w(); - params->stride_height = transpose_conv_params->stride_h(); - } - *builtin_data = params.release(); - return kTfLiteOk; + case BuiltinOperator_GATHER: { + return ParseGather(op, error_reporter, allocator, builtin_data); } case BuiltinOperator_SPARSE_TO_DENSE: { auto params = safe_allocator.Allocate(); @@ -660,7 +704,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return kTfLiteOk; } case BuiltinOperator_DELEGATE: { - // TODO(ycling): Revisit when supporting saving delegated models. 
TF_LITE_REPORT_ERROR(error_reporter, "DELEGATE op shouldn't exist in model."); return kTfLiteError; @@ -687,29 +730,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_LEAKY_RELU: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* leaky_relu_params = - op->builtin_options_as_LeakyReluOptions()) { - params->alpha = leaky_relu_params->alpha(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_MIRROR_PAD: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions(); - if (mirror_pad_params != nullptr) { - params->mode = - mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT - ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect - : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric; - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_UNIQUE: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -754,86 +774,255 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_BATCH_MATMUL: { - auto params = safe_allocator.Allocate(); + case BuiltinOperator_CONV_3D: + case BuiltinOperator_CONV_3D_TRANSPOSE: { + auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bmm_params = - op->builtin_options_as_BatchMatMulOptions()) { - params->adj_x = bmm_params->adj_x(); - params->adj_y = bmm_params->adj_y(); + if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) { + params->padding = ConvertPadding(conv3d_params->padding()); + params->activation = + ConvertActivation(conv3d_params->fused_activation_function()); + 
params->stride_depth = conv3d_params->stride_d(); + params->stride_height = conv3d_params->stride_h(); + params->stride_width = conv3d_params->stride_w(); + params->dilation_depth_factor = conv3d_params->dilation_d_factor(); + params->dilation_height_factor = conv3d_params->dilation_h_factor(); + params->dilation_width_factor = conv3d_params->dilation_w_factor(); } *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_CALL_ONCE: { - auto params = safe_allocator.Allocate(); + case BuiltinOperator_HASHTABLE: { + auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* call_once_params = - op->builtin_options_as_CallOnceOptions()) { - params->init_subgraph_index = call_once_params->init_subgraph_index(); + if (const auto* hashtable_params = + op->builtin_options_as_HashtableOptions()) { + params->table_id = hashtable_params->table_id(); + TF_LITE_ENSURE_STATUS(ConvertTensorType( + hashtable_params->key_dtype(), ¶ms->key_dtype, error_reporter)); + TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(), + ¶ms->value_dtype, + error_reporter)); } *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_CUMSUM: { - auto params = safe_allocator.Allocate(); + case BuiltinOperator_MULTINOMIAL: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* multinomial_params = + op->builtin_options_as_RandomOptions()) { + params->seed = multinomial_params->seed(); + params->seed2 = multinomial_params->seed2(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_RANDOM_STANDARD_NORMAL: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* random_std_normal_params = + op->builtin_options_as_RandomOptions()) { + params->seed = random_std_normal_params->seed(); + params->seed2 = random_std_normal_params->seed2(); + } + 
*builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_BUCKETIZE: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* bucketize_params = + op->builtin_options_as_BucketizeOptions()) { + const flatbuffers::Vector* boundaries = + bucketize_params->boundaries(); + if (boundaries == nullptr) { + TF_LITE_REPORT_ERROR( + error_reporter, + "boundaries array not provided for operation 'bucketize'.\n"); + return kTfLiteError; + } + params->num_boundaries = boundaries->size(); + if (boundaries->data() == nullptr) { + TF_LITE_REPORT_ERROR(error_reporter, + "boundaries.data() returned nullptr for " + "operation 'bucketize'.\n"); + return kTfLiteError; + } + params->boundaries = boundaries->data(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_RANDOM_UNIFORM: { + auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { - params->exclusive = cumsum_params->exclusive(); - params->reverse = cumsum_params->reverse(); + if (const auto* random_uniform_params = + op->builtin_options_as_RandomOptions()) { + params->seed = random_uniform_params->seed(); + params->seed2 = random_uniform_params->seed2(); } *builtin_data = params.release(); return kTfLiteOk; } + case BuiltinOperator_GELU: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) { + params->approximate = gelu_params->approximate(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_STABLEHLO_SCATTER: { + return ParseStablehloScatter(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR: { + return ParseStablehloRngBitGenerator(op, error_reporter, allocator, + builtin_data); + } + case 
BuiltinOperator_STABLEHLO_GATHER: { + return ParseStablehloGather(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_STABLEHLO_REDUCE_WINDOW: { + return ParseStablehloReduceWindow(op, error_reporter, allocator, + builtin_data); + } + case BuiltinOperator_REDUCE_WINDOW: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* reduce_params = + op->builtin_options_2_as_ReduceWindowOptions()) { + switch (reduce_params->reduce_function()) { + case ReduceWindowFunction_ADD: + params->reduce_function = TfLiteReduceWindowFunctionAdd; + break; + case ReduceWindowFunction_MUL: + params->reduce_function = TfLiteReduceWindowFunctionMul; + break; + case ReduceWindowFunction_MINIMUM: + params->reduce_function = TfLiteReduceWindowFunctionMin; + break; + case ReduceWindowFunction_MAXIMUM: + params->reduce_function = TfLiteReduceWindowFunctionMax; + break; + case ReduceWindowFunction_ALL: + params->reduce_function = TfLiteReduceWindowFunctionAll; + break; + case ReduceWindowFunction_ANY: + params->reduce_function = TfLiteReduceWindowFunctionAny; + break; + case ReduceWindowFunction_UNSUPPORTED: + default: + return kTfLiteError; + } + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_STABLEHLO_PAD: { + return ParseStablehloPad(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_STABLEHLO_COMPOSITE: { + return ParseStablehloComposite(op, error_reporter, allocator, + builtin_data); + } + case BuiltinOperator_STABLEHLO_SHIFT_LEFT: { + return ParseStablehloShiftLeft(op, error_reporter, allocator, + builtin_data); + } + case BuiltinOperator_STABLEHLO_CASE: { + return ParseStablehloCase(op, error_reporter, allocator, builtin_data); + } + // TODO: skip param parsing for now since ops below don't have kernels + case BuiltinOperator_STABLEHLO_SLICE: + case BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM: + case BuiltinOperator_STABLEHLO_CONVOLUTION: + case 
BuiltinOperator_STABLEHLO_LOGISTIC: + case BuiltinOperator_STABLEHLO_ADD: + case BuiltinOperator_STABLEHLO_DIVIDE: + case BuiltinOperator_STABLEHLO_MULTIPLY: + case BuiltinOperator_STABLEHLO_MAXIMUM: + case BuiltinOperator_STABLEHLO_RESHAPE: + case BuiltinOperator_STABLEHLO_CLAMP: + case BuiltinOperator_STABLEHLO_CONCATENATE: + case BuiltinOperator_STABLEHLO_CUSTOM_CALL: + case BuiltinOperator_STABLEHLO_REDUCE: + case BuiltinOperator_STABLEHLO_ABS: + case BuiltinOperator_STABLEHLO_AND: + case BuiltinOperator_STABLEHLO_COSINE: + case BuiltinOperator_STABLEHLO_EXPONENTIAL: + case BuiltinOperator_STABLEHLO_FLOOR: + case BuiltinOperator_STABLEHLO_LOG: + case BuiltinOperator_STABLEHLO_MINIMUM: + case BuiltinOperator_STABLEHLO_NEGATE: + case BuiltinOperator_STABLEHLO_OR: + case BuiltinOperator_STABLEHLO_POWER: + case BuiltinOperator_STABLEHLO_REMAINDER: + case BuiltinOperator_STABLEHLO_RSQRT: + case BuiltinOperator_STABLEHLO_SELECT: + case BuiltinOperator_STABLEHLO_SUBTRACT: + case BuiltinOperator_STABLEHLO_TANH: + case BuiltinOperator_STABLEHLO_DYNAMIC_SLICE: + case BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE: + case BuiltinOperator_STABLEHLO_IOTA: + case BuiltinOperator_STABLEHLO_COMPARE: + case BuiltinOperator_STABLEHLO_CONVERT: + case BuiltinOperator_STABLEHLO_DOT_GENERAL: + case BuiltinOperator_STABLEHLO_SORT: + case BuiltinOperator_STABLEHLO_WHILE: + case BuiltinOperator_STABLEHLO_TRANSPOSE: + case BuiltinOperator_STABLEHLO_CBRT: + // Below are the ops with no builtin_data structure. - case BuiltinOperator_BATCH_TO_SPACE_ND: // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are // ok for now, since there is no call implementation either. 
case BuiltinOperator_CALL: + case BuiltinOperator_COMPLEX_ABS: case BuiltinOperator_CONCAT_EMBEDDINGS: case BuiltinOperator_COS: case BuiltinOperator_CUSTOM: - case BuiltinOperator_ELU: - case BuiltinOperator_EMBEDDING_LOOKUP: + case BuiltinOperator_DENSIFY: + case BuiltinOperator_DYNAMIC_UPDATE_SLICE: case BuiltinOperator_EQUAL: - case BuiltinOperator_EXP: - case BuiltinOperator_EXPAND_DIMS: - case BuiltinOperator_LOG_SOFTMAX: + case BuiltinOperator_HASHTABLE_FIND: + case BuiltinOperator_HASHTABLE_IMPORT: + case BuiltinOperator_HASHTABLE_SIZE: + case BuiltinOperator_IMAG: case BuiltinOperator_MATRIX_DIAG: case BuiltinOperator_MATRIX_SET_DIAG: + case BuiltinOperator_NON_MAX_SUPPRESSION_V4: + case BuiltinOperator_NON_MAX_SUPPRESSION_V5: case BuiltinOperator_RELU_N1_TO_1: + case BuiltinOperator_RELU_0_TO_1: + case BuiltinOperator_SCATTER_ND: case BuiltinOperator_SELECT: - case BuiltinOperator_SELECT_V2: case BuiltinOperator_SLICE: - case BuiltinOperator_SPACE_TO_BATCH_ND: case BuiltinOperator_TILE: case BuiltinOperator_TOPK_V2: case BuiltinOperator_TRANSPOSE: - case BuiltinOperator_POW: - case BuiltinOperator_FLOOR_DIV: - case BuiltinOperator_ZEROS_LIKE: - case BuiltinOperator_FLOOR_MOD: case BuiltinOperator_RANGE: - case BuiltinOperator_SQUARED_DIFFERENCE: - case BuiltinOperator_REVERSE_V2: - case BuiltinOperator_ADD_N: - case BuiltinOperator_GATHER_ND: - case BuiltinOperator_WHERE: case BuiltinOperator_RANK: - case BuiltinOperator_NON_MAX_SUPPRESSION_V4: - case BuiltinOperator_NON_MAX_SUPPRESSION_V5: - case BuiltinOperator_SCATTER_ND: - case BuiltinOperator_DENSIFY: - case BuiltinOperator_SEGMENT_SUM: - case BuiltinOperator_BROADCAST_TO: + case BuiltinOperator_REAL: case BuiltinOperator_RFFT2D: + case BuiltinOperator_SEGMENT_SUM: + case BuiltinOperator_REVERSE_V2: + case BuiltinOperator_UNSORTED_SEGMENT_MAX: + case BuiltinOperator_UNSORTED_SEGMENT_MIN: + case BuiltinOperator_UNSORTED_SEGMENT_PROD: + case BuiltinOperator_UNSORTED_SEGMENT_SUM: + case 
BuiltinOperator_ATAN2: + case BuiltinOperator_SIGN: + case BuiltinOperator_BITCAST: + case BuiltinOperator_WHERE: + case BuiltinOperator_DILATE: return kTfLiteOk; case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: return kTfLiteError; } return kTfLiteError; } // NOLINT[readability/fn_size] -#endif // !defined(TF_LITE_STATIC_MEMORY) +#endif // !defined(ARDUINO) } // namespace TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, @@ -842,6 +1031,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_FLOAT16: *type = kTfLiteFloat16; return kTfLiteOk; + case TensorType_BFLOAT16: + *type = kTfLiteBFloat16; + return kTfLiteOk; case TensorType_FLOAT32: *type = kTfLiteFloat32; return kTfLiteOk; @@ -851,9 +1043,15 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_INT16: *type = kTfLiteInt16; return kTfLiteOk; + case TensorType_UINT16: + *type = kTfLiteUInt16; + return kTfLiteOk; case TensorType_INT32: *type = kTfLiteInt32; return kTfLiteOk; + case TensorType_UINT32: + *type = kTfLiteUInt32; + return kTfLiteOk; case TensorType_UINT8: *type = kTfLiteUInt8; return kTfLiteOk; @@ -878,6 +1076,15 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_COMPLEX128: *type = kTfLiteComplex128; return kTfLiteOk; + case TensorType_RESOURCE: + *type = kTfLiteResource; + return kTfLiteOk; + case TensorType_VARIANT: + *type = kTfLiteVariant; + return kTfLiteOk; + case TensorType_INT4: + *type = kTfLiteInt4; + return kTfLiteOk; default: *type = kTfLiteNoType; TF_LITE_REPORT_ERROR(error_reporter, @@ -912,13 +1119,18 @@ TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
+ // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); return kTfLiteOk; } +TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + return kTfLiteOk; +} + TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -937,7 +1149,7 @@ TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -962,7 +1174,7 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -972,25 +1184,126 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { +TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { return kTfLiteOk; } -TfLiteStatus ParseConcatenation(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); + auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ConcatenationOptions* schema_params = - op->builtin_options_as_ConcatenationOptions(); + if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) { + params->adj_x = bmm_params->adj_x(); + params->adj_y = bmm_params->adj_y(); + params->asymmetric_quantize_inputs = + bmm_params->asymmetric_quantize_inputs(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const CallOnceOptions* schema_params = + op->builtin_options_as_CallOnceOptions(); + + if (schema_params != nullptr) { + params->init_subgraph_index = schema_params->init_subgraph_index(); + + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* schema_params = op->builtin_options_as_CastOptions()) { + TF_LITE_ENSURE_STATUS(ConvertTensorType( + schema_params->in_data_type(), ¶ms->in_data_type, error_reporter)); + TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), + ¶ms->out_data_type, + error_reporter)); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseConcatenation(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const ConcatenationOptions* schema_params = + op->builtin_options_as_ConcatenationOptions(); if (schema_params != nullptr) { params->activation = @@ -999,7 +1312,7 @@ TfLiteStatus ParseConcatenation(const Operator* op, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); @@ -1027,16 +1340,37 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, params->dilation_width_factor = schema_params->dilation_w_factor(); params->dilation_height_factor = schema_params->dilation_h_factor(); + TF_LITE_ENSURE_STATUS( + ConvertTensorType(schema_params->quantized_bias_type(), + ¶ms->quantized_bias_type, error_reporter)); } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { + params->exclusive = cumsum_params->exclusive(); + params->reverse = cumsum_params->reverse(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
@@ -1045,6 +1379,31 @@ TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +TfLiteStatus ParseDepthToSpace(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions(); + if (schema_params != nullptr) { + params->block_size = schema_params->block_size(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseDepthwiseConv2D(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1074,7 +1433,7 @@ TfLiteStatus ParseDepthwiseConv2D(const Operator* op, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); @@ -1089,6 +1448,37 @@ TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* schema_params = op->builtin_options_as_DivOptions()) { + params->activation = + ConvertActivation(schema_params->fused_activation_function()); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseEmbeddingLookup(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1097,6 +1487,22 @@ TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1113,6 +1519,22 @@ TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseFullyConnected(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1135,7 +1557,9 @@ TfLiteStatus ParseFullyConnected(const Operator* op, params->keep_num_dims = schema_params->keep_num_dims(); params->asymmetric_quantize_inputs = schema_params->asymmetric_quantize_inputs(); - + TF_LITE_ENSURE_STATUS( + ConvertTensorType(schema_params->quantized_bias_type(), + ¶ms->quantized_bias_type, error_reporter)); switch (schema_params->weights_format()) { case FullyConnectedOptionsWeightsFormat_DEFAULT: params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault; @@ -1152,13 +1576,42 @@ TfLiteStatus ParseFullyConnected(const Operator* op, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + params->axis = 0; + params->batch_dims = 0; + if (const auto* gather_params = op->builtin_options_as_GatherOptions()) { + params->axis = gather_params->axis(); + params->batch_dims = gather_params->batch_dims(); } *builtin_data = params.release(); return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
@@ -1183,6 +1636,30 @@ TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const IfOptions* schema_params = op->builtin_options_as_IfOptions(); + + if (schema_params != nullptr) { + params->then_subgraph_index = schema_params->then_subgraph_index(); + params->else_subgraph_index = schema_params->else_subgraph_index(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseL2Normalization(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1203,13 +1680,29 @@ TfLiteStatus ParseL2Normalization(const Operator* op, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); return kTfLiteOk; } +TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* leaky_relu_params = + op->builtin_options_as_LeakyReluOptions()) { + params->alpha = leaky_relu_params->alpha(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1266,6 +1759,48 @@ TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) { + params->activation = + ConvertActivation(lstm_params->fused_activation_function()); + params->cell_clip = lstm_params->cell_clip(); + params->proj_clip = lstm_params->proj_clip(); + switch (lstm_params->kernel_type()) { + case LSTMKernelType_FULL: + params->kernel_type = kTfLiteLSTMFullKernel; + break; + case LSTMKernelType_BASIC: + params->kernel_type = kTfLiteLSTMBasicKernel; + break; + default: + TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d", + lstm_params->kernel_type()); + return kTfLiteError; + } + params->asymmetric_quantize_inputs = + lstm_params->asymmetric_quantize_inputs(); + } else { + TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist"); + return kTfLiteError; + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
@@ -1282,6 +1817,32 @@ TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const MirrorPadOptions* schema_params = + op->builtin_options_as_MirrorPadOptions(); + + if (schema_params != nullptr) { + params->mode = ConvertMirrorPadding(schema_params->mode()); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -1299,7 +1860,7 @@ TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1340,7 +1901,7 @@ TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
+ // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1386,7 +1947,7 @@ TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1396,8 +1957,16 @@ TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. -TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { +TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { return kTfLiteOk; } @@ -1409,6 +1978,14 @@ TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1428,7 +2005,7 @@ TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1481,7 +2058,7 @@ TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); @@ -1542,6 +2119,354 @@ TfLiteStatus ParseResizeNearestNeighbor(const Operator* op, return kTfLiteOk; } +TfLiteStatus ParseStablehloReduceWindow(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + + const StablehloReduceWindowOptions* schema_params = + op->builtin_options_2_as_StablehloReduceWindowOptions(); + if (schema_params) { + if (!schema_params->window_dimensions() || + schema_params->window_dimensions()->size() == 0) { + TF_LITE_REPORT_ERROR(error_reporter, + "'window_dimensions' attribute is not optional for " + "'stablehlo.reduce_window' and cannot be empty."); + return kTfLiteError; + } + + const size_t rank = schema_params->window_dimensions()->size(); + + auto LoadAttr = [&error_reporter]( + int64_t* params_array, size_t params_array_size_bytes, + const flatbuffers::Vector* flatbuffer_vector, + const char* attr_name, const size_t expected_size, + const int64_t fill_value) -> TfLiteStatus { + if (flatbuffer_vector && flatbuffer_vector->size()) { + if (expected_size != 0 && flatbuffer_vector->size() != expected_size) { + TF_LITE_REPORT_ERROR( + error_reporter, + "'%s' attribute of 'stablehlo.reduce_window' does not have the " + "expected size (%llu != %llu).", + attr_name, flatbuffer_vector->size(), expected_size); + return kTfLiteError; + } + TfLiteStatus status = FlatBufferIntVectorToArray( + params_array_size_bytes, flatbuffer_vector, params_array, + error_reporter, "stablehlo.reduce_window"); + if (status != kTfLiteOk) { + TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.", + attr_name); + return status; + } + } else { + std::fill_n(params_array, params_array_size_bytes / sizeof(int64_t), + fill_value); + } + return kTfLiteOk; + }; + + TF_LITE_ENSURE_STATUS( + 
LoadAttr(params->window_dimensions, sizeof(params->window_dimensions), + schema_params->window_dimensions(), "window_dimensions", + /*expected_size=*/rank, /*fill_value=*/1)); + TF_LITE_ENSURE_STATUS( + LoadAttr(params->window_strides, sizeof(params->window_strides), + schema_params->window_strides(), "window_strides", + /*expected_size=*/rank, /*fill_value=*/1)); + TF_LITE_ENSURE_STATUS( + LoadAttr(params->base_dilations, sizeof(params->base_dilations), + schema_params->base_dilations(), "base_dilations", + /*expected_size=*/rank, /*fill_value=*/1)); + TF_LITE_ENSURE_STATUS( + LoadAttr(params->window_dilations, sizeof(params->window_dilations), + schema_params->window_dilations(), "window_dilations", + /*expected_size=*/rank, /*fill_value=*/1)); + TF_LITE_ENSURE_STATUS(LoadAttr(params->padding, sizeof(params->padding), + schema_params->padding(), "padding", + /*expected_size=*/2 * rank, + /*fill_value=*/0)); + + params->body_subgraph_index = schema_params->body_subgraph_index(); + *builtin_data = params.release(); + return kTfLiteOk; + } + TF_LITE_REPORT_ERROR( + error_reporter, + "Could not get 'stablehlo.reduce_window' operation parameters."); + return kTfLiteError; +} + +TfLiteStatus ParseStablehloScatter(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const StablehloScatterOptions* schema_params = + op->builtin_options_2_as_StablehloScatterOptions(); + if (schema_params) { + params->indices_are_sorted = schema_params->indices_are_sorted(); + + if (schema_params->update_window_dims()) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->update_window_dims()->size() * sizeof(int64_t), + schema_params->update_window_dims(), 
params->update_window_dims, + error_reporter, "stablehlo_scatter")); + params->num_update_window_dims = + schema_params->update_window_dims()->size(); + } + + if (schema_params->inserted_window_dims()) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->inserted_window_dims()->size() * sizeof(int64_t), + schema_params->inserted_window_dims(), params->inserted_window_dims, + error_reporter, "stablehlo_scatter")); + params->num_inserted_window_dims = + schema_params->inserted_window_dims()->size(); + } + + if (schema_params->scatter_dims_to_operand_dims()) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->scatter_dims_to_operand_dims()->size() * + sizeof(int64_t), + schema_params->scatter_dims_to_operand_dims(), + params->scatter_dims_to_operand_dims, error_reporter, + "stablehlo_scatter")); + params->num_scatter_dims_to_operand_dims = + schema_params->scatter_dims_to_operand_dims()->size(); + } + + params->index_vector_dim = schema_params->index_vector_dim(); + params->unique_indices = schema_params->unique_indices(); + params->update_computation_subgraph_index = + schema_params->update_computation_subgraph_index(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. 
+ } + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const StablehloRngBitGeneratorOptions* schema_params = + op->builtin_options_2_as_StablehloRngBitGeneratorOptions(); + if (schema_params != nullptr) { + params->algorithm = ConvertRngAlgorithm(schema_params->algorithm()); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseStablehloGather(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const StablehloGatherOptions* schema_params = + op->builtin_options_2_as_StablehloGatherOptions(); + + if (schema_params != nullptr) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + /*max_size_of_buffer=*/schema_params->offset_dims()->size() * + sizeof(int64_t), + /*flat_vector=*/schema_params->offset_dims(), + /*buffer=*/params->offset_dims, /*error_reporter=*/error_reporter, + /*op_name=*/"stablehlo_gather")); + params->num_offset_dims = schema_params->offset_dims()->size(); + + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->collapsed_slice_dims()->size() * sizeof(int64_t), + 
schema_params->collapsed_slice_dims(), params->collapsed_slice_dims, + error_reporter, "stablehlo_gather")); + params->num_collapsed_slice_dims = + schema_params->collapsed_slice_dims()->size(); + + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->start_index_map()->size() * sizeof(int64_t), + schema_params->start_index_map(), params->start_index_map, + error_reporter, "stablehlo_gather")); + params->num_start_index_map = schema_params->start_index_map()->size(); + + params->index_vector_dim = schema_params->index_vector_dim(); + + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + schema_params->slice_sizes()->size() * sizeof(int64_t), + schema_params->slice_sizes(), params->slice_sizes, error_reporter, + "stablehlo_gather")); + params->num_slice_sizes = schema_params->slice_sizes()->size(); + + params->indices_are_sorted = schema_params->indices_are_sorted(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. 
+ } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseStablehloPad(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + const StablehloPadOptions* schema_params = + op->builtin_options_2_as_StablehloPadOptions(); + + if (schema_params) { + auto LoadAttr = + [&error_reporter]( + int64_t* params_array, const size_t params_array_size_bytes, + const flatbuffers::Vector* const flatbuffer_vector, + const char* const attr_name) -> TfLiteStatus { + TfLiteStatus status = FlatBufferIntVectorToArray( + params_array_size_bytes, flatbuffer_vector, params_array, + error_reporter, "stablehlo.pad"); + if (status != kTfLiteOk) { + TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.", + attr_name); + } + return status; + }; + + TF_LITE_ENSURE_STATUS( + LoadAttr(params->edge_padding_low, sizeof(params->edge_padding_low), + schema_params->edge_padding_low(), "edge_padding_low")); + TF_LITE_ENSURE_STATUS( + LoadAttr(params->edge_padding_high, sizeof(params->edge_padding_high), + schema_params->edge_padding_high(), "edge_padding_high")); + TF_LITE_ENSURE_STATUS( + LoadAttr(params->interior_padding, sizeof(params->interior_padding), + schema_params->interior_padding(), "interior_padding")); + if (schema_params->edge_padding_low()->size() != + schema_params->edge_padding_high()->size() || + schema_params->edge_padding_low()->size() != + schema_params->interior_padding()->size()) { + TF_LITE_REPORT_ERROR(error_reporter, + "'stablehlo.pad' operation parameter array sizes " + "are not consistent."); + return kTfLiteError; + } + *builtin_data = params.release(); + return kTfLiteOk; + } + TF_LITE_REPORT_ERROR(error_reporter, + "Could not get 'stablehlo.pad' operation parameters."); + return kTfLiteError; +} + 
+TfLiteStatus ParseStablehloComposite(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + const StableHLOCompositeOptions* schema_params = + op->builtin_options_2_as_StableHLOCompositeOptions(); + if (schema_params) { + params->name = schema_params->name()->c_str(); + params->version = schema_params->version(); + params->subgraph_index = schema_params->decomposition_subgraph_index(); + params->attributes = schema_params->composite_attributes()->data(); + params->attributes_size = schema_params->composite_attributes()->size(); + *builtin_data = params.release(); + return kTfLiteOk; + } + TF_LITE_REPORT_ERROR( + error_reporter, + "Could not get 'stablehlo.composite' operation parameters."); + return kTfLiteError; +} + +TfLiteStatus ParseStablehloShiftLeft(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + return kTfLiteOk; +} + +TfLiteStatus ParseStablehloCase(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + + const StablehloCaseOptions* schema_params = + op->builtin_options_2_as_StablehloCaseOptions(); + if (schema_params) { + auto LoadAttr = + [&error_reporter]( + int32_t* params_array, const size_t params_array_size_bytes, + const flatbuffers::Vector* const flatbuffer_vector, + const char* const attr_name) -> TfLiteStatus { + TfLiteStatus status = FlatBufferIntVectorToArray( + params_array_size_bytes, flatbuffer_vector, params_array, + error_reporter, "stablehlo.case"); + if (status != kTfLiteOk) { + TF_LITE_REPORT_ERROR(error_reporter, 
"Check the '%s' attribute.", + attr_name); + } + return status; + }; + + TF_LITE_ENSURE_STATUS(LoadAttr(params->branch_subgraph_indices, + sizeof(params->branch_subgraph_indices), + schema_params->branch_subgraph_indices(), + "branch subgraph indices")); + params->num_branches = schema_params->branch_subgraph_indices()->size(); + *builtin_data = params.release(); + return kTfLiteOk; + } + TF_LITE_REPORT_ERROR(error_reporter, + "Could not get 'stablehlo.case' operation parameters."); + return kTfLiteError; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1558,6 +2483,14 @@ TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { SafeBuiltinDataAllocator safe_allocator(allocator); @@ -1574,7 +2507,7 @@ TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); @@ -1589,6 +2522,14 @@ TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1607,7 +2548,40 @@ TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseSpaceToDepth(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions(); + if (schema_params != nullptr) { + params->block_size = schema_params->block_size(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1631,7 +2605,7 @@ TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1655,7 +2629,65 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
+ } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = + safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* seq_lstm_params = + op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) { + params->activation = + ConvertActivation(seq_lstm_params->fused_activation_function()); + params->cell_clip = seq_lstm_params->cell_clip(); + params->proj_clip = seq_lstm_params->proj_clip(); + params->time_major = seq_lstm_params->time_major(); + params->asymmetric_quantize_inputs = + seq_lstm_params->asymmetric_quantize_inputs(); + params->diagonal_recurrent_tensors = + seq_lstm_params->diagonal_recurrent_tensors(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + SafeBuiltinDataAllocator safe_allocator(allocator); + + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions(); + + if (schema_params != nullptr) { + const auto* squeeze_dims = schema_params->squeeze_dims(); + if (squeeze_dims != nullptr) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims, + error_reporter, "squeeze")); + params->num_squeeze_dims = squeeze_dims->size(); + } else { + params->num_squeeze_dims = 0; + } + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the 
params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1678,6 +2710,14 @@ TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseStridedSlice(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1699,10 +2739,11 @@ TfLiteStatus ParseStridedSlice(const Operator* op, params->ellipsis_mask = schema_params->ellipsis_mask(); params->new_axis_mask = schema_params->new_axis_mask(); params->shrink_axis_mask = schema_params->shrink_axis_mask(); + params->offset = schema_params->offset(); } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1727,7 +2768,7 @@ TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. 
} *builtin_data = params.release(); @@ -1754,7 +2795,7 @@ TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); @@ -1768,6 +2809,46 @@ TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*, void**) { return kTfLiteOk; } +// +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseTransposeConv(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + const TransposeConvOptions* transpose_conv_params = + op->builtin_options_as_TransposeConvOptions(); + if (transpose_conv_params != nullptr) { + params->padding = ConvertPadding(transpose_conv_params->padding()); + params->stride_width = transpose_conv_params->stride_w(); + params->stride_height = transpose_conv_params->stride_h(); + + params->activation = + ConvertActivation(transpose_conv_params->fused_activation_function()); + TF_LITE_ENSURE_STATUS( + ConvertTensorType(transpose_conv_params->quantized_bias_type(), + ¶ms->quantized_bias_type, error_reporter)); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in 
some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. + } + *builtin_data = params.release(); + return kTfLiteOk; +} TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1787,13 +2868,93 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. + // better understand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const VarHandleOptions* schema_params = + op->builtin_options_as_VarHandleOptions(); + + if (schema_params != nullptr) { + if (schema_params->container()) { + params->container = schema_params->container()->c_str(); + } + if (schema_params->shared_name()) { + params->shared_name = schema_params->shared_name()->c_str(); + } + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. 
+ } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const WhileOptions* schema_params = op->builtin_options_as_WhileOptions(); + + if (schema_params != nullptr) { + params->cond_subgraph_index = schema_params->cond_subgraph_index(); + params->body_subgraph_index = schema_params->body_subgraph_index(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better understand the ramifications of changing the legacy behavior. } *builtin_data = params.release(); return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBitwiseXor(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseRightShift(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1815,7 +2976,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, // * If all the builtin operators were to have their own parse functions, or we // were ok with some amount of code duplication, then this split of the .cc // files would be a lot more feasible. -#ifdef TF_LITE_STATIC_MEMORY +#ifdef ARDUINO TF_LITE_REPORT_ERROR( error_reporter, "ParseOpData is unsupported on TfLiteMicro, please use the operator " @@ -1828,3 +2989,4 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, } } // namespace tflite +// LINT.ThenChange(//tensorflow/compiler/mlir/lite/core/api/flatbuffer_conversions.cc) diff --git a/src/tensorflow/lite/core/api/flatbuffer_conversions.h b/src/tensorflow/lite/core/api/flatbuffer_conversions.h index 2540d35..5bc70cc 100644 --- a/src/tensorflow/lite/core/api/flatbuffer_conversions.h +++ b/src/tensorflow/lite/core/api/flatbuffer_conversions.h @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,8 +23,9 @@ limitations under the License. #include #include -#include "tensorflow/lite/c/common.h" +#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h" #include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -41,9 +42,8 @@ class BuiltinDataAllocator { // deallocation. 
template T* AllocatePOD() { - // TODO(b/154346074): Change this to is_trivially_destructible when all - // platform targets support that properly. - static_assert(std::is_pod::value, "Builtin data structure must be POD."); + static_assert(std::is_trivially_destructible::value, + "Builtin data structure must be POD."); void* allocated_memory = this->Allocate(sizeof(T), alignof(T)); return new (allocated_memory) T(); } @@ -75,15 +75,48 @@ TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseAssignVariable(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBatchToSpaceNd(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBroadcastArgs(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBroadcastTo(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter, 
BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseConcatenation(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -95,6 +128,14 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseDepthToSpace(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseDepthwiseConv2D(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -104,20 +145,53 @@ TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseEmbeddingLookup(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter, 
BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseFullyConnected(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -130,11 +204,18 @@ TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseL2Normalization(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -161,12 +242,23 @@ TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** 
builtin_data); + +TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -189,6 +281,9 @@ TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -196,6 +291,11 @@ TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseReadVariable(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -224,27 +324,52 @@ TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSelectV2(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseShape(const 
Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSpaceToBatchNd(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseSpaceToDepth(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSquaredDifference(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseStridedSlice(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -259,9 +384,82 @@ TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseTranspose(const Operator* op, 
ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseTransposeConv(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBitwiseXor(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseRightShift(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloScatter(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloGather(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloReduceWindow(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloPad(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + 
+TfLiteStatus ParseStablehloComposite(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloShiftLeft(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseStablehloCase(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + } // namespace tflite #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ diff --git a/src/tensorflow/lite/core/api/op_resolver.cpp b/src/tensorflow/lite/core/api/op_resolver.cpp deleted file mode 100644 index c5dffb6..0000000 --- a/src/tensorflow/lite/core/api/op_resolver.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/core/api/op_resolver.h" - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_utils.h" - -namespace tflite { - -TfLiteStatus GetRegistrationFromOpCode( - const OperatorCode* opcode, const OpResolver& op_resolver, - ErrorReporter* error_reporter, const TfLiteRegistration** registration) { - TfLiteStatus status = kTfLiteOk; - *registration = nullptr; - auto builtin_code = GetBuiltinCode(opcode); - int version = opcode->version(); - - if (builtin_code > BuiltinOperator_MAX || - builtin_code < BuiltinOperator_MIN) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Op builtin_code out of range: %d. Are you using old TFLite binary " - "with newer model?", - builtin_code); - status = kTfLiteError; - } else if (builtin_code != BuiltinOperator_CUSTOM) { - *registration = op_resolver.FindOp(builtin_code, version); - if (*registration == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Didn't find op for builtin opcode '%s' version '%d'\n", - EnumNameBuiltinOperator(builtin_code), version); - status = kTfLiteError; - } - } else if (!opcode->custom_code()) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Operator with CUSTOM builtin_code has no custom_code.\n"); - status = kTfLiteError; - } else { - const char* name = opcode->custom_code()->c_str(); - *registration = op_resolver.FindOp(name, version); - if (*registration == nullptr) { - // Do not report error for unresolved custom op, we do the final check - // while preparing ops. 
- status = kTfLiteError; - } - } - return status; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/core/api/op_resolver.h b/src/tensorflow/lite/core/api/op_resolver.h deleted file mode 100644 index b6a8171..0000000 --- a/src/tensorflow/lite/core/api/op_resolver.h +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ -#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -/// Abstract interface that returns TfLiteRegistrations given op codes or custom -/// op names. This is the mechanism that ops being referenced in the flatbuffer -/// model are mapped to executable function pointers (TfLiteRegistrations). -class OpResolver { - public: - /// Finds the op registration for a builtin operator by enum code. - virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, - int version) const = 0; - /// Finds the op registration of a custom operator by op name. - virtual const TfLiteRegistration* FindOp(const char* op, - int version) const = 0; - - // Returns optional delegates for resolving and handling ops in the flatbuffer - // model. 
This may be used in addition to the standard TfLiteRegistration - // lookup for graph resolution. - using TfLiteDelegatePtrVector = - std::vector>; - virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const { - return TfLiteDelegatePtrVector(); - } - - virtual ~OpResolver() {} -}; - -// Handles the logic for converting between an OperatorCode structure extracted -// from a flatbuffer and information about a registered operator -// implementation. -TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, - const OpResolver& op_resolver, - ErrorReporter* error_reporter, - const TfLiteRegistration** registration); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ diff --git a/src/tensorflow/lite/core/api/profiler.h b/src/tensorflow/lite/core/api/profiler.h deleted file mode 100644 index 897efbe..0000000 --- a/src/tensorflow/lite/core/api/profiler.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_ -#define TENSORFLOW_LITE_CORE_API_PROFILER_H_ - -#include - -namespace tflite { - -// A simple utility for enabling profiled event tracing in TensorFlow Lite. 
-class Profiler { - public: - // As certain Profiler instance might be only interested in certain event - // types, we define each event type value to allow a Profiler to use - // bitmasking bitwise operations to determine whether an event should be - // recorded or not. - enum class EventType { - // Default event type, the metadata field has no special significance. - DEFAULT = 1, - - // The event is an operator invocation and the event_metadata field is the - // index of operator node. - OPERATOR_INVOKE_EVENT = 2, - - // The event is an invocation for an internal operator of a TFLite delegate. - // The event_metadata field is the index of operator node that's specific to - // the delegate. - DELEGATE_OPERATOR_INVOKE_EVENT = 4, - - // The event is a recording of runtime instrumentation such as the overall - // TFLite runtime status, the TFLite delegate status (if a delegate - // is applied), and the overall model inference latency etc. - // Note, the delegate status and overall status are stored as separate - // event_metadata fields. In particular, the delegate status is encoded - // as DelegateStatus::full_status(). - GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8, - }; - - virtual ~Profiler() {} - - // Signals the beginning of an event and returns a handle to the profile - // event. The `event_metadata1` and `event_metadata2` have different - // interpretations based on the actual Profiler instance and the `event_type`. - // For example, as for the 'SubgraphAwareProfiler' defined in - // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT, - // `event_metadata1` represents the index of a TFLite node, and - // `event_metadata2` represents the index of the subgraph that this event - // comes from. - virtual uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) = 0; - // Similar w/ the above, but `event_metadata2` defaults to 0. 
- uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata) { - return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0); - } - - // Signals an end to the specified profile event with 'event_metadata's, This - // is useful when 'event_metadata's are not available when the event begins - // or when one wants to overwrite the 'event_metadata's set at the beginning. - virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1, - int64_t event_metadata2) {} - // Signals an end to the specified profile event. - virtual void EndEvent(uint32_t event_handle) = 0; - - // Appends an event of type 'event_type' with 'tag' and 'event_metadata' - // which started at 'start' and ended at 'end' - // Note: - // In cases were ProfileSimmarizer and tensorflow::StatsCalculator are used - // they assume the value is in "usec", if in any case subclasses - // didn't put usec, then the values are not meaningful. - // TODO karimnosseir: Revisit and make the function more clear. - void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata) { - AddEvent(tag, event_type, start, end, event_metadata, - /*event_metadata2*/ 0); - } - - virtual void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) {} - - protected: - friend class ScopedProfile; -}; - -// Adds a profile event to `profiler` that begins with the construction -// of the object and ends when the object goes out of scope. -// The lifetime of tag should be at least the lifetime of `profiler`. -// `profiler` may be null, in which case nothing is profiled. 
-class ScopedProfile { - public: - ScopedProfile(Profiler* profiler, const char* tag, - Profiler::EventType event_type = Profiler::EventType::DEFAULT, - int64_t event_metadata = 0) - : profiler_(profiler), event_handle_(0) { - if (profiler) { - event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata); - } - } - - ~ScopedProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_); - } - } - - protected: - Profiler* profiler_; - uint32_t event_handle_; -}; - -class ScopedOperatorProfile : public ScopedProfile { - public: - ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index) - : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedDelegateOperatorProfile : public ScopedProfile { - public: - ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag, - int node_index) - : ScopedProfile(profiler, tag, - Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedRuntimeInstrumentationProfile : public ScopedProfile { - public: - ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag) - : ScopedProfile( - profiler, tag, - Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {} - - void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) { - if (profiler_) { - delegate_status_ = delegate_status; - interpreter_status_ = interpreter_status; - } - } - - ~ScopedRuntimeInstrumentationProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_); - } - } - - private: - int64_t delegate_status_; - int64_t interpreter_status_; -}; - -} // namespace tflite - -#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr -#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr) - -#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \ - tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - 
(profiler), (tag)) - -#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - (profiler), (tag), (node_index)) - -#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \ - _profile_, __COUNTER__)((profiler), (tag), (node_index)) - -#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \ - profiler, tag, delegate_status, interpreter_status) \ - do { \ - if (!profiler) { \ - const auto handle = profiler->BeginEvent( \ - tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \ - delegate_status, interpreter_status); \ - profiler->EndEvent(handle); \ - } \ - } while (false); - -#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_ diff --git a/src/tensorflow/lite/core/api/tensor_utils.cpp b/src/tensorflow/lite/core/api/tensor_utils.cpp index 3aac16b..18a643c 100644 --- a/src/tensorflow/lite/core/api/tensor_utils.cpp +++ b/src/tensorflow/lite/core/api/tensor_utils.cpp @@ -17,7 +17,7 @@ limitations under the License. #include -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/common.h" namespace tflite { diff --git a/src/tensorflow/lite/core/api/tensor_utils.h b/src/tensorflow/lite/core/api/tensor_utils.h index 9f1cf94..440da8a 100644 --- a/src/tensorflow/lite/core/api/tensor_utils.h +++ b/src/tensorflow/lite/core/api/tensor_utils.h @@ -16,7 +16,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/common.h" namespace tflite { diff --git a/src/tensorflow/lite/core/c/builtin_op_data.h b/src/tensorflow/lite/core/c/builtin_op_data.h new file mode 100644 index 0000000..02e8bc0 --- /dev/null +++ b/src/tensorflow/lite/core/c/builtin_op_data.h @@ -0,0 +1,26 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/builtin_op_data.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ +#define TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ + +#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" + +#endif // TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ diff --git a/src/tensorflow/lite/core/c/c_api_types.h b/src/tensorflow/lite/core/c/c_api_types.h new file mode 100644 index 0000000..78b8c34 --- /dev/null +++ b/src/tensorflow/lite/core/c/c_api_types.h @@ -0,0 +1,165 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/c_api_types.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. + +/// This file declares types used by the pure C inference API defined in +/// c_api.h, some of which are also used in the C++ and C kernel and interpreter +/// APIs. +/// +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/// \note Users of TensorFlow Lite should use +/// \code +/// #include "tensorflow/lite/c/c_api_types.h" +/// \endcode +/// to access the APIs documented on this page. +// NOLINTEND(whitespace/line_length) +// clang-format on + +// IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h" + +#ifndef TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tensorflow/compiler/mlir/lite/core/c/tflite_types.h" + +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup c_api_types lite/c/c_api_types.h + * @{ + */ +// NOLINTEND(whitespace/line_length) +// clang-format on + +// Define TFL_CAPI_EXPORT macro to export a function properly with a shared +// library. 
+#ifdef SWIG +#define TFL_CAPI_EXPORT +#elif defined(TFL_STATIC_LIBRARY_BUILD) +#define TFL_CAPI_EXPORT +#else // not defined TFL_STATIC_LIBRARY_BUILD +#if defined(_WIN32) +#ifdef TFL_COMPILE_LIBRARY +#define TFL_CAPI_EXPORT __declspec(dllexport) +#else +#define TFL_CAPI_EXPORT __declspec(dllimport) +#endif // TFL_COMPILE_LIBRARY +#else +#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) +#endif // _WIN32 +#endif // SWIG + +/// Note that new error status values may be added in future in order to +/// indicate more fine-grained internal states, therefore, applications should +/// not rely on status values being members of the enum. +typedef enum TfLiteStatus { + /// Success + kTfLiteOk = 0, + + /// Generally referring to an error in the runtime (i.e. interpreter) + kTfLiteError = 1, + + /// Generally referring to an error from a TfLiteDelegate itself. + kTfLiteDelegateError = 2, + + /// Generally referring to an error in applying a delegate due to + /// incompatibility between runtime and delegate, e.g., this error is returned + /// when trying to apply a TF Lite delegate onto a model graph that's already + /// immutable. + kTfLiteApplicationError = 3, + + /// Generally referring to serialized delegate data not being found. + /// See tflite::delegates::Serialization. + kTfLiteDelegateDataNotFound = 4, + + /// Generally referring to data-writing issues in delegate serialization. + /// See tflite::delegates::Serialization. + kTfLiteDelegateDataWriteError = 5, + + /// Generally referring to data-reading issues in delegate serialization. + /// See tflite::delegates::Serialization. + kTfLiteDelegateDataReadError = 6, + + /// Generally referring to issues when the TF Lite model has ops that cannot + /// be resolved at runtime. This could happen when the specific op is not + /// registered or built with the TF Lite framework. + kTfLiteUnresolvedOps = 7, + + /// Generally referring to invocation cancelled by the user. + /// See `interpreter::Cancel`. 
+ // TODO(b/194915839): Implement `interpreter::Cancel`. + // TODO(b/250636993): Cancellation triggered by `SetCancellationFunction` + // should also return this status code. + kTfLiteCancelled = 8, + + // This status is returned by Prepare when the output shape cannot be + // determined but the size of the output tensor is known. For example, the + // output of reshape is always the same size as the input. This means that + // such ops may be + // done in place. + kTfLiteOutputShapeNotKnown = 9, +} TfLiteStatus; + +// -------------------------------------------------------------------------- +// Opaque types used by c_api.h, c_api_opaque.h and common.h. + +/// TfLiteOpaqueContext is an opaque version of TfLiteContext; +typedef struct TfLiteOpaqueContext TfLiteOpaqueContext; + +/// TfLiteOpaqueNode is an opaque version of TfLiteNode; +typedef struct TfLiteOpaqueNode TfLiteOpaqueNode; + +/// TfLiteOpaqueTensor is an opaque version of TfLiteTensor; +typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor; + +/// TfLiteDelegate: allows delegation of nodes to alternative backends. +/// Forward declaration of concrete type declared in common.h. +typedef struct TfLiteDelegate TfLiteDelegate; + +/// TfLiteOpaqueDelegateStruct: unconditionally opaque version of +/// TfLiteDelegate; allows delegation of nodes to alternative backends. +/// +/// This is an abstract type that is intended to have the same +/// role as TfLiteDelegate, but without exposing the implementation +/// details of how delegates are implemented. +/// +/// WARNING: This is an experimental type and subject to change. +typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct; + +/// TfLiteOpaqueDelegate: conditionally opaque version of +/// TfLiteDelegate; allows delegation of nodes to alternative backends. +/// For TF Lite in Play Services, this is an opaque type, +/// but for regular TF Lite, this is just a typedef for TfLiteDelegate. 
+/// +/// WARNING: This is an experimental type and subject to change. +#if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE +typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate; +#else +typedef TfLiteDelegate TfLiteOpaqueDelegate; +#endif + +/** @} */ + +#ifdef __cplusplus +} // extern C +#endif +#endif // TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ diff --git a/src/tensorflow/lite/core/c/common.cpp b/src/tensorflow/lite/core/c/common.cpp new file mode 100644 index 0000000..09e71d5 --- /dev/null +++ b/src/tensorflow/lite/core/c/common.cpp @@ -0,0 +1,511 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#include "tensorflow/lite/core/c/common.h"
+
+#ifndef TF_LITE_STATIC_MEMORY
+#include <stdlib.h>
+#endif  // TF_LITE_STATIC_MEMORY
+
+#include <cstring>
+#include <type_traits>
+#include <utility>
+
+#include "tensorflow/lite/core/c/c_api_types.h"
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+#include "tensorflow/lite/tensorflow_profiler_logger.h"
+#endif
+
+namespace {
+
+template <class T>
+size_t TfLiteVarArrayGetSizeInBytes(const int size) {
+  constexpr size_t data_size = sizeof(std::declval<T>().data[0]);
+  size_t computed_size = sizeof(T) + data_size * size;
+#if defined(_MSC_VER)
+  // Context for why this is needed is in http://b/189926408#comment21
+  computed_size -= data_size;
+#endif
+  return computed_size;
+}
+
+template <class T, class U>
+int TfLiteVarArrayEqualsArray(const T* const a, const int b_size,
+                              const U* const b_data) {
+  static_assert(std::is_same<decltype(a->data[0]), const U&>::value,
+                "TfLiteVarArrayEqualsArray can only compare same type arrays");
+  if (a == nullptr) {
+    return b_size == 0;
+  }
+  if (a->size != b_size) {
+    return 0;
+  }
+  return !memcmp(a->data, b_data, a->size * sizeof(a->data[0]));
+}
+
+template <class T>
+int TfLiteVarArrayEqual(const T* const a, const T* const b) {
+  // This goes first because null arrays must compare equal.
+ if (a == b) { + return 1; + } + if (a == nullptr || b == nullptr) { + return 0; + } + return TfLiteVarArrayEqualsArray(a, b->size, b->data); +} + +#ifndef TF_LITE_STATIC_MEMORY + +template +T* TfLiteVarArrayCreate(const int size) { + const size_t alloc_size = TfLiteVarArrayGetSizeInBytes(size); + if (alloc_size <= 0) { + return nullptr; + } + T* ret = (T*)malloc(alloc_size); + if (!ret) { + return nullptr; + } + ret->size = size; + return ret; +} + +template +T* TfLiteVarArrayCopy(const T* const src) { + if (!src) { + return nullptr; + } + T* const ret = TfLiteVarArrayCreate(src->size); + if (ret) { + memcpy(ret->data, src->data, src->size * sizeof(src->data[0])); + } + return ret; +} + +#endif // TF_LITE_STATIC_MEMORY + +template +void TfLiteVarArrayFree(T* a) { + free(a); +} + +} // namespace + +extern "C" { + +size_t TfLiteIntArrayGetSizeInBytes(int size) { + return TfLiteVarArrayGetSizeInBytes(size); +} + +int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { + return TfLiteVarArrayEqual(a, b); +} + +int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, + const int b_data[]) { + return TfLiteVarArrayEqualsArray(a, b_size, b_data); +} + +#ifndef TF_LITE_STATIC_MEMORY + +TfLiteIntArray* TfLiteIntArrayCreate(int size) { + return TfLiteVarArrayCreate(size); +} + +TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { + return TfLiteVarArrayCopy(src); +} + +void TfLiteIntArrayFree(TfLiteIntArray* a) { TfLiteVarArrayFree(a); } + +#endif // TF_LITE_STATIC_MEMORY + +int TfLiteFloatArrayGetSizeInBytes(int size) { + return TfLiteVarArrayGetSizeInBytes(size); +} + +#ifndef TF_LITE_STATIC_MEMORY + +TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { + return TfLiteVarArrayCreate(size); +} + +TfLiteFloatArray* TfLiteFloatArrayCopy(const TfLiteFloatArray* src) { + return TfLiteVarArrayCopy(src); +} + +void TfLiteFloatArrayFree(TfLiteFloatArray* a) { TfLiteVarArrayFree(a); } + +void TfLiteTensorDataFree(TfLiteTensor* t) { + if 
(t->allocation_type == kTfLiteVariantObject && t->data.data) { + delete static_cast(t->data.data); + } else if (t->allocation_type == kTfLiteDynamic || + t->allocation_type == kTfLitePersistentRo) { + if (t->data.raw) { +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/true); + tflite::OnTfLiteTensorDealloc(t); +#endif + free(t->data.raw); +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/false); +#endif + } + } + t->data.raw = nullptr; +} + +void TfLiteQuantizationFree(TfLiteQuantization* quantization) { + if (quantization->type == kTfLiteAffineQuantization) { + TfLiteAffineQuantization* q_params = + (TfLiteAffineQuantization*)(quantization->params); + if (q_params->scale) { + TfLiteFloatArrayFree(q_params->scale); + q_params->scale = nullptr; + } + if (q_params->zero_point) { + TfLiteIntArrayFree(q_params->zero_point); + q_params->zero_point = nullptr; + } + free(q_params); + } + quantization->params = nullptr; + quantization->type = kTfLiteNoQuantization; +} + +void TfLiteSparsityFree(TfLiteSparsity* sparsity) { + if (sparsity == nullptr) { + return; + } + + if (sparsity->traversal_order) { + TfLiteIntArrayFree(sparsity->traversal_order); + sparsity->traversal_order = nullptr; + } + + if (sparsity->block_map) { + TfLiteIntArrayFree(sparsity->block_map); + sparsity->block_map = nullptr; + } + + if (sparsity->dim_metadata) { + int i = 0; + for (; i < sparsity->dim_metadata_size; i++) { + TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; + if (metadata.format == kTfLiteDimSparseCSR) { + TfLiteIntArrayFree(metadata.array_segments); + metadata.array_segments = nullptr; + TfLiteIntArrayFree(metadata.array_indices); + metadata.array_indices = nullptr; + } + } + free(sparsity->dim_metadata); + sparsity->dim_metadata = nullptr; + } + + free(sparsity); +} + +void TfLiteTensorFree(TfLiteTensor* t) { + TfLiteTensorDataFree(t); + if (t->dims) TfLiteIntArrayFree(t->dims); + t->dims = nullptr; + + if 
(t->dims_signature) { + TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature); + } + t->dims_signature = nullptr; + + TfLiteQuantizationFree(&t->quantization); + TfLiteSparsityFree(t->sparsity); + t->sparsity = nullptr; +} + +void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, + TfLiteQuantizationParams quantization, char* buffer, + size_t size, TfLiteAllocationType allocation_type, + const void* allocation, bool is_variable, + TfLiteTensor* tensor) { + TfLiteTensorFree(tensor); + tensor->type = type; + tensor->name = name; + tensor->dims = dims; + tensor->params = quantization; + tensor->data.raw = buffer; + tensor->bytes = size; + tensor->allocation_type = allocation_type; + tensor->allocation = allocation; + tensor->is_variable = is_variable; + + tensor->quantization.type = kTfLiteNoQuantization; + tensor->quantization.params = nullptr; +} + +TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) { + if (!src || !dst) return kTfLiteOk; + if (src->bytes != dst->bytes) return kTfLiteError; + if (src == dst) return kTfLiteOk; + dst->type = src->type; + if (dst->dims) TfLiteIntArrayFree(dst->dims); + dst->dims = TfLiteIntArrayCopy(src->dims); + if (src->allocation_type == kTfLiteVariantObject) { + // An edge case exists in control flow ops when they copy inputs to outputs + // before invoking any body, in this case the `dst` will not have its + // `allocation_type` set properly, so we handle here for now. + if (dst->allocation_type != kTfLiteVariantObject) { + TfLiteTensorDataFree(dst); + dst->allocation_type = kTfLiteVariantObject; + } + auto* dst_vd = static_cast(dst->data.data); + auto* src_vd = static_cast(src->data.data); + + // `CloneTo` will handle the case when `dst_vd` is nullptr, so it is safe + // to `CloneTo` something which was "freed". Also, returning from `CloneTo` + // will implicitly cast to `VariantData`; don't need static cast here. 
+ dst->data.data = src_vd->CloneTo(dst_vd); + } else { + memcpy(dst->data.raw, src->data.raw, src->bytes); + } + dst->buffer_handle = src->buffer_handle; + dst->data_is_stale = src->data_is_stale; + dst->delegate = src->delegate; + + return kTfLiteOk; +} + +TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, + bool preserve_data) { + if (tensor->allocation_type != kTfLiteDynamic && + tensor->allocation_type != kTfLitePersistentRo) { + return kTfLiteOk; + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/true); +#endif + // This buffer may be consumed by XNNPack. + size_t alloc_bytes = num_bytes + /*XNN_EXTRA_BYTES=*/16; + // TODO(b/145340303): Tensor data should be aligned. + if (!tensor->data.data) { + tensor->data.data = (char*)malloc(alloc_bytes); +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorAlloc(tensor, alloc_bytes); +#endif + } else if (num_bytes > tensor->bytes) { +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorDealloc(tensor); +#endif + if (preserve_data) { + tensor->data.data = (char*)realloc(tensor->data.data, alloc_bytes); + } else { + // Calling free and malloc can be more efficient as it avoids needlessly + // copying the data when it is not required. + free(tensor->data.data); + tensor->data.data = (char*)malloc(alloc_bytes); + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorAlloc(tensor, alloc_bytes); +#endif + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/false); +#endif + tensor->bytes = num_bytes; + if (tensor->data.data == nullptr && num_bytes != 0) { + // We are done allocating but tensor is pointing to null and a valid size + // was requested, so we error. 
+ return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { + return TfLiteTensorResizeMaybeCopy(num_bytes, tensor, true); +} +#endif // TF_LITE_STATIC_MEMORY + +const char* TfLiteTypeGetName(TfLiteType type) { + switch (type) { + case kTfLiteNoType: + return "NOTYPE"; + case kTfLiteFloat32: + return "FLOAT32"; + case kTfLiteUInt16: + return "UINT16"; + case kTfLiteInt16: + return "INT16"; + case kTfLiteInt32: + return "INT32"; + case kTfLiteUInt32: + return "UINT32"; + case kTfLiteUInt8: + return "UINT8"; + case kTfLiteInt8: + return "INT8"; + case kTfLiteInt64: + return "INT64"; + case kTfLiteUInt64: + return "UINT64"; + case kTfLiteBool: + return "BOOL"; + case kTfLiteComplex64: + return "COMPLEX64"; + case kTfLiteComplex128: + return "COMPLEX128"; + case kTfLiteString: + return "STRING"; + case kTfLiteFloat16: + return "FLOAT16"; + case kTfLiteBFloat16: + return "BFLOAT16"; + case kTfLiteFloat64: + return "FLOAT64"; + case kTfLiteResource: + return "RESOURCE"; + case kTfLiteVariant: + return "VARIANT"; + case kTfLiteInt4: + return "INT4"; + } + return "Unknown type"; +} + +TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; } + +// Returns a tensor data allocation strategy. 
+TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy( + const TfLiteTensor* const t) { + switch (t->allocation_type) { + case kTfLiteMemNone: + return kTfLiteAllocationStrategyNone; + case kTfLiteMmapRo: + return kTfLiteAllocationStrategyMMap; + case kTfLiteArenaRw: + return kTfLiteAllocationStrategyArena; + case kTfLiteArenaRwPersistent: + return kTfLiteAllocationStrategyArena; + case kTfLiteDynamic: + return kTfLiteAllocationStrategyMalloc; + case kTfLitePersistentRo: + return kTfLiteAllocationStrategyUnknown; + case kTfLiteCustom: + return kTfLiteAllocationStrategyUnknown; + case kTfLiteVariantObject: + return kTfLiteAllocationStrategyNew; + } + return kTfLiteAllocationStrategyUnknown; +} + +// Returns how stable a tensor data buffer address is across runs. +TfLiteRunStability TfLiteTensorGetBufferAddressStability( + const TfLiteTensor* const t) { + switch (t->allocation_type) { + case kTfLiteMemNone: + return kTfLiteRunStabilityAcrossRuns; + case kTfLiteMmapRo: + return kTfLiteRunStabilityAcrossRuns; + case kTfLiteArenaRw: + return kTfLiteRunStabilityUnstable; + case kTfLiteArenaRwPersistent: + return kTfLiteRunStabilityUnstable; + case kTfLiteDynamic: + return kTfLiteRunStabilitySingleRun; + case kTfLitePersistentRo: + return kTfLiteRunStabilitySingleRun; + case kTfLiteCustom: + return kTfLiteRunStabilityUnknown; + case kTfLiteVariantObject: + return kTfLiteRunStabilityAcrossRuns; + } + return kTfLiteRunStabilityUnknown; +} + +// Returns how stable a tensor data values are across runs. 
+TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteDynamic:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStabilitySingleRun;
+    case kTfLiteCustom:
+      return kTfLiteRunStabilityUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStabilitySingleRun;
+  }
+  return kTfLiteRunStabilityUnknown;
+}
+
+// Returns the operation step when the data of a tensor is populated.
+//
+// Some operations can precompute their results before the evaluation step. This
+// makes the data available earlier for subsequent operations.
+TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
+  switch (t->allocation_type) {
+    case kTfLiteMemNone:
+      return kTfLiteRunStepInit;
+    case kTfLiteMmapRo:
+      return kTfLiteRunStepInit;
+    case kTfLiteArenaRw:
+      return kTfLiteRunStepEval;
+    case kTfLiteArenaRwPersistent:
+      return kTfLiteRunStepEval;
+    case kTfLiteDynamic:
+      return kTfLiteRunStepEval;
+    case kTfLitePersistentRo:
+      return kTfLiteRunStepPrepare;
+    case kTfLiteCustom:
+      return kTfLiteRunStepUnknown;
+    case kTfLiteVariantObject:
+      return kTfLiteRunStepEval;
+  }
+  return kTfLiteRunStepUnknown;
+}
+
+// Returns the operation step when the shape of a tensor is computed.
+//
+// Some operations can precompute the shape of their results before the
+// evaluation step. This makes the shape available earlier for subsequent
+// operations.
+TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) { + switch (t->allocation_type) { + case kTfLiteMemNone: + return kTfLiteRunStepInit; + case kTfLiteMmapRo: + return kTfLiteRunStepInit; + case kTfLiteArenaRw: + return kTfLiteRunStepPrepare; + case kTfLiteArenaRwPersistent: + return kTfLiteRunStepPrepare; + case kTfLiteDynamic: + return kTfLiteRunStepEval; + case kTfLitePersistentRo: + return kTfLiteRunStepPrepare; + case kTfLiteCustom: + return kTfLiteRunStepUnknown; + case kTfLiteVariantObject: + return kTfLiteRunStepEval; + } + return kTfLiteRunStepUnknown; +} + +} // extern "C" diff --git a/src/tensorflow/lite/core/c/common.h b/src/tensorflow/lite/core/c/common.h new file mode 100644 index 0000000..f19f582 --- /dev/null +++ b/src/tensorflow/lite/core/c/common.h @@ -0,0 +1,1602 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/common.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. + +/// This file defines common C types and APIs for implementing operations, +/// delegates and other constructs in TensorFlow Lite. 
The actual operations and +/// delegates can be defined using C++, but the interface between the +/// interpreter and the operations are C. +/// +/// Summary of abstractions: +/// * `TF_LITE_ENSURE` - self-sufficient error checking +/// * `TfLiteStatus` - status reporting +/// * `TfLiteIntArray` - stores tensor shapes (dims), +/// * `TfLiteContext` - allows an op to access the tensors +/// * `TfLiteTensor` - tensor (a multidimensional array) +/// * `TfLiteNode` - a single node or operation +/// * `TfLiteRegistration` - the implementation of a conceptual operation. +/// * `TfLiteDelegate` - allows delegation of nodes to alternative backends. +/// +/// Some abstractions in this file are created and managed by Interpreter. +/// +/// NOTE: The order of values in these structs are "semi-ABI stable". New values +/// should be added only to the end of structs and never reordered. +/// +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/// \note Users of TensorFlow Lite should use +/// \code +/// #include "tensorflow/lite/c/common.h" +/// \endcode +/// to access the APIs documented on this page. +// NOLINTEND(whitespace/line_length) +// clang-format on + +// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h" + +#ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ +#define TENSORFLOW_LITE_CORE_C_COMMON_H_ + +#include +#include +#include +#include + +#include "tensorflow/lite/core/c/c_api_types.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup common lite/c/common.h + * @{ + */ +// NOLINTEND(whitespace/line_length) +// clang-format on + +/// The list of external context types known to TF Lite. This list exists solely +/// to avoid conflicts and to ensure ops can share the external contexts they +/// need. Access to the external contexts is controlled by one of the +/// corresponding support files. 
+typedef enum TfLiteExternalContextType { + kTfLiteEigenContext = 0, /// include eigen_support.h to use. + kTfLiteGemmLowpContext = 1, /// include gemm_support.h to use. + kTfLiteEdgeTpuContext = 2, /// Placeholder for Edge TPU support. + kTfLiteCpuBackendContext = 3, /// include cpu_backend_context.h to use. + kTfLiteLiteRtBufferContext = + 4, /// include external_litert_buffer_context.h to use. + kTfLiteMaxExternalContexts = 5 +} TfLiteExternalContextType; + +// Forward declare so dependent structs and methods can reference these types +// prior to the struct definitions. +struct TfLiteContext; +struct TfLiteDelegate; +struct TfLiteRegistration; +struct TfLiteOpaqueDelegateBuilder; + +/// An external context is a collection of information unrelated to the TF Lite +/// framework, but useful to a subset of the ops. TF Lite knows very little +/// about the actual contexts, but it keeps a list of them, and is able to +/// refresh them if configurations like the number of recommended threads +/// change. +typedef struct TfLiteExternalContext { + TfLiteExternalContextType type; + TfLiteStatus (*Refresh)(struct TfLiteContext* context); +} TfLiteExternalContext; + +// LINT.IfChange(optional_tensor) +#define kTfLiteOptionalTensor (-1) +// LINT.ThenChange(//tensorflow/compiler/mlir/lite/flatbuffer_export.cc:optional_tensor) + +/// Fixed size list of integers. 
Used for dimensions and inputs/outputs tensor +/// indices +typedef struct TfLiteIntArray { + int size; + +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + int data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + int data[0]; +#else + int data[]; +#endif +} TfLiteIntArray; + +/// Given the size (number of elements) in a TfLiteIntArray, calculate its size +/// in bytes. +size_t TfLiteIntArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +/// Create a array of a given `size` (uninitialized entries). +/// This returns a pointer, that you must free using TfLiteIntArrayFree(). +TfLiteIntArray* TfLiteIntArrayCreate(int size); +#endif + +/// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. +int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); + +/// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. +int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, + const int b_data[]); + +#ifndef TF_LITE_STATIC_MEMORY +/// Create a copy of an array passed as `src`. +/// You are expected to free memory with TfLiteIntArrayFree +TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); + +/// Free memory of array `a`. +void TfLiteIntArrayFree(TfLiteIntArray* a); +#endif // TF_LITE_STATIC_MEMORY + +/// Fixed size list of floats. Used for per-channel quantization. 
+typedef struct TfLiteFloatArray { + int size; +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + float data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + float data[0]; +#else + float data[]; +#endif +} TfLiteFloatArray; + +/// Given the size (number of elements) in a TfLiteFloatArray, calculate its +/// size in bytes. +int TfLiteFloatArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +/// Create a array of a given `size` (uninitialized entries). +/// This returns a pointer, that you must free using TfLiteFloatArrayFree(). +TfLiteFloatArray* TfLiteFloatArrayCreate(int size); + +/// Create a copy of an array passed as `src`. +/// You are expected to free memory with TfLiteFloatArrayFree. +TfLiteFloatArray* TfLiteFloatArrayCopy(const TfLiteFloatArray* src); + +/// Free memory of array `a`. +void TfLiteFloatArrayFree(TfLiteFloatArray* a); +#endif // TF_LITE_STATIC_MEMORY + +// Since we must not depend on any libraries, define a minimal subset of +// error macros while avoiding names that have pre-conceived meanings like +// assert and check. + +// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than +// calling the context->ReportError function directly, so that message strings +// can be stripped out if the binary size needs to be severely optimized. +#ifndef TF_LITE_STRIP_ERROR_STRINGS +#define TF_LITE_KERNEL_LOG(context, ...) \ + do { \ + (context)->ReportError((context), __VA_ARGS__); \ + } while (false) + +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) 
\ + do { \ + if ((context) != nullptr) { \ + (context)->ReportError((context), __VA_ARGS__); \ + } \ + } while (false) +#else // TF_LITE_STRIP_ERROR_STRINGS +#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) +#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#endif // TF_LITE_STRIP_ERROR_STRINGS + +/// Check whether value is true, and if not return kTfLiteError from +/// the current function (and report the error string msg). +#define TF_LITE_ENSURE_MSG(context, value, ...) \ + do { \ + if (!(value)) { \ + TF_LITE_KERNEL_LOG((context), __FILE__ " " __VA_ARGS__); \ + return kTfLiteError; \ + } \ + } while (0) + +/// Check whether the value `a` is true, and if not return kTfLiteError from +/// the current function, while also reporting the location of the error. +#define TF_LITE_ENSURE(context, a) \ + do { \ + if (!(a)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ + __LINE__, #a); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_STATUS(a) \ + do { \ + const TfLiteStatus s = (a); \ + if (s != kTfLiteOk) { \ + return s; \ + } \ + } while (0) + +/// Check whether the value `a == b` is true, and if not return kTfLiteError +/// from the current function, while also reporting the location of the error. +/// `a` and `b` may be evaluated more than once, so no side effects or +/// extremely expensive computations should be done. +/// +/// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. 
+#define TF_LITE_ENSURE_EQ(context, a, b) \
+ do { \
+ if ((a) != (b)) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
+ __LINE__, #a, #b, (a), (b)); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \
+ do { \
+ if ((a) != (b)) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
+ __LINE__, #a, #b, TfLiteTypeGetName(a), \
+ TfLiteTypeGetName(b)); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \
+ do { \
+ auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \
+ if (delta > epsilon) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \
+ __FILE__, __LINE__, #a, #b, static_cast<double>(a), \
+ static_cast<double>(b)); \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
+#define TF_LITE_ENSURE_OK(context, status) \
+ do { \
+ const TfLiteStatus s = (status); \
+ if ((s) != kTfLiteOk) { \
+ return s; \
+ } \
+ } while (0)
+
+// `std::unreachable` not available until CC23.
+#ifdef __GNUC__ // GCC, Clang, ICC
+
+#define TFL_UNREACHABLE() (__builtin_unreachable())
+
+#elif defined(_MSC_VER) // MSVC
+
+#define TFL_UNREACHABLE() (__assume(false))
+
+#endif
+
+/// Single-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex64 {
+ float re, im; /// real and imaginary parts, respectively.
+} TfLiteComplex64;
+
+/// Double-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex128 {
+ double re, im; /// real and imaginary parts, respectively.
+} TfLiteComplex128;
+
+/// Half precision data type compatible with the C99 definition.
+typedef struct TfLiteFloat16 {
+ uint16_t data;
+} TfLiteFloat16;
+
+/// bfloat16 data type compatible with the Google Brain definition.
+/// https://cloud.google.com/tpu/docs/bfloat16.
+/// This provides 1 bit of sign, 8 bits of exponent, and 7 bits of mantissa.
+typedef struct TfLiteBFloat16 {
+ uint16_t data;
+} TfLiteBFloat16;
+
+/// Return the name of a given type, for error reporting purposes.
+const char* TfLiteTypeGetName(TfLiteType type);
+
+/// SupportedQuantizationTypes.
+typedef enum TfLiteQuantizationType : int {
+ /// No quantization.
+ kTfLiteNoQuantization = 0,
+ /// Affine quantization (with support for per-channel quantization).
+ /// Corresponds to TfLiteAffineQuantization.
+ kTfLiteAffineQuantization = 1,
+} TfLiteQuantizationType;
+
+/// Structure specifying the quantization used by the tensor, if-any.
+typedef struct TfLiteQuantization {
+ /// The type of quantization held by params.
+ TfLiteQuantizationType type;
+ /// Holds an optional reference to a quantization param structure. The actual
+ /// type depends on the value of the `type` field (see the comment there for
+ /// the values and corresponding types).
+ void* params;
+} TfLiteQuantization;
+
+/// Parameters for asymmetric quantization across a dimension (i.e per output
+/// channel quantization).
+/// quantized_dimension specifies which dimension the scales and zero_points
+/// correspond to.
+/// For a particular value in quantized_dimension, quantized values can be
+/// converted back to float using:
+/// `real_value = scale * (quantized_value - zero_point)`
+typedef struct TfLiteAffineQuantization {
+ TfLiteFloatArray* scale;
+ TfLiteIntArray* zero_point;
+ int32_t quantized_dimension;
+} TfLiteAffineQuantization;
+
+/// A union of pointers that points to memory for a given tensor.
+///
+/// Do not access these members directly, if possible, use
+/// `GetTensorData<TYPE>(tensor)` instead, otherwise only access `.data`, as
+/// other members are deprecated.
+typedef union TfLitePtrUnion { + int32_t* i32; + uint32_t* u32; + int64_t* i64; + uint64_t* u64; + float* f; + TfLiteFloat16* f16; + TfLiteBFloat16* bf16; + double* f64; + char* raw; + const char* raw_const; + uint8_t* uint8; + bool* b; + int16_t* i16; + uint16_t* ui16; + TfLiteComplex64* c64; + TfLiteComplex128* c128; + int8_t* int8; + /// Only use this member. + void* data; +} TfLitePtrUnion; + +/// Memory allocation strategies. +/// * `kTfLiteMmapRo`: Read-only memory-mapped data, or data externally +/// allocated. +/// * `kTfLiteArenaRw`: Arena allocated with no guarantees about persistence, +/// and available during eval. +/// * `kTfLiteArenaRwPersistent`: Arena allocated but persistent across eval, +/// and only available during eval. +/// * `kTfLiteDynamic`: Allocated during eval, or for string tensors. +/// * `kTfLitePersistentRo`: Allocated and populated during prepare. This is +/// useful for tensors that can be computed during prepare and treated +/// as constant inputs for downstream ops (also in prepare). +/// * `kTfLiteCustom`: Custom memory allocation provided by the user. See +/// TfLiteCustomAllocation below. +/// * `kTfLiteVariantObject`: Allocation is an arbitrary type-erased C++ +/// object. +/// Allocation and deallocation are done through `new` and `delete`. +typedef enum TfLiteAllocationType { + kTfLiteMemNone = 0, + kTfLiteMmapRo, + kTfLiteArenaRw, + kTfLiteArenaRwPersistent, + kTfLiteDynamic, + kTfLitePersistentRo, + kTfLiteCustom, + kTfLiteVariantObject, +} TfLiteAllocationType; + +/// Memory allocation strategies. +/// +/// TfLiteAllocationType values have been overloaded to mean more than their +/// original intent. This enum should only be used to document the allocation +/// strategy used by a tensor for it data. +typedef enum TfLiteAllocationStrategy { + kTfLiteAllocationStrategyUnknown, + kTfLiteAllocationStrategyNone, /// No data is allocated. + kTfLiteAllocationStrategyMMap, /// Data is mmaped. 
+ kTfLiteAllocationStrategyArena, /// Handled by the arena. + kTfLiteAllocationStrategyMalloc, /// Uses `malloc`/`free`. + kTfLiteAllocationStrategyNew /// Uses `new[]`/`delete[]`. +} TfLiteAllocationStrategy; + +/// Describes how stable a tensor attribute is with regards to an interpreter +/// runs. +typedef enum TfLiteRunStability { + kTfLiteRunStabilityUnknown, + kTfLiteRunStabilityUnstable, /// May change at any time. + kTfLiteRunStabilitySingleRun, /// Will stay the same for one run. + kTfLiteRunStabilityAcrossRuns /// Will stay the same across all runs. +} TfLiteRunStability; + +/// Describes the steps of a TFLite operation life cycle. +typedef enum TfLiteRunStep { + kTfLiteRunStepUnknown, + kTfLiteRunStepInit, + kTfLiteRunStepPrepare, + kTfLiteRunStepEval +} TfLiteRunStep; + +/// The delegates should use zero or positive integers to represent handles. +/// -1 is reserved from unallocated status. +typedef int TfLiteBufferHandle; +enum { + kTfLiteNullBufferHandle = -1, +}; + +/// Metadata to encode each dimension in a sparse tensor. +typedef struct TfLiteDimensionMetadata { + TfLiteDimensionType format; + int dense_size; + TfLiteIntArray* array_segments; + TfLiteIntArray* array_indices; +} TfLiteDimensionMetadata; + +/// Parameters used to encode a sparse tensor. For detailed explanation of each +/// field please refer to lite/schema/schema.fbs. +typedef struct TfLiteSparsity { + TfLiteIntArray* traversal_order; + TfLiteIntArray* block_map; + TfLiteDimensionMetadata* dim_metadata; + int dim_metadata_size; +} TfLiteSparsity; + +/// Defines a custom memory allocation not owned by the runtime. +/// `data` should be aligned to kDefaultTensorAlignment defined in +/// lite/util.h. (Currently 64 bytes) +/// NOTE: See `Interpreter::SetCustomAllocationForTensor` for details on usage. +typedef struct TfLiteCustomAllocation { + void* data; + size_t bytes; +} TfLiteCustomAllocation; + +/// The flags used in `Interpreter::SetCustomAllocationForTensor`. 
+/// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. +typedef enum TfLiteCustomAllocationFlags { + kTfLiteCustomAllocationFlagsNone = 0, + /// Skips checking whether allocation.data points to an aligned buffer as + /// expected by the TFLite runtime. + /// NOTE: Setting this flag can cause crashes when calling Invoke(). + /// Use with caution. + kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, +} TfLiteCustomAllocationFlags; + +enum { kTfLiteNoBufferIdentifier = SIZE_MAX }; + +/// A tensor in the interpreter system which is a wrapper around a buffer of +/// data including a dimensionality (or NULL if not currently defined). +#ifndef TF_LITE_STATIC_MEMORY +typedef struct TfLiteTensor { + /// The data type specification for data stored in `data`. This affects + /// what member of `data` union should be used. + TfLiteType type; + /// A union of data pointers. The appropriate type should be used for a typed + /// tensor based on `type`. + TfLitePtrUnion data; + /// A pointer to a structure representing the dimensionality interpretation + /// that the buffer should have. NOTE: the product of elements of `dims` + /// and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + /// Quantization information. + TfLiteQuantizationParams params; + /// How memory is mapped + /// kTfLiteMmapRo: Memory mapped read only. + /// i.e. weights + /// kTfLiteArenaRw: Arena allocated read write memory + /// (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + /// The number of bytes required to store the data of this Tensor. I.e. + /// (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + /// type is kTfLiteFloat32 and dims = {3, 2} then + /// bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + /// An opaque pointer to a tflite::MMapAllocation + const void* allocation; + + /// Null-terminated name of this tensor. 
+ const char* name; + + /// The delegate which knows how to handle `buffer_handle`. + /// + /// WARNING: This is an experimental interface that is subject to change. + struct TfLiteDelegate* delegate; + + /// An integer buffer handle that can be handled by `delegate`. + /// The value is valid only when delegate is not null. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteBufferHandle buffer_handle; + + /// If the delegate uses its own buffer (e.g. GPU memory), the delegate is + /// responsible to set data_is_stale to true. + /// `delegate->CopyFromBufferHandle` can be called to copy the data from + /// delegate buffer. + /// + /// WARNING: This is an experimental interface that is subject to change. + bool data_is_stale; + + /// True if the tensor is a variable. + bool is_variable; + + /// Quantization information. Replaces params field above. + TfLiteQuantization quantization; + + /// Parameters used to encode a sparse tensor. + /// This is optional. The field is NULL if a tensor is dense. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteSparsity* sparsity; + + /// Optional. Encodes shapes with unknown dimensions with -1. This field is + /// only populated when unknown dimensions exist in a read-write tensor (i.e. + /// an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and + /// `dims_signature` contains [1, -1, -1, 3]). If no unknown dimensions exist + /// then `dims_signature` is either null, or set to an empty array. Note that + /// this field only exists when TF_LITE_STATIC_MEMORY is not defined. + const TfLiteIntArray* dims_signature; +} TfLiteTensor; + +/// A structure representing an instance of a node. +/// This structure only exhibits the inputs, outputs, user defined data and some +/// node properties (like statefulness), not other features like the type. +typedef struct TfLiteNode { + /// Inputs to this node expressed as indices into the simulator's tensors. 
+ TfLiteIntArray* inputs; + + /// Outputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* outputs; + + /// intermediate tensors to this node expressed as indices into the + /// simulator's tensors. + TfLiteIntArray* intermediates; + + /// Temporary tensors uses during the computations. This usually contains no + /// tensors, but ops are allowed to change that if they need scratch space of + /// any sort. + TfLiteIntArray* temporaries; + + /// Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + /// Opaque data provided to the node if the node is a builtin. This is usually + /// a structure defined in builtin_op_data.h + void* builtin_data; + + /// Custom initial data. This is the opaque data provided in the flatbuffer. + /// + /// WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; + + /// The pointer to the delegate. This is non-null only when the node is + /// created by calling `interpreter.ModifyGraphWithDelegate`. + /// + /// WARNING: This is an experimental interface that is subject to change. + struct TfLiteDelegate* delegate; + + /// Whether this op might have side effect (e.g. stateful op). + bool might_have_side_effect; +} TfLiteNode; +#else // defined(TF_LITE_STATIC_MEMORY)? +// NOTE: This flag is opt-in only at compile time. +// +// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct +// contains only the minimum fields required to initialize and prepare a micro +// inference graph. The fields in this struct have been ordered from +// largest-to-smallest for optimal struct sizeof. +// +// This struct does not use: +// - allocation +// - buffer_handle +// - data_is_stale +// - delegate +// - dims_signature +// - name +// - sparsity +typedef struct TfLiteTensor { + // TODO(b/155784997): Consider consolidating these quantization fields: + // Quantization information. 
Replaces params field above. + TfLiteQuantization quantization; + + // Quantization information. + TfLiteQuantizationParams params; + + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. NOTE: the product of elements of `dims` + // and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + + // The number of bytes required to store the data of this Tensor. I.e. + // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + // type is kTfLiteFloat32 and dims = {3, 2} then + // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; + + // How memory is mapped + // kTfLiteMmapRo: Memory mapped read only. + // i.e. weights + // kTfLiteArenaRw: Arena allocated read write memory + // (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + + // True if the tensor is a variable. + bool is_variable; +} TfLiteTensor; + +// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains +// only the minimum fields required to represent a node. +// +// This struct does not use: +// - delegate +// - intermediates +// - temporaries +typedef struct TfLiteNode { + // Inputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* inputs; + + // Outputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* outputs; + + // intermediate tensors to this node expressed as indices into the simulator's + // tensors. + TfLiteIntArray* intermediates; + + // Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + // Opaque data provided to the node if the node is a builtin. 
This is usually + // a structure defined in builtin_op_data.h + void* builtin_data; + + // Custom initial data. This is the opaque data provided in the flatbuffer. + // + // WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; +} TfLiteNode; +#endif // TF_LITE_STATIC_MEMORY + +/// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount +/// of information required for a kernel to run during TfLiteRegistration::Eval. +// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM +// builds with this flag by default internally. +typedef struct TfLiteEvalTensor { + /// A union of data pointers. The appropriate type should be used for a typed + /// tensor based on `type`. + TfLitePtrUnion data; + + /// A pointer to a structure representing the dimensionality interpretation + /// that the buffer should have. + TfLiteIntArray* dims; + + /// The data type specification for data stored in `data`. This affects + /// what member of `data` union should be used. + TfLiteType type; +} TfLiteEvalTensor; + +#ifndef TF_LITE_STATIC_MEMORY +/// Free data memory of tensor `t`. +void TfLiteTensorDataFree(TfLiteTensor* t); + +/// Free quantization data. +void TfLiteQuantizationFree(TfLiteQuantization* quantization); + +/// Free sparsity parameters. +void TfLiteSparsityFree(TfLiteSparsity* sparsity); + +/// Free memory of tensor `t`. +void TfLiteTensorFree(TfLiteTensor* t); + +/// Set all of a tensor's fields (and free any previously allocated data). +void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, + TfLiteQuantizationParams quantization, char* buffer, + size_t size, TfLiteAllocationType allocation_type, + const void* allocation, bool is_variable, + TfLiteTensor* tensor); + +/// Copies the contents of `src` in `dst`. +/// Function does nothing if either `src` or `dst` is passed as nullptr and +/// return `kTfLiteOk`. 
+/// Returns `kTfLiteError` if `src` and `dst` doesn't have matching data size. +/// Note function copies contents, so it won't create new data pointer +/// or change allocation type. +/// All Tensor related properties will be copied from `src` to `dst` like +/// quantization, sparsity, ... +TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst); + +/// Change the size of the memory block owned by `tensor` to `num_bytes`. +/// Tensors with allocation types other than `kTfLiteDynamic` will be ignored +/// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be +/// assigned a pointer which can safely be passed to free or realloc if +/// `num_bytes` is zero. If `preserve_data` is true, tensor data will be +/// unchanged in the range from the start of the region up to the minimum of the +/// old and new sizes. In the case of NULL tensor, or an error allocating new +/// memory, returns `kTfLiteError`. +TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, + bool preserve_data); + +/// Change the size of the memory block owned by `tensor` to `num_bytes`. +/// Tensors with allocation types other than `kTfLiteDynamic` will be ignored +/// and a `kTfLiteOk` will be returned. `tensor`'s internal data buffer will be +/// assigned a pointer which can safely be passed to free or realloc if +/// `num_bytes` is zero. Tensor data will be unchanged in the range from the +/// start of the region up to the minimum of the old and new sizes. In the case +/// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`. +TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); +#endif // TF_LITE_STATIC_MEMORY + +/// WARNING: This is an experimental interface that is subject to change. +/// +/// Currently, TfLiteDelegateParams has to be allocated in a way that it's +/// trivially destructable. It will be stored as `builtin_data` field in +/// `TfLiteNode` of the delegate node. 
+/// +/// See also the `CreateDelegateParams` function in `interpreter.cc` details. +typedef struct TfLiteDelegateParams { + struct TfLiteDelegate* delegate; + TfLiteIntArray* nodes_to_replace; + TfLiteIntArray* input_tensors; + TfLiteIntArray* output_tensors; +} TfLiteDelegateParams; + +/// WARNING: This is an experimental interface that is subject to change. +/// +/// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's +/// trivially destructable. It will be stored as `builtin_data` field in +/// `TfLiteNode` of the delegate node. +/// +/// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` +/// details. +typedef struct TfLiteOpaqueDelegateParams { + TfLiteOpaqueDelegate* delegate; + void* delegate_data; + TfLiteIntArray* nodes_to_replace; + TfLiteIntArray* input_tensors; + TfLiteIntArray* output_tensors; +} TfLiteOpaqueDelegateParams; + +/// `TfLiteContext` allows an op to access the tensors. +/// +/// `TfLiteContext` is a struct that is created by the TF Lite runtime +/// and passed to the "methods" (C function pointers) in the +/// `TfLiteRegistration` struct that are used to define custom ops and custom +/// delegate kernels. It contains information and methods (C function pointers) +/// that can be called by the code implementing a custom op or a custom delegate +/// kernel. These methods provide access to the context in which that custom op +/// or custom delegate kernel occurs, such as access to the input and output +/// tensors for that op, as well as methods for allocating memory buffers +/// and intermediate tensors, etc. +/// +/// See also `TfLiteOpaqueContext`, which is an more ABI-stable equivalent. +typedef struct TfLiteContext { + /// Number of tensors in the context. + size_t tensors_size; + + /// The execution plan contains a list of the node indices in execution + /// order. execution_plan->size is the current number of nodes. And, + /// execution_plan->data[0] is the first node that needs to be run. 
+ /// TfLiteDelegates can traverse the current execution plan by iterating
+ /// through each member of this array and using GetNodeAndRegistration() to
+ /// access details about a node. i.e.
+ ///
+ ///
+ /// TfLiteIntArray* execution_plan;
+ /// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context,
+ /// &execution_plan));
+ /// for (int exec_index = 0; exec_index < execution_plan->size;
+ /// exec_index++) {
+ /// int node_index = execution_plan->data[exec_index];
+ /// TfLiteNode* node;
+ /// TfLiteRegistration* reg;
+ /// context->GetNodeAndRegistration(context, node_index, &node, &reg);
+ /// }
+ ///
+ /// Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime.
+ /// Future calls to GetExecutionPlan invalidates earlier outputs. The
+ /// following code snippet shows the issue of such an invocation pattern.
+ /// After calling CheckNode, subsequent access to `plan_1st` is undefined.
+ ///
+ /// void CheckNode(const TfLiteNode* node) {
+ /// ...
+ /// TfLiteIntArray* plan_2nd;
+ /// TF_LITE_ENSURE_STATUS(
+ /// context->GetExecutionPlan(context, &plan_2nd)
+ /// );
+ /// ...
+ /// }
+ ///
+ /// TfLiteIntArray* plan_1st;
+ /// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
+ /// for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
+ /// int node_index = plan_1st->data[exec_index];
+ /// TfLiteNode* node;
+ /// TfLiteRegistration* reg;
+ /// context->GetNodeAndRegistration(context, node_index, &node, &reg);
+ /// CheckNode(node);
+ /// }
+ ///
+ /// WARNING: This is an experimental interface that is subject to change.
+ TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
+ TfLiteIntArray** execution_plan);
+
+ /// An array of tensors in the interpreter context (of length `tensors_size`)
+ TfLiteTensor* tensors;
+
+ /// opaque full context ptr (an opaque c++ data structure)
+ void* impl_;
+
+ /// Request memory pointer be resized. Updates dimensions on the tensor.
+ /// NOTE: ResizeTensor takes ownership of newSize. + TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, + TfLiteIntArray* new_size); + /// Request that an error be reported with format string msg. + void (*ReportError)(struct TfLiteContext*, const char* msg, ...); + + /// Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If + /// non-null, the value pointed to by `first_new_tensor_index` will be set to + /// the index of the first new tensor. + TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, + int* first_new_tensor_index); + + /// Get a Tensor node by node_index. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*GetNodeAndRegistration)( + struct TfLiteContext*, int node_index, TfLiteNode** node, + struct TfLiteRegistration** registration); + + /// Replace ops with one or more stub delegate operations. This function + /// does not take ownership of `nodes_to_replace`. + TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( + struct TfLiteContext*, struct TfLiteRegistration registration, + const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); + + /// Number of threads that are recommended to subsystems like gemmlowp and + /// eigen. + int recommended_num_threads; + + /// Access external contexts by type. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, + TfLiteExternalContextType); + /// Set the value of a external context. Does not take ownership of the + /// pointer. + /// + /// WARNING: This is an experimental interface that is subject to change. + void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, + TfLiteExternalContext*); + + /// Flag for allowing float16 precision for FP32 calculation. + /// default: false. + /// + /// WARNING: This is an experimental API and subject to change. 
+ bool allow_fp32_relax_to_fp16; + + /// Pointer to the op-level profiler, if set; nullptr otherwise. + void* profiler; + + /// Allocate persistent buffer which has the same life time as the + /// interpreter. Returns `nullptr` on failure. The memory is allocated from + /// heap for TFL, and from tail in TFLM. This method is only available in + /// `Init` or `Prepare` stage. + /// + /// WARNING: This is an experimental interface that is subject + /// to change. + void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); + + /// Allocate a buffer which will be deallocated right after invoke phase. + /// The memory is allocated from heap in TFL, and from volatile arena in TFLM. + /// This method is only available in invoke stage. + /// + /// NOTE: If possible use `RequestScratchBufferInArena` method to avoid memory + /// allocation during inference time. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, + void** ptr); + + /// Request a scratch buffer in the arena through static memory planning. + /// This method is only available in `Prepare` stage and the buffer is + /// allocated by the interpreter between Prepare and Eval stage. In `Eval` + /// stage, `GetScratchBuffer` API can be used to fetch the address. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, + size_t bytes, int* buffer_idx); + + /// Get the scratch buffer pointer. + /// This method is only available in Eval stage. + /// + /// WARNING: This is an experimental interface that is subject to change. + void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); + + /// Resize the memory pointer of the `tensor`. 
This method behaves the same as + /// `ResizeTensor`, except that it makes a copy of the shape array internally + /// so the shape array could be deallocated right afterwards. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, + TfLiteTensor* tensor, int dims, + const int* shape); + + /// This method provides a preview of post-delegation partitioning. Each + /// TfLiteDelegateParams in the referenced array corresponds to one instance + /// of the delegate kernel. Example usage: + /// + /// TfLiteIntArray* nodes_to_replace = ...; + /// TfLiteDelegateParams* params_array; + /// int num_partitions = 0; + /// TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( + /// context, delegate, nodes_to_replace, ¶ms_array, + /// &num_partitions)); + /// for (int idx = 0; idx < num_partitions; idx++) { + /// const auto& partition_params = params_array[idx]; + /// ... + /// } + /// + /// NOTE: The context owns the memory referenced by partition_params_array. It + /// will be cleared with another call to PreviewDelegatePartitioning, or after + /// TfLiteDelegateParams::Prepare returns. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*PreviewDelegatePartitioning)( + struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, + TfLiteDelegateParams** partition_params_array, int* num_partitions); + + /// Returns a TfLiteTensor struct for a given index. + /// + /// WARNING: This is an experimental interface that is subject to change. + /// + /// WARNING: This method may not be available on all platforms. + TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, + int tensor_idx); + + /// Returns a TfLiteEvalTensor struct for a given index. + /// + /// WARNING: This is an experimental interface that is subject to change. + /// + /// WARNING: This method may not be available on all platforms. 
+ TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, + int tensor_idx); + + /// Retrieves named metadata buffer from the TFLite model. + /// Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer + /// Model: that is, there exists a `metadata` entry with given `name` string. + /// (see TFLite's schema.fbs). + /// The corresponding `buffer` information is populated in `ptr` & `bytes`. + /// The data from `ptr` is valid for the lifetime of the Interpreter. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context, + const char* name, const char** ptr, + size_t* bytes); + + /// Retrieves the corresponding TfLiteContext of a subgraph that the given + /// subgraph_index points to and switches to the delegate context for that + /// subgraph. If an invalid subgraph index is given, returns kTfLiteError. + /// + /// NOTE: This function is expected to be paired with ReleaseSubgraphContext() + /// once the delegate preparation is done and/or the delegate context + /// functions are no longer needed. + /// + /// WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*AcquireSubgraphContext)( + struct TfLiteContext* context, int subgraph_index, + struct TfLiteContext** acquired_context); + /// Releases the subgraph context by switching back to the TFLite kernel + /// context for the subgraph that the given subgraph_index points to. + /// + /// NOTE: This function is expected to be used after AcquireSubgraphContext() + /// once the delegate preparation is done and/or the delegate context + /// functions are no longer needed. + /// + /// WARNING: This is an experimental interface that is subject to change. 
+ TfLiteStatus (*ReleaseSubgraphContext)(struct TfLiteContext* context, + int subgraph_index); +} TfLiteContext; + +/// `TfLiteOperator` is an external version of `TfLiteRegistration` +/// for C API which doesn't use internal types (such as `TfLiteContext`) but +/// only uses stable API types (such as `TfLiteOpaqueContext`). The purpose of +/// each field is the exactly the same as with `TfLiteRegistration`. +typedef struct TfLiteOperator TfLiteOperator; + +#ifndef DOXYGEN_SKIP +// For backwards compatibility. +// Deprecated. Use TfLiteOperator instead. +typedef TfLiteOperator TfLiteRegistrationExternal; +#endif + +/// The valid values of the `inplace_operator` field in `TfLiteRegistration`. +/// This allow an op to signal to the runtime that the same data pointer +/// may be passed as an input and output without impacting the result. +/// This does not mean that the memory can safely be reused, it is up to the +/// runtime to determine this, e.g. if another op consumes the same input or not +/// or if an input tensor has sufficient memory allocated to store the output +/// data. +/// +/// Setting these flags authorizes the runtime to set the data pointers of an +/// input and output tensor to the same value. In such cases, the memory +/// required by the output must be less than or equal to that required by the +/// shared input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then +/// the runtime can share the same input tensor with multiple operator's +/// outputs, provided that kTfLiteInplaceOpDataUnmodified is set for all of +/// them. Otherwise, if an input tensor is consumed by multiple operators, it +/// may only be shared with the operator which is the last to consume it. +/// +/// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. +typedef enum { + /// The default value. This indicates that the same data pointer cannot safely + /// be passed as an op's input and output. 
+ kTfLiteInplaceOpNone = 0, + /// This indicates that an op's first output's data is identical to its first + /// input's data, for example Reshape. + kTfLiteInplaceOpDataUnmodified = 1, + /// Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means + /// that InputN may be shared with OutputN instead of with the first output. + /// This flag requires one or more of kTfLiteInplaceOpInputNShared to be set. + kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput = 2, + /// kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share + /// InputN's data pointer with an output tensor. If + /// kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then + /// kTfLiteInplaceOpInputNShared indicates that InputN may be shared + /// with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN + /// may be shared with the first output. + /// + /// Indicates that an op's first input may be shared with the first output + /// tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has + /// no impact on the behavior allowed by this flag. + kTfLiteInplaceOpInput0Shared = 4, + /// Indicates that an op's second input may be shared with the first output + /// if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set + /// or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput + /// is set. + kTfLiteInplaceOpInput1Shared = 8, + /// Indicates that an op's third input may be shared with the first output + /// if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set + /// or third output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput + /// is + /// set. + kTfLiteInplaceOpInput2Shared = 16, + /// Placeholder to ensure that enum can hold 64 bit values to accommodate + /// future fields. + kTfLiteInplaceOpMaxValue = UINT64_MAX, +} TfLiteInPlaceOp; + +/// The number of shareable inputs supported. 
+static const int kTfLiteMaxSharableOpInputs = 3; + +/// `TfLiteRegistration` defines the implementation of an operation +/// (a built-in op, custom op, or custom delegate kernel). +/// +/// It is a struct containing "methods" (C function pointers) that will be +/// invoked by the TF Lite runtime to evaluate instances of the operation. +/// +/// See also `TfLiteOperator` which is a more ABI-stable equivalent. +typedef struct TfLiteRegistration { + /// Initializes the op from serialized data. + /// Called only *once* for the lifetime of the op, so any one-time allocations + /// should be made here (unless they depend on tensor sizes). + /// + /// * If a built-in op: + /// * `buffer` is the op's params data (TfLiteLSTMParams*). + /// * `length` is zero. + /// * If custom op: + /// * `buffer` is the op's `custom_options`. + /// * `length` is the size of the buffer. + /// + /// Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer + /// or an instance of a struct). + /// + /// The returned pointer will be stored with the node in the `user_data` + /// field, accessible within prepare and invoke functions below. + /// + /// NOTE: if the data is already in the desired format, simply implement this + /// function to return `nullptr` and implement the free function to be a + /// no-op. + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + + /// The pointer `buffer` is the data previously returned by an init + /// invocation. + void (*free)(TfLiteContext* context, void* buffer); + + /// prepare is called when the inputs this node depends on have been resized. + /// `context->ResizeTensor()` can be called to request output tensors to be + /// resized. + /// Can be called multiple times for the lifetime of the op. + /// + /// Returns `kTfLiteOk` on success. + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + + /// Execute the node (should read `node->inputs` and output to + /// `node->outputs`). 
+ /// + /// Returns `kTfLiteOk` on success. + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + + /// `profiling_string` is called during summarization of profiling information + /// in order to group executions together. Providing a value here will cause a + /// given op to appear multiple times is the profiling report. This is + /// particularly useful for custom ops that can perform significantly + /// different calculations depending on their `user-data`. + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + + /// Builtin codes. If this kernel refers to a builtin this is the code + /// of the builtin. This is so we can do marshaling to other frameworks like + /// NN API. + /// + /// Note: It is the responsibility of the registration binder to set this + /// properly. + int32_t builtin_code; + + /// Custom op name. If the op is a builtin, this will be `null`. + /// + /// Note: It is the responsibility of the registration binder to set this + /// properly. + /// + /// WARNING: This is an experimental interface that is subject to change. + const char* custom_name; + + /// The version of the op. + /// Note: It is the responsibility of the registration binder to set this + /// properly. + int version; + + /// The external (i.e. ABI-stable) version of `TfLiteRegistration`. + /// Since we can't use internal types (such as `TfLiteContext`) for C API to + /// maintain ABI stability. C API user will provide `TfLiteOperator` to + /// implement custom ops. We keep it inside of `TfLiteRegistration` and use + /// it to route callbacks properly. + TfLiteOperator* registration_external; + + /// Retrieves asynchronous kernel. + /// + /// If the `async_kernel` field is nullptr, it means the operation described + /// by this TfLiteRegistration object does not support asynchronous execution. + /// Otherwise, the function that the field points to should only be called for + /// delegate kernel nodes, i.e. 
`node` should be a delegate kernel node + /// created by applying a delegate. If the function returns nullptr, that + /// means that the underlying delegate does not support asynchronous execution + /// for this `node`. + struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context, + TfLiteNode* node); + + /// Indicates if an operator's output may safely overwrite its inputs. + /// See the comments in `TfLiteInPlaceOp`. + uint64_t inplace_operator; +} TfLiteRegistration; + +/// \private +/// Old version of `TfLiteRegistration` to maintain binary backward +/// compatibility. +/// The legacy registration type must be a POD struct type whose field types +/// must be a prefix of the field types in TfLiteRegistration, and offset of the +/// first field in TfLiteRegistration that is not present in the legacy +/// registration type must be greater than or equal to the size of the legacy +/// registration type. +/// +/// WARNING: This structure is deprecated / not an official part of the +/// API. It should be only used for binary backward compatibility. +typedef struct TfLiteRegistration_V3 { + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + void (*free)(TfLiteContext* context, void* buffer); + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + int32_t builtin_code; + const char* custom_name; + int version; + TfLiteOperator* registration_external; + struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context, + TfLiteNode* node); +} TfLiteRegistration_V3; + +/// \private +/// Old version of `TfLiteRegistration` to maintain binary backward +/// compatibility. 
+/// The legacy registration type must be a POD struct type whose field types +/// must be a prefix of the field types in TfLiteRegistration, and offset of the +/// first field in TfLiteRegistration that is not present in the legacy +/// registration type must be greater than or equal to the size of the legacy +/// registration type. +/// +/// WARNING: This structure is deprecated / not an official part of the +/// API. It should be only used for binary backward compatibility. +typedef struct TfLiteRegistration_V2 { + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + void (*free)(TfLiteContext* context, void* buffer); + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + int32_t builtin_code; + const char* custom_name; + int version; + TfLiteOperator* registration_external; +} TfLiteRegistration_V2; + +/// \private +/// Old version of `TfLiteRegistration` to maintain binary backward +/// compatibility. +/// The legacy registration type must be a POD struct type whose field types +/// must be a prefix of the field types in TfLiteRegistration, and offset of the +/// first field in TfLiteRegistration that is not present in the legacy +/// registration type must be greater than or equal to the size of the legacy +/// registration type. +/// +/// WARNING: This structure is deprecated / not an official part of the +/// API. It should be only used for binary backward compatibility. 
+typedef struct TfLiteRegistration_V1 { + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + void (*free)(TfLiteContext* context, void* buffer); + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + int32_t builtin_code; + const char* custom_name; + int version; +} TfLiteRegistration_V1; + +/// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the +/// values should be 1, 2, 4, 8, ...etc. +typedef enum TfLiteDelegateFlags { + kTfLiteDelegateFlagsNone = 0, + /// The flag is set if the delegate can handle dynamic sized tensors. + /// For example, the output shape of a `Resize` op with non-constant shape + /// can only be inferred when the op is invoked. + /// In this case, the Delegate is responsible for calling + /// `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling + /// `ResizeTensor` when invoking the op. + /// + /// If the delegate isn't capable to handle dynamic tensors, this flag need + /// to be set to false. + kTfLiteDelegateFlagsAllowDynamicTensors = 1, + + /// This flag can be used by delegates (that allow dynamic tensors) to ensure + /// applicable tensor shapes are automatically propagated in the case of + /// tensor resizing. This means that non-dynamic (allocation_type != + /// kTfLiteDynamic) I/O tensors of a delegate kernel will have correct shapes + /// before its Prepare() method is called. The runtime leverages TFLite + /// builtin ops in the original execution plan to propagate shapes. + /// + /// A few points to note: + /// 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is + /// false, this one is redundant since the delegate kernels are re-initialized + /// every time tensors are resized. + /// 2. 
Enabling this flag adds some overhead to AllocateTensors(), since extra + /// work is required to prepare the original execution plan. + /// 3. This flag requires that the original execution plan only have ops with + /// valid registrations (and not 'dummy' custom ops like with Flex). + /// + /// WARNING: This feature is experimental and subject to change. + kTfLiteDelegateFlagsRequirePropagatedShapes = 2, + + /// This flag can be used by delegates to request per-operator profiling. If a + /// node is a delegate node, this flag will be checked before profiling. If + /// set, then the node will not be profiled. The delegate will then add per + /// operator information using `Profiler::EventType::OPERATOR_INVOKE_EVENT` + /// and the results will appear in the operator-wise Profiling section and not + /// in the Delegate internal section. + kTfLiteDelegateFlagsPerOperatorProfiling = 4 +} TfLiteDelegateFlags; + +/// WARNING: This is an experimental interface that is subject to change. +typedef struct TfLiteDelegate { + /// Data that delegate needs to identify itself. This data is owned by the + /// delegate. The delegate is owned in the user code, so the delegate is + /// responsible for deallocating this when it is destroyed. + void* data_; + + /// Invoked by `ModifyGraphWithDelegate`. This prepare is called, giving the + /// delegate a view of the current graph through `TfLiteContext*`. It + /// typically will look at the nodes and call + /// `ReplaceNodeSubsetsWithDelegateKernels()` to ask the TensorFlow lite + /// runtime to create macro-nodes to represent delegated subgraphs of the + /// original graph. + TfLiteStatus (*Prepare)(TfLiteContext* context, + struct TfLiteDelegate* delegate); + + /// Copy the data from delegate buffer handle into raw memory of the given + /// `tensor`. Note that the delegate is allowed to allocate the raw bytes as + /// long as it follows the rules for `kTfLiteDynamic` tensors, in which case + /// this cannot be null. 
+ TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle buffer_handle, + TfLiteTensor* tensor); + + /// Copy the data from raw memory of the given `tensor` to delegate buffer + /// handle. This can be null if the delegate doesn't use its own buffer. + TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle buffer_handle, + TfLiteTensor* tensor); + + /// Free the Delegate Buffer Handle. Note: This only frees the handle, but + /// this doesn't release the underlying resource (e.g. textures). The + /// resources are either owned by application layer or the delegate. + /// This can be null if the delegate doesn't use its own buffer. + void (*FreeBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle* handle); + + /// Bitmask flags. See the comments in `TfLiteDelegateFlags`. + int64_t flags; + + /// The opaque delegate builder associated with this object. If set then the + /// TF Lite runtime will give precedence to this field. E.g. instead of + /// invoking `Prepare` via the function pointer inside the `TfLiteDelegate` + /// object, the runtime will first check if the corresponding function + /// pointer inside `opaque_delegate_builder` is set and if so invoke that. + /// + /// If this field is non-null, then the `Prepare` field (of the + /// `TfLiteDelegate`) should be null. + struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder; +} TfLiteDelegate; + +/// Build a `null` delegate, with all the fields properly set to their default +/// values. +TfLiteDelegate TfLiteDelegateCreate(void); + +/// `TfLiteOpaqueDelegateBuilder` is used for constructing +/// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` in c_api_opaque.h. +/// NOTE: This struct is not ABI stable. 
+/// +/// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects +/// should be brace-initialized, so that all fields (including any that might be +/// added in the future) get zero-initialized. The purpose of each field is +/// exactly the same as with `TfLiteDelegate`. +/// +/// NOTE: This type is part of the TensorFlow Lite Extension APIs. +/// We reserve the right to make changes to this API in future releases, +/// potentially including non-backwards-compatible changes, on a different +/// schedule than for the other TensorFlow Lite APIs. See +/// https://www.tensorflow.org/guide/versions#separate_version_number_for_tensorflow_lite_extension_apis. +typedef struct TfLiteOpaqueDelegateBuilder { + /// Data that delegate needs to identify itself. This data is owned by the + /// delegate. The delegate is owned in the user code, so the delegate is + /// responsible for deallocating this when it is destroyed. + void* data; + /// Invoked by ModifyGraphWithDelegate. This prepare is called, giving the + /// delegate a view of the current graph through `TfLiteContext*`. It + /// typically will look at the nodes and call + /// `ReplaceNodeSubsetsWithDelegateKernels()` to ask the TensorFlow lite + /// runtime to create macro-nodes to represent delegated subgraphs of the + /// original graph. + TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context, // NOLINT + TfLiteOpaqueDelegate* delegate, void* data); + /// Copies the data from delegate buffer handle into raw memory of the given + /// `tensor`. Note that the delegate is allowed to allocate the raw bytes as + /// long as it follows the rules for kTfLiteDynamic tensors, in which case + /// this cannot be null. + TfLiteStatus (*CopyFromBufferHandle)( // NOLINT + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + /// Copies the data from raw memory of the given `tensor` to delegate buffer + /// handle. 
This can be null if the delegate doesn't use its own buffer. + TfLiteStatus (*CopyToBufferHandle)( // NOLINT + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + /// Frees the Delegate Buffer Handle. Note: This only frees the handle, but + /// this doesn't release the underlying resource (e.g. textures). The + /// resources are either owned by application layer or the delegate. + /// This can be null if the delegate doesn't use its own buffer. + void (*FreeBufferHandle)(TfLiteOpaqueContext* context, // NOLINT + TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle* handle); + /// Bitmask flags. See the comments in `TfLiteDelegateFlags`. + int64_t flags; +} TfLiteOpaqueDelegateBuilder; + +#ifndef TF_LITE_STATIC_MEMORY +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. +// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate( + const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder); + +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. +// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* delegate); +#endif // TF_LITE_STATIC_MEMORY + +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. +// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate); + +/// Returns a tensor data allocation strategy. +TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy( + const TfLiteTensor* t); + +/// Returns how stable a tensor data buffer address is across runs. 
+TfLiteRunStability TfLiteTensorGetBufferAddressStability(const TfLiteTensor* t); + +/// Returns how stable a tensor data values are across runs. +TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* t); + +/// Returns the operation step when the data of a tensor is populated. +/// +/// Some operations can precompute their results before the evaluation step. +/// This makes the data available earlier for subsequent operations. +TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t); + +/// Returns the operation steop when the shape of a tensor is computed. +/// +/// Some operations can precompute the shape of their results before the +/// evaluation step. This makes the shape available earlier for subsequent +/// operations. +TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t); + +/** @} */ +// Ends `\addtogroup`, it's important for the doc generator that this doesn't +// include the CC code below. + +#ifdef __cplusplus +} // extern "C" + +#include + +// --- TFLITE VARIANT TENSORS ---- +// Programming languges usually define "variant" as a type that can hold an +// unbounded set of types. See std::any +// (https://en.cppreference.com/w/cpp/utility/any) for a related standard +// library construct. In tensorflow, variant tensors have a data member which is +// an Object that is destructible and copy constructible. +// Variant tensors are commonly used to represent non trivial data +// semantics that don't fit into simple primitives, such as lists of tensors and +// datasets. Additionally, they can facilitate containers for optimizing +// memory movement of tensor data. +// +// The following set of classes define the variant tensor member for tflite. +// They implement a type-erased container intended to be used behind the +// `data.data : void*` member of `TfLiteTensor`s. 
Runtime functions interact +// the variant member at the level of a `VariantData`, whereas kernels +// operate with the full knowledge of the un-erased type. The `VariantData` +// class provides abstract methods for destroying and copying `VariantData`. +// Invoking these methods will dispatch to the erased type opaquely. +// The contents of any object of type derived from `AbstractVariant` can be +// written to `TfLiteTensor::data::data : void*` from kernels. If the runtime +// were to copy such a tensor through `TfLiteTensorCopy`, the destination data +// member will contain the result of invoking the erased type's copy +// constructor. Similar for the runtime releasing tensors from memory, the +// erased type's destructor will be invoked. There are a few caveats to consider +// to use these safely, which we discuss below. +// +// EXAMPLE: READING VARIANT TENSORS +// ``` +// // retrieve input with `type == kTfLiteVariant` +// TfLiteTensor* input = ... +// // must first static cast to `VariantData`, more on this below. +// VariantData* vd_input = static_cast(t->data.data); +// CustomType* typed_input = +// static_cast(vd_input); +// // do custom work on `typed_input`... +// ``` +// +// EXAMPLE: WRITING VARIANT TENSORS +// ``` +// TfLiteTensor* output = ... +// // construct a new variant object behind the target tensor +// TfLiteVariantRealloc(output, args...); +// // again must static cast to `VariantData*` before writing to `void*`. +// output->data.data = static_cast(typed_output); +// ``` +// +// WHY STATIC CAST TO `VariantData*` +// The Standard defines a `reinterpret_cast` from a derived type to its +// parents as undefined behavior when the parent is a non-standard layout. +// https://en.cppreference.com/w/cpp/language/reinterpret_cast (see bullet 5). +// Due to the `VariantData` having virtual members it is indeed non-standard +// layout, and any type derived from `VariantData` fails to be +// "transparently-replaceable". I.e. 
implicit cast from derived to base in this +// case may adjust the pointer and by definition `reinterpret_cast` will not +// the adjust the pointer. +// Thus, dereferencing a pointer of type `VariantData` which addresses +// the first byte of an object of said derived type is UB unless it was first +// implicitly or statically casted to a `VariantData`. Writing the object of +// derived type directly to `void*` which is dereferenced as a `VariantData` is +// then UB, and so the intermediate cast through `VariantData` must be enforced. +// A good example of this issue is ellucidate in the bottom code snippet +// here: https://en.cppreference.com/w/cpp/utility/launder. +class VariantData { + public: + // All variant objects must be able to be destroyed and copied. + virtual ~VariantData() = default; + // A "virtual copy-constructor". Often the destination tensor of a variant + // copy may have been previously allocated in a prior call to inference. We + // allow the copy to target the destinations buffer (`maybe_alloc`), + // for potential reuse and optimizations. `maybe_alloc` must be of the same + // underlying derived type. References to whatever object is at + // `maybe_alloc` may be invalidated. + virtual VariantData* CloneTo(VariantData* maybe_alloc) const = 0; +}; + +// Concrete implementations extend `AbstractVariantData` with CRPT. +template +class AbstractVariantData : public VariantData { + public: + VariantData* CloneTo(VariantData* maybe_alloc) const override { + if (maybe_alloc != nullptr) { + // If the output is still allocated, then its object may still be + // in its life time and the destructor must be called before re-using the + // buffer. + // This may actual have a non-negligible effect on performance if the + // destructor is complex. A future iteration may + // introduce copy or move assignment semantics, allowing for the + // underlying implementation to optimize for this case. 
+ auto* derived = static_cast(maybe_alloc); + derived->~ErasedDerived(); + return new (derived) + ErasedDerived(static_cast(*this)); + } + return new ErasedDerived(static_cast(*this)); + } + + protected: + AbstractVariantData() = default; + AbstractVariantData(const AbstractVariantData&) = default; + AbstractVariantData(AbstractVariantData&&) = delete; +}; + +// Analogous to `TfLiteTensorRealloc` for allocation of tensors whose +// data member points to an arbitrary C++ object. `VariantType` refers +// to the erased type of said object and `VariantArgs` refers to +// a list of argument types with which to construct a new `VariantType`. +// `VariantArgs` must match a constructor of `VariantType`. +template +TfLiteStatus TfLiteTensorVariantRealloc(TfLiteTensor* t, + VariantArgs&&... args) { + if (t->type != kTfLiteVariant) return kTfLiteError; + VariantType* new_vd; + if (t->data.raw != nullptr) { + auto* target_vd = static_cast(t->data.data); + target_vd->~VariantData(); + // As above, we assume if `t` is already allocated then it was allocated + // with the same `VariantType` as templated. + new_vd = new (t->data.raw) VariantType(std::forward(args)...); + } else { + new_vd = new VariantType(std::forward(args)...); + } + t->data.data = static_cast(new_vd); + t->allocation_type = kTfLiteVariantObject; + return kTfLiteOk; +} + +#endif // __cplusplus +#endif // TENSORFLOW_LITE_CORE_C_COMMON_H_ diff --git a/src/tensorflow/lite/core/macros.h b/src/tensorflow/lite/core/macros.h new file mode 100644 index 0000000..86de4da --- /dev/null +++ b/src/tensorflow/lite/core/macros.h @@ -0,0 +1,68 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// This provides utility macros and functions that are inherently platform +// specific or shared across runtime & converter. +#ifndef TENSORFLOW_LITE_CORE_MACROS_H_ +#define TENSORFLOW_LITE_CORE_MACROS_H_ + +#ifdef __has_builtin +#define TFLITE_HAS_BUILTIN(x) __has_builtin(x) +#else +#define TFLITE_HAS_BUILTIN(x) 0 +#endif + +#if (!defined(__NVCC__)) && (TFLITE_HAS_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && __GNUC__ >= 3)) +#define TFLITE_EXPECT_FALSE(cond) __builtin_expect(cond, false) +#define TFLITE_EXPECT_TRUE(cond) __builtin_expect(!!(cond), true) +#else +#define TFLITE_EXPECT_FALSE(cond) (cond) +#define TFLITE_EXPECT_TRUE(cond) (cond) +#endif + +#ifdef _WIN32 +#define TFLITE_NOINLINE __declspec(noinline) +#else +#ifdef __has_attribute +#if __has_attribute(noinline) +#define TFLITE_NOINLINE __attribute__((noinline)) +#else +#define TFLITE_NOINLINE +#endif // __has_attribute(noinline) +#else +#define TFLITE_NOINLINE +#endif // __has_attribute +#endif // _WIN32 + +// Normally we'd use ABSL_HAVE_ATTRIBUTE_WEAK and ABSL_ATTRIBUTE_WEAK, but +// we avoid the absl dependency for binary size reasons. 
+#ifdef __has_attribute +#define TFLITE_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +#define TFLITE_HAS_ATTRIBUTE(x) 0 +#endif + +#if (TFLITE_HAS_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) +#undef TFLITE_ATTRIBUTE_WEAK +#define TFLITE_ATTRIBUTE_WEAK __attribute__((weak)) +#define TFLITE_HAS_ATTRIBUTE_WEAK 1 +#else +#define TFLITE_ATTRIBUTE_WEAK +#define TFLITE_HAS_ATTRIBUTE_WEAK 0 +#endif + +#endif // TENSORFLOW_LITE_CORE_MACROS_H_ diff --git a/src/tensorflow/lite/kernels/internal/common.cpp b/src/tensorflow/lite/kernels/internal/common.cpp new file mode 100644 index 0000000..fabb020 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/common.cpp @@ -0,0 +1,103 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { + +// Single-rounding MultiplyByQuantizedMultiplier +#if TFLITE_SINGLE_ROUNDING +int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier, + int shift) { + TFLITE_DCHECK(quantized_multiplier >= 0); + TFLITE_DCHECK(shift >= -31 && shift <= 30); + + const int64_t total_shift = 31 - shift; + const int64_t round = static_cast(1) << (total_shift - 1); + int64_t result = x * static_cast(quantized_multiplier) + round; + result = result >> total_shift; + + TFLITE_DCHECK(result >= std::numeric_limits::min() && + result <= std::numeric_limits::max()); + return static_cast(result); +} + +int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier, + int shift) { + // Inputs: + // - quantized_multiplier has fixed point at bit 31 + // - shift is -31 to +7 (negative for right shift) + // + // Assumptions: The following input ranges are assumed + // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1) + // - scaling is chosen so final scaled result fits in int32_t + // - input x is in the range -(1<<47) <= x < (1<<47) + TFLITE_DCHECK(quantized_multiplier >= 0); + TFLITE_DCHECK(shift >= -31 && shift < 8); + TFLITE_DCHECK(x >= -(static_cast(1) << 47) && + x < (static_cast(1) << 47)); + + const int32_t reduced_multiplier = + (quantized_multiplier < 0x7FFF0000) + ? 
((quantized_multiplier + (1 << 15)) >> 16) + : 0x7FFF; + const int64_t total_shift = 15 - shift; + const int64_t round = static_cast(1) << (total_shift - 1); + int64_t result = x * static_cast(reduced_multiplier) + round; + result = result >> total_shift; + + TFLITE_DCHECK(result >= std::numeric_limits::min() && + result <= std::numeric_limits::max()); + return static_cast(result); +} +// Double-rounding MultiplyByQuantizedMultiplier +#else +int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier, + int shift) { + using gemmlowp::RoundingDivideByPOT; + using gemmlowp::SaturatingRoundingDoublingHighMul; + int left_shift = shift > 0 ? shift : 0; + int right_shift = shift > 0 ? 0 : -shift; + return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul( + x * (1 << left_shift), quantized_multiplier), + right_shift); +} + +int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier, + int shift) { + // Inputs: + // - quantized_multiplier has fixed point at bit 31 + // - shift is -31 to +7 (negative for right shift) + // + // Assumptions: The following input ranges are assumed + // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1) + // - scaling is chosen so final scaled result fits in int32_t + // - input x is in the range -(1<<47) <= x < (1<<47) + assert(quantized_multiplier >= 0); + assert(shift >= -31 && shift < 8); + assert(x >= -(static_cast(1) << 47) && + x < (static_cast(1) << 47)); + + int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000) + ? 
((quantized_multiplier + (1 << 15)) >> 16) + : 0x7FFF; + int total_shift = 15 - shift; + x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1)); + int32_t result = x >> total_shift; + return result; +} +#endif // TFLITE_SINGLE_ROUNDING + +} // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/common.h b/src/tensorflow/lite/kernels/internal/common.h index 1e241ca..2d91aa1 100644 --- a/src/tensorflow/lite/kernels/internal/common.h +++ b/src/tensorflow/lite/kernels/internal/common.h @@ -15,15 +15,23 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ +#include +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/runtime_shape.h" #ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK #ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK #define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK #endif #endif +#include #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "tensorflow/lite/core/macros.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/neon_check.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -32,6 +40,117 @@ namespace tflite { constexpr int kReverseShift = -1; +// Reduces and compresses dimensions so that broadcast handling becomes more +// efficient. Returns true if the output shape is broadcastable; it doesn't +// contain any degenerate dimension, i.e. shape dimension = 0. False otherwise. 
+template +bool ReduceDimensionsForBroadcast(const RuntimeShape& input1_shape, + const RuntimeShape& input2_shape, + size_t* compressed_input1_stride, + size_t* compressed_input2_stride, + size_t* compressed_output_shape) { + size_t num_compressed_dims = 0; + size_t compressed_input1_shape[MAX_DIM]; + size_t compressed_input2_shape[MAX_DIM]; + std::fill(compressed_input1_shape, compressed_input1_shape + MAX_DIM, 1); + std::fill(compressed_input2_shape, compressed_input2_shape + MAX_DIM, 1); + std::fill(compressed_output_shape, compressed_output_shape + MAX_DIM, 1); + bool broadcast_input1 = false; + bool broadcast_input2 = false; + bool first_nonunit = true; + const size_t num_input1_dims = input1_shape.DimensionsCount(); + const size_t num_input2_dims = input2_shape.DimensionsCount(); + const int32_t* input1_dims = input1_shape.DimsData(); + const int32_t* input2_dims = input2_shape.DimsData(); + const size_t num_common_dims = std::min(num_input1_dims, num_input2_dims); + for (size_t i = 1; i <= num_common_dims; i++) { + const size_t input1_dim = input1_dims[num_input1_dims - i]; + const size_t input2_dim = input2_dims[num_input2_dims - i]; + if (input1_dim == 0 || input2_dim == 0) { + return false; + } + if (input1_dim == 1 && input2_dim == 1) { + continue; + } + assert(!broadcast_input1 || !broadcast_input2); + + if (input1_dim == 1) { + if (!broadcast_input1) { + broadcast_input1 = true; + broadcast_input2 = false; + num_compressed_dims++; + } + compressed_input2_shape[num_compressed_dims - 1] *= input2_dim; + compressed_output_shape[num_compressed_dims - 1] *= input2_dim; + } else if (input2_dim == 1) { + if (!broadcast_input2) { + broadcast_input1 = false; + broadcast_input2 = true; + num_compressed_dims++; + } + compressed_input1_shape[num_compressed_dims - 1] *= input1_dim; + compressed_output_shape[num_compressed_dims - 1] *= input1_dim; + } else { + TFLITE_DCHECK(input1_dim == input2_dim); + if (broadcast_input1 || broadcast_input2 || first_nonunit) { + 
broadcast_input1 = false; + broadcast_input2 = false; + num_compressed_dims++; + } + compressed_input1_shape[num_compressed_dims - 1] *= input1_dim; + compressed_input2_shape[num_compressed_dims - 1] *= input1_dim; + compressed_output_shape[num_compressed_dims - 1] *= input1_dim; + } + first_nonunit = false; + } + if (num_input1_dims > num_input2_dims) { + if (!broadcast_input2) { + num_compressed_dims++; + } + for (size_t i = 0; i < num_input1_dims - num_input2_dims; i++) { + const size_t input1_dim = input1_dims[i]; + if (input1_dim == 0) { + return false; + } + compressed_input1_shape[num_compressed_dims - 1] *= input1_dim; + compressed_output_shape[num_compressed_dims - 1] *= input1_dim; + } + } else if (num_input2_dims > num_input1_dims) { + if (!broadcast_input1) { + num_compressed_dims++; + } + for (size_t i = 0; i < num_input2_dims - num_input1_dims; i++) { + const size_t input2_dim = input2_dims[i]; + if (input2_dim == 0) { + return false; + } + compressed_input2_shape[num_compressed_dims - 1] *= input2_dim; + compressed_output_shape[num_compressed_dims - 1] *= input2_dim; + } + } + num_compressed_dims = (num_compressed_dims > 1) ? 
num_compressed_dims : 1; + + int input1_stride = 1; + int input2_stride = 1; + for (int i = 0; i < MAX_DIM; ++i) { + compressed_input1_stride[i] = input1_stride; + input1_stride *= compressed_input1_shape[i]; + compressed_input2_stride[i] = input2_stride; + input2_stride *= compressed_input2_shape[i]; + } + for (int i = 0; i < MAX_DIM; ++i) { + if (compressed_input1_shape[i] != compressed_input2_shape[i]) { + if (compressed_input1_shape[i] == 1) { + compressed_input1_stride[i] = 0; + } else { + TFLITE_DCHECK_EQ(compressed_input2_shape[i], 1); + compressed_input2_stride[i] = 0; + } + } + } + return true; +} + inline void GetActivationMinMax(FusedActivationFunctionType ac, float* output_activation_min, float* output_activation_max) { @@ -75,6 +194,7 @@ float ActivationFunction(float x) { inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { + if (bias_size == 0) return; // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). 
@@ -138,6 +258,60 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, #endif } +TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier( + int32_t x, int32_t quantized_multiplier, int shift); + +TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier( + int64_t x, int32_t quantized_multiplier, int shift); + +// Single-rounding MultiplyByQuantizedMultiplier +#if TFLITE_SINGLE_ROUNDING +inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp( + int32_t x, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK_LE(shift, 0); + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} + +inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne( + int32_t x, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK_GE(shift, 0); + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} + +#ifdef USE_NEON +inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( + int32x4x4_t input_val, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK(quantized_multiplier >= 0); + + const int right_shift = std::min(-1, shift); + const int left_shift = shift - right_shift; + + const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier); + const int32x4_t left_shift_dup = vdupq_n_s32(left_shift); + const int32x4_t right_shift_dup = vdupq_n_s32(right_shift); + + int32x4x4_t result; + result.val[0] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[1] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[2] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[3] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup), + right_shift_dup); + + return result; +} +#endif // USE_NEON +// Double-rounding MultiplyByQuantizedMultiplier +#else inline int32_t 
MultiplyByQuantizedMultiplierSmallerThanOneExp( int32_t x, int32_t quantized_multiplier, int left_shift) { using gemmlowp::RoundingDivideByPOT; @@ -153,39 +327,6 @@ inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne( quantized_multiplier); } -inline int32_t MultiplyByQuantizedMultiplier(int32_t x, - int32_t quantized_multiplier, - int shift) { - using gemmlowp::RoundingDivideByPOT; - using gemmlowp::SaturatingRoundingDoublingHighMul; - int left_shift = shift > 0 ? shift : 0; - int right_shift = shift > 0 ? 0 : -shift; - return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul( - x * (1 << left_shift), quantized_multiplier), - right_shift); -} - -inline int32_t MultiplyByQuantizedMultiplier(int64_t x, - int32_t quantized_multiplier, - int shift) { - // Inputs: - // - quantized_multiplier has fixed point at bit 31 - // - shift is -31 to +7 (negative for right shift) - // - // Assumptions: The following input ranges are assumed - // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1) - // - scaling is chosen so final scaled result fits in int32_t - // - input x is in the range -(1<<47) <= x < (1<<47) - assert(quantized_multiplier >= 0); - assert(shift >= -31 && shift < 8); - - int32_t reduced_multiplier = (quantized_multiplier + (1 << 15)) >> 16; - int total_shift = 15 - shift; - x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1)); - int32_t result = x >> total_shift; - return result; -} - #ifdef USE_NEON // Round uses ARM's rounding shift right. inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( @@ -220,20 +361,23 @@ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( return result; } -#endif +#endif // USE_NEON +#endif // TFLITE_SINGLE_ROUNDING template int CountLeadingZeros(T integer_input) { static_assert(std::is_unsigned::value, "Only unsigned integer types handled."); -#if defined(__GNUC__) - return integer_input ? 
__builtin_clz(integer_input) - : std::numeric_limits::digits; -#else if (integer_input == 0) { return std::numeric_limits::digits; } - +#if defined(__GNUC__) + if (std::is_same::value) { + return __builtin_clz(integer_input); + } else if (std::is_same::value) { + return __builtin_clzll(integer_input); + } +#endif const T one_in_leading_positive = static_cast(1) << (std::numeric_limits::digits - 1); int leading_zeros = 0; @@ -242,7 +386,6 @@ int CountLeadingZeros(T integer_input) { ++leading_zeros; } return leading_zeros; -#endif } template @@ -275,79 +418,216 @@ inline Integer FloorLog2(Integer n) { } } -// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(double (*func)(double), double min, double max, - int16_t* table, const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - double step = (max - min) / (num - 1); - double half_step = step / 2.0; - for (int i = 0; i < num - 1; i++) { - double sample_val = TfLiteRound(func(min + i * step) * 32768.0); - double midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0 + - TfLiteRound(func(min + i * step) * 32768.0)) / - 2.0); - double midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0); - double midpoint_err = midpoint_interp_val - midpoint_val; - double bias = TfLiteRound(midpoint_err / 2.0); - table[i] = std::min(std::max(sample_val - bias, -32768.0), 32767.0); +namespace detail { + +// LUTPopulate takes an optional type-erased transform_params to allow passing +// extra parameters to the transform function pointer. 
const void* is used +// instead of std::function to be compatible with TFLite Micro +template +inline typename std::enable_if::value, + FloatT>::type +LUTTransform(Func transform, const void* /*transform_params*/, FloatT value) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + return transform(value); +} + +template +inline typename std::enable_if< + std::is_same::value, FloatT>::type +LUTTransform(Func transform, const void* transform_params, FloatT value) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + return transform(value, transform_params); +} + +// Use the same LUT generation code for both uint8_t and int8_t. Int8_t indexes +// will be directly casted to uint8_t, the int8 LUT will thus be ordered as [0, +// 1, ..., 127, -128, ..., -2, -1] instead of [-128, -127, ..., -1, 0, 1, ..., +// 126, 127]. +template +inline void LUTPopulateInt8(float input_scale, int32_t input_zero_point, + float output_scale, int32_t output_zero_point, + Func transform, const void* transform_params, + T* lut) { + static_assert( + std::is_same::value || std::is_same::value, + "T must be an uint8 or int8 type."); + uint8_t* lut_uint8 = reinterpret_cast(lut); + const float inverse_scale = 1 / output_scale; + int32_t maxval = std::numeric_limits::max(); + int32_t minval = std::numeric_limits::min(); + for (int32_t val = minval; val <= maxval; ++val) { + const float dequantized = input_scale * (val - input_zero_point); + const float transformed = + LUTTransform(transform, transform_params, dequantized); + const float rescaled = TfLiteRound(transformed * inverse_scale); + const int32_t quantized = + static_cast(rescaled + output_zero_point); + lut_uint8[static_cast(static_cast(val))] = static_cast( + static_cast(std::max(std::min(maxval, quantized), minval))); } - table[num - 1] = - std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0); } -// generate INT16 LUT for function(), 
e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(float (*func)(float), float min, float max, int16_t* table, - const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - float step = (max - min) / (num - 1); - float half_step = step / 2.0f; - for (int i = 0; i < num - 1; i++) { - float sample_val = TfLiteRound(func(min + i * step) * 32768.0f); - float midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0f + - TfLiteRound(func(min + i * step) * 32768.0f)) / - 2.0f); - float midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0f); - float midpoint_err = midpoint_interp_val - midpoint_val; - float bias = TfLiteRound(midpoint_err / 2.0f); - table[i] = std::min(std::max(sample_val - bias, -32768.0f), 32767.0f); +// Keep floating-point type configurable for backward compatibility. float +// should be used for FloatT by default. 
+template +inline void LUTPopulateInt16(FloatT input_scale, int32_t input_zero_point, + FloatT output_scale, int32_t output_zero_point, + Func transform, const void* transform_params, + int16_t* lut) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + const FloatT input_min = + input_scale * (std::numeric_limits::min() - input_zero_point); + const FloatT input_max = + input_scale * (std::numeric_limits::max() - input_zero_point); + const FloatT output_min = + output_scale * (std::numeric_limits::min() - output_zero_point); + const FloatT output_max = + output_scale * (std::numeric_limits::max() - output_zero_point); + + const int nb_steps = 512; + const FloatT step = (input_max - input_min) / nb_steps; + const FloatT half_step = step / 2; + const FloatT output_scaling_inv = + static_cast(std::numeric_limits::max() - + std::numeric_limits::min() + 1) / + (output_max - output_min); + const FloatT table_min = + static_cast(std::numeric_limits::min()); + const FloatT table_max = + static_cast(std::numeric_limits::max()); + + for (int i = 0; i < nb_steps; i++) { + const FloatT val = + LUTTransform(transform, transform_params, input_min + i * step); + const FloatT val_midpoint = LUTTransform( + transform, transform_params, input_min + i * step + half_step); + const FloatT val_next = LUTTransform(transform, transform_params, + input_min + (i + 1) * step); + + const FloatT sample_val = TfLiteRound(val * output_scaling_inv); + const FloatT midpoint_interp_val = + TfLiteRound((val_next * output_scaling_inv + + TfLiteRound(val * output_scaling_inv)) / + 2); + const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv); + const FloatT midpoint_err = midpoint_interp_val - midpoint_val; + const FloatT bias = TfLiteRound(midpoint_err / 2); + + lut[i] = static_cast(std::min( + std::max(sample_val - bias, table_min), table_max)); } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 
32767.0f); + + lut[nb_steps] = static_cast(std::min( + std::max(TfLiteRound(LUTTransform( + transform, transform_params, input_max) * + output_scaling_inv), + table_min), + table_max)); } -// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax -inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) { - // 512 base value, lut[513] only for calculate slope - uint16_t index = static_cast(256 + (value >> 7)); +} // namespace detail + +template +inline typename std::enable_if::value || + std::is_same::value, + void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float), T* lut) { + detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale, + output_zero_point, transform, nullptr, lut); +} + +template +inline typename std::enable_if::value || + std::is_same::value, + void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float, const void*), + const void* transform_params, T* lut) { + detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale, + output_zero_point, transform, transform_params, lut); +} + +template +inline typename std::enable_if::value, void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float), T* lut) { + detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale, + output_zero_point, transform, nullptr, lut); +} + +template +inline typename std::enable_if::value, void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float, const void*), + const void* transform_params, T* lut) { + detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale, + output_zero_point, transform, + transform_params, lut); +} + +// Deprecated, avoid usage and prefer 
the float version. Kept for
+// backward-compatibility.
+template
+inline typename std::enable_if::value, void>::type
+LUTPopulate(double input_scale, int32_t input_zero_point, double output_scale,
+ int32_t output_zero_point, double (*transform)(double), T* lut) {
+ detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform, nullptr, lut);
+}
+
+// The size of the LUT depends on the type of input. For uint8 and int8 inputs a
+// simple 256 entries LUT is used. For int16 inputs the high 9 bits are used for
+// indexing and the 7 remaining bits are used for interpolation. We thus use a
+// 513-entries LUT for int16 cases, 512 for the 9-bit indexing and 1 extra entry
+// to interpolate the last value.
+template
+constexpr int LUTSize() {
+ static_assert(std::is_same::value ||
+ std::is_same::value ||
+ std::is_same::value,
+ "Only LUTs with uint8, int8 or int16 inputs are supported.");
+ // As per c++11: constexpr methods cannot have more than one return statement.
+ return (std::is_same::value || std::is_same::value)
+ ? 
256 + : 513; +} + +// int16_t -> int16_t table lookup with interpolation +// LUT must have 513 values +inline int16_t LUTLookup(int16_t value, const int16_t* lut) { + // 512 base values, lut[513] is only used to calculate the slope + const uint16_t index = static_cast(256 + (value >> 7)); assert(index < 512 && "LUT index out of range."); - int16_t offset = value & 0x7f; + const int16_t offset = value & 0x7f; - // base and slope are Q0.15 - int16_t base = lut[index]; - int16_t slope = lut[index + 1] - lut[index]; + // Base and slope are Q0.x + const int16_t base = lut[index]; + const int16_t slope = lut[index + 1] - lut[index]; - // Q0.15 * Q0.7 = Q0.22 - // Round and convert from Q0.22 to Q0.15 - int32_t delta = (static_cast(slope) * offset + 64) >> 7; + // Q0.x * Q0.7 = Q0.(x + 7) + // Round and convert from Q0.(x + 7) to Q0.x + const int delta = (slope * offset + 64) >> 7; // Q0.15 + Q0.15 - return base + delta; + return static_cast(base + delta); +} + +// int8_t -> int8_t table lookup without interpolation +// LUT must have 256 values +// LUTPopulate has ordered the LUT so that indexing it with an +// int8_t is just done by casting it to an uint8_t. +inline int8_t LUTLookup(int8_t value, const int8_t* lut) { + return lut[static_cast(value)]; +} + +// uint8_t -> uint8_t table lookup without interpolation +// LUT must have 256 values +inline uint8_t LUTLookup(uint8_t value, const uint8_t* lut) { + return lut[value]; } // Table of sigmoid(i/24) at 0.16 format - 256 elements. @@ -569,7 +849,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl( // InputIntegerBits - z_b_headroom - 0.25); const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp( FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)), + static_cast(InputIntegerBits - z_a_headroom_plus_1), + 31 - kAccumIntegerBits)), shifted_quarter); // z_b is treated like z_a, but premultiplying by sqrt(0.5). 
@@ -579,7 +860,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl( SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom); const FixedPointAccum z_b_pow_2_adj = SaturatingSub( FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)), + static_cast(InputIntegerBits - z_b_headroom), + 31 - kAccumIntegerBits)), shifted_quarter); const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw)); @@ -798,8 +1080,8 @@ inline void NdArrayDescsForElementwiseBroadcast(const Dims& input0_dims, // Copies dims to desc, calculating strides. template -inline void CopyDimsToDesc(const RuntimeShape& input_shape, - NdArrayDesc* desc_out) { +TFLITE_NOINLINE void CopyDimsToDesc(const RuntimeShape& input_shape, + NdArrayDesc* desc_out) { int desc_stride = 1; for (int i = N - 1; i >= 0; --i) { desc_out->extents[i] = input_shape.Dims(i); diff --git a/src/tensorflow/lite/kernels/internal/compatibility.h b/src/tensorflow/lite/kernels/internal/compatibility.h index 61becad..0d7f9d6 100644 --- a/src/tensorflow/lite/kernels/internal/compatibility.h +++ b/src/tensorflow/lite/kernels/internal/compatibility.h @@ -76,7 +76,7 @@ limitations under the License. #define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT #endif -#ifndef TF_LITE_STATIC_MEMORY +#ifndef ARDUINO // TODO(b/162019032): Consider removing these type-aliases. using int8 = std::int8_t; using uint8 = std::uint8_t; @@ -84,7 +84,17 @@ using int16 = std::int16_t; using uint16 = std::uint16_t; using int32 = std::int32_t; using uint32 = std::uint32_t; -#endif // !defined(TF_LITE_STATIC_MEMORY) +#endif // !defined(ARDUINO) + +// Allow for cross-compiler usage of function signatures - currently used for +// specifying named RUY profiler regions in templated methods. 
+#if defined(_MSC_VER) +#define TFLITE_PRETTY_FUNCTION __FUNCSIG__ +#elif defined(__GNUC__) +#define TFLITE_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#define TFLITE_PRETTY_FUNCTION __func__ +#endif // TFLITE_DEPRECATED() // diff --git a/src/tensorflow/lite/kernels/internal/cppmath.h b/src/tensorflow/lite/kernels/internal/cppmath.h index 24a3aec..430c0ad 100644 --- a/src/tensorflow/lite/kernels/internal/cppmath.h +++ b/src/tensorflow/lite/kernels/internal/cppmath.h @@ -19,9 +19,8 @@ limitations under the License. namespace tflite { -#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ - (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ - defined(__ZEPHYR__) +#if defined(ARDUINO) || \ + (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(__ZEPHYR__) #define TF_LITE_GLOBAL_STD_PREFIX #else #define TF_LITE_GLOBAL_STD_PREFIX std @@ -33,7 +32,8 @@ namespace tflite { return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ } -DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); +DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round) +DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1) } // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/optimized/neon_check.h b/src/tensorflow/lite/kernels/internal/optimized/neon_check.h index bbf745c..7df1129 100644 --- a/src/tensorflow/lite/kernels/internal/optimized/neon_check.h +++ b/src/tensorflow/lite/kernels/internal/optimized/neon_check.h @@ -15,26 +15,6 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ -#if defined(__ARM_NEON__) || defined(__ARM_NEON) -#define USE_NEON -#include -#endif - -#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON -#define USE_NEON -#include "NEON_2_SSE.h" -#endif - -// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is -// defined, PortableSomeFunc(args) otherwise. 
-#ifdef USE_NEON -// Always use Neon code -#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) - -#else -// No NEON available: Use Portable code -#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) - -#endif // defined(USE_NEON) +// TFLM does not need to utilize any Neon optimizations. #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ diff --git a/src/tensorflow/lite/kernels/internal/portable_tensor.h b/src/tensorflow/lite/kernels/internal/portable_tensor.h index 8b0f6d1..a9f9551 100644 --- a/src/tensorflow/lite/kernels/internal/portable_tensor.h +++ b/src/tensorflow/lite/kernels/internal/portable_tensor.h @@ -15,19 +15,15 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ -#include +#include #include -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { -inline RuntimeShape GetTensorShape(std::vector data) { - return RuntimeShape(data.size(), data.data()); -} - // A list of tensors in a format that can be used by kernels like split and // concatenation. template @@ -55,6 +51,26 @@ class VectorOfTensors { all_shape_ptr_.push_back(&all_shape_[i]); } } + + explicit VectorOfTensors(const std::vector& tensors) { + int num_tensors = tensors.size(); + + all_data_.reserve(num_tensors); + all_shape_.reserve(num_tensors); + all_shape_ptr_.reserve(num_tensors); + + for (auto* t : tensors) { + all_data_.push_back(GetTensorData(t)); + all_shape_.push_back(GetTensorShape(t)); + } + + // Taking the pointer from inside a std::vector is only OK if the vector is + // never modified, so we populate all_shape in the previous loop and then we + // are free to grab iterators here. 
+ for (int i = 0; i < num_tensors; ++i) { + all_shape_ptr_.push_back(&all_shape_[i]); + } + } // Return a pointer to the data pointers of all tensors in the list. For // example: // float* const* f = v.data(); @@ -67,6 +83,8 @@ class VectorOfTensors { // dims[1] are the dimensions of the second tensor in the list. const RuntimeShape* const* shapes() const { return all_shape_ptr_.data(); } + size_t size() const { return all_data_.size(); } + private: std::vector all_data_; std::vector all_shape_; diff --git a/src/tensorflow/lite/kernels/internal/portable_tensor_utils.cpp b/src/tensorflow/lite/kernels/internal/portable_tensor_utils.cpp new file mode 100644 index 0000000..577fc6b --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/portable_tensor_utils.cpp @@ -0,0 +1,98 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ + +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" + +#include +#include +#include + +#include "tensorflow/lite/core/c/builtin_op_data.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. 
Use of CpuBackendContext in method +// implementations is purely optional. +class CpuBackendContext; + +namespace tensor_utils { + +// Apply Rectified Linear to elements of a vector. +void ApplyReluToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(0.0f, vector[v]); + } +} + +// Apply Rectified Linear 1 (cap to [-1;1]) to elements of a vector +void ApplyRelu1ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(-1.0f, std::min(vector[v], 1.0f)); + } +} + +// Apply Rectified Linear 6 (cap to [0;6]) to elements of a vector +void ApplyRelu6ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(0.0f, std::min(vector[v], 6.0f)); + } +} + +// Apply signbit to elements of a vector +void ApplySignbitToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::signbit(vector[v]); + } +} + +void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, + int8_t* dst_buffer) { + // num_elements means the number of elements regardless of packed or unpacked. + // For example, 3 elements means both + // 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes. + // stored in src_buffer[0] and src_buffer[1] (i = 0..1) + // 2) Unpacked: 3 int8's = 3 bytes. + //. stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2) + for (int i = 0; i < num_elements / 2; i++) { + int8_t byte = src_buffer[i]; + // Shift left first so that sign is properly extended when shifted right + int8_t lower = static_cast(byte << 4) >> 4; + int8_t higher = byte >> 4; + dst_buffer[2 * i] = lower; + dst_buffer[2 * i + 1] = higher; + } + + // If the buffer size is odd, extract the final lower nibble. 
+ if (num_elements % 2 != 0) { + dst_buffer[num_elements - 1] = + static_cast(src_buffer[num_elements / 2] << 4) >> 4; + } +} + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ diff --git a/src/tensorflow/lite/kernels/internal/portable_tensor_utils.h b/src/tensorflow/lite/kernels/internal/portable_tensor_utils.h new file mode 100644 index 0000000..ed59fd0 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/portable_tensor_utils.h @@ -0,0 +1,624 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ + +#include +#include +#include + +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. Use of CpuBackendContext in method +// implementations is purely optional. +class CpuBackendContext; + +namespace tensor_utils { + +// Multiplies a matrix with a scalar and reduce the result on each row to a +// scalar. 
+// Parameters: +// - matrix: matrix of size n_row * n_col +// - scalar: the scalar that is multiplied to each element in the matrix +// - n_row: the row count of the matrix +// - n_col: the column count of the matrix +// - output: the 32bit output +// Note: We do not need saturation because the int8 * int8 is safe from overflow +// in (2^31-1) / (2^14) = 131072, which is bigger than the n_row. Non-zero +// initial output value is not exceptionally large. +void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar, + int32_t n_row, int32_t n_col, + int32_t* output); + +// Add another vector for each batch in the batch vector. +template +void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch, + T* batch_vector) { + for (int b = 0; b < n_batch; b++) { + for (int i = 0; i < v_size; ++i) { + batch_vector[i] += vector[i]; + } + batch_vector += v_size; + } +} + +// Cwise product of two vectors. +template +inline void VectorVectorCwiseProduct(const T* vector1, const T* vector2, + int v_size, T* result) { + for (int v = 0; v < v_size; v++) { + *result++ = *vector1++ * *vector2++; + } +} + +// Cwise product of a vector and a batch-vector. +template +inline void VectorBatchVectorCwiseProduct(const T* vector, int v_size, + const T* batch_vector, int n_batch, + T* result) { + for (int b = 0; b < n_batch; b++) { + VectorVectorCwiseProduct(vector, batch_vector, v_size, result); + // Update the pointers. + result += v_size; + batch_vector += v_size; + } +} + +// Cwise product and accumulate of two vectors. Since it's a MAC operation, the +// assumption here is that result array is initialized to valid values. +template +inline void VectorVectorCwiseProductAccumulate(const T* __restrict__ vector1, + const T* __restrict__ vector2, + int v_size, + T* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + *result++ += *vector1++ * *vector2++; + } +} + +// Cwise product and accumulate of a vector and a batch-vector. 
Since it's a MAC +// operation, the assumption here is that result array is initialized to valid +// values. +template +inline void VectorBatchVectorCwiseProductAccumulate(const T* vector, int v_size, + const T* batch_vector, + int n_batch, T* result) { + for (int b = 0; b < n_batch; b++) { + VectorVectorCwiseProductAccumulate(vector, batch_vector, v_size, result); + // Update the pointers. + result += v_size; + batch_vector += v_size; + } +} + +// Batch vector initialization with another vector. +template +void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch, + T* batch_vector) { + for (int b = 0; b < n_batch; b++) { + std::copy_n(vector, v_size, batch_vector + b * v_size); + } +} + +// Checks if all entries of vector are zero for float. +bool IsZeroVector(const float* vector, int v_size); + +// Checks if all entries of vector are zero for int8. +bool IsZeroVector(const int8_t* vector, int v_size); + +// Quantizes a buffer of floating point values using a symmetric quantization +// (i.e. linear quantization without an offset) to 8-bit signed integers. +// It also outputs the range (min, max) of the floating point buffer, and the +// scaling factor used to quantize the values. +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor); + +// Quantizes a buffer of floating point values using a symmetric quantization +// (i.e. linear quantization without an offset) to 8-bit signed integers. +// It uses the range (min, max) provided to the function to calculate the +// appropriate scaling factor to quantize the values. +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor); + +void AsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* scaling_factor, + int32_t* offset); + +// Helper function to quantize floats. 
+// float_data_ptr input float vectors +// n_batch number of input vectors +// n_data size of a single input vector +// quantized_data_ptr (out) vector with quantized data +// scaling_factors (out) scaling factors (one per vector) +// zero_points (out) zero points (one per vector) +// do_asymmetric controls if the quantization should be asymmetric. +inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch, + int n_data, int8_t* quantized_data_ptr, + float* scaling_factors, int32_t* zero_points, + bool do_asymmetric) { + for (int b = 0; b < n_batch; ++b) { + const int offset = b * n_data; + if (do_asymmetric) { + tensor_utils::AsymmetricQuantizeFloats( + float_data_ptr + offset, n_data, quantized_data_ptr + offset, + &scaling_factors[b], &zero_points[b]); + } else { + float unused_min, unused_max; + tensor_utils::SymmetricQuantizeFloats( + float_data_ptr + offset, n_data, quantized_data_ptr + offset, + &unused_min, &unused_max, &scaling_factors[b]); + } + } +} + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. +// This product will be accumulated to the result buffer. +void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, + int m_cols, const float* vector, + int n_batch, float* result); + +// Same as the function above, but the matrix is a sparse tensor with block +// pattern 1x4. +// This function assumes that m_cols is a multiple of the block size (4 in this +// case) so that there's no incomplete block. 
+void SparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result); + +// Same as the function above, but the matrix is stored in block compressed +// sparse row format with block pattern 1x16 which consists of two arrays: +// 1. A matrix array stores non-zero blocks of the matrix in row major. +// 2. A ledger array stores nrows groups, one group per row. Each group starts +// with an integer representing the number of non-zero blocks for the +// corresponding row and follows with column indexes of the first element +// of each non-zero block. +// This function assumes that +// 1. m_cols is a multiple of 16 so that all blocks are full blocks. +// 2. m_cols < 254 * 16 so that block index can be represented by uint8. +void SparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result); + +// Same as the function above, but for values quantized using symmetric +// quantization (e.g. by calling SymmetricQuantizeFloats). +// The passed scaling factors is a buffer of the quantization scaling factors +// that will be used to dequentize the products into the final result buffer. +// These scaling factors are the multiplication of the matrix scaling factor +// by the vector's scaling factor, one per batch (i.e. this allows quantizing +// each batch in the batch-vector matrix independently). 
+void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result); + +// Same as the function above except that vector values +// are quantized with asymmetric quantization per-batch and the matrix +// is quantized per row. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result, const float* __restrict__ per_channel_scale, + const int32_t* __restrict__ input_offset); + +// Same as the function above, but the matrix is a sparse tensor with block +// pattern 1x16. +// This function assumes that m_cols is a multiple of the block size (16 in this +// case) so that there's no incomplete block. Also, it assumes all offsets of +// input, output and filter are zero. +void SparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + int32_t output_shift, const int32_t* per_channel_scale, + const int32_t* per_channel_shift, int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result); + +// Same as the function above, but the matrix is stored in block compressed +// sparse row format with block pattern 1x16 which consists of two arrays: +// 1. A matrix array stores non-zero blocks of the matrix in row major. +// 2. A ledger array stores nrows groups, one group per row. 
Each group starts +// with an integer representing the number of non-zero blocks for the +// corresponding row followed by column index of the first element of +// each non-zero block. +// This function assumes that +// 1. m_cols is a multiple of 16 so that all blocks are full blocks. +// 2. m_cols < 254 * 16 so that block index can be represented by uint8. +void SparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger, + const int m_rows, const int m_cols, const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result, const float* per_channel_scale = nullptr); + +// Same as the above 8, 8, 8 integer matmul except for the presence of zero +// point and non-accumulative. +// TODO(b/148688698): remove this function by folding zero point calculation in +// prepare() function. +void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, int32_t n_cell, + int8_t* gate_output, int8_t gate_output_zp); + +// Same as above but has 16 bit and 8 bit input and 8 bit output. +// Used in projection when hidden is 16bit. +void MatrixBatchVectorMultiply(const int16_t* hidden, + const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, + int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, + int32_t n_hidden, int32_t n_output, + int32_t output_zp, int8_t* proj_output); + +// Apply Layer Normalization (https://arxiv.org/abs/1607.06450) to a Quantized +// vector. +// Parameters: +// - input: batch vector of size n_batch * n_input; 16 bit. +// - layer_norm_weights: the quantized layer normalization weights. +// - bias: the bias for the layer normalization. +// - layer_norm_scale_a: multiplier for scale factor. 
+// - layer_norm_scale_b: shift for scale factor. +// - variance_limit: the guard to make sure the inverse does not overflow. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output); + +// Same as above but the internal calculation is done in float. +void ApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output); + +// Apply Sigmoid to a quantized vector. +// Parameters: +// - input: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +// The input is in Q3.12 format and the output is in Q0.15 format. +void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output); + +// Same as above but the internal calculation is float. +void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output); + +// Apply Tanh to a quantized vector. +// Parameters: +// - integer_bits: the integer bits of the input. +// Currently supports 0, 1, 2, 3, 4, 5, 6. +// - input: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +// The input is in Qm.15-m format and the output is in Q0.15 format. +void ApplyTanh(int32_t intger_bits, const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +// Apply Tanh to a quantized vector. The internal calculation is in float. +// - Input has 2^(integer_bits) as scale. +// - Output has Q0.15 as scale. 
+void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int32_t integer_bits, int16_t* output); + +// Element-wise multiplication of two quantized vectors. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - shift: the shift needed to produce the output. +// - output: the 16 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int16_t* output); + +// Element-wise multiplication of two quantized vectors. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - shift: the shift needed to produce the output. +// - output: the 8 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int8_t* output); + +// Element-wise multiplication of two quantized vectors with rescaling. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - multiplier: the multiplier part of scale. +// - shift: the shift part of scale. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 8 bit output of size n_batch * n_input. +// - output_zp: the zero point of output. +// Output does not need to be initialized. +// Multiplier ("m") and shift ("s") are connected to scale ("s") with s = m * +// 2^(s - 31). 
+void CwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output); + +// Element-wise saturating addition of two quantized vectors without rescaling. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 8 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output); + +// Element-wise in-place clipping of a vector. Overloaded for float, int16_t, +// int8_t. Parameters: +// - vector: vector of size v_size. +// - v_size: the size of the vector. +// - clipping_value: the value used for clipping. +void CwiseClipping(float* vector, const int v_size, const float clipping_value); +void CwiseClipping(int16_t* vector, const int v_size, + const int16_t clipping_value); +void CwiseClipping(int8_t* vector, const int v_size, + const int8_t clipping_value); + +// Dot product of two vectors. +float VectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size); + +// Dot product of two batch vectors of size n_batch * v_size: +// vector1 = [x_1_1, x_1_2, ..., x_1_vsize, +// x_2_1, x_2_2, ..., x_2_vsize, +// ... +// x_nbatch_1,..., x_nbatch_vsize] +// vector2 = [y_1_1, y_1_2, ..., y_1_vsize, +// y_2_1, y_2_2, ..., y_2_vsize, +// ... +// y_nbatch_1,..., y_nbatch_vsize] +// Then result will be a vector of n_batch size starting from 'result': +// [x_1_1 * y_1_1 + x_1_2 * y_1_2 + ... + x_1_vsize * y_1_vsize, +// x_2_1 * y_2_1 + x_2_2 * y_2_2 + ... + x_2_vsize * y_2_vsize, +// ... +// x_nbatch_1 * y_nbatch_1 + ... 
+ x_nbatch_vsize * y_nbatch_vsize] +template +inline void BatchVectorBatchVectorDotProduct(const T* vector1, const T* vector2, + int v_size, int n_batch, + T* result) { + for (int b = 0; b < n_batch; b++) { + result[b] = VectorVectorDotProduct(vector1, vector2, v_size); + vector1 += v_size; + vector2 += v_size; + } +} + +// Same as above but input is 16bit and output is 32bit. +void BatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size, + int n_batch, int32_t* result); + +// Same as above, but inputs are 16bit integer and output is 16bit integer. +void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size, + const int16_t* batch_vector, + int n_batch, int32_t multiplier, + int shift, int16_t* result); + +// Compute "1.0f - elements of vector" (used in CIFG). +void Sub1Vector(const float* vector, int v_size, float* result); + +// Compute "1.0f - elements of vector" (used in CIFG) for int16 input. +// "vector" has range [0, 32767] because it is the output of sigmoid function. +void Sub1Vector(const int16_t* vector, int v_size, int16_t* result); + +// Reduce-sum on a float input vector: +// input_vector: float pointer to input vector. +// output_vector: float pointer to vector. +// output_size: output vector size. +// reduction_size: number of consecutive elements from input vector which are +// added to get one element of output. +void ReductionSumVector(const float* input_vector, float* output_vector, + int output_size, int reduction_size); + +// Same as above but input/output is 32 bit integer. +void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size); + +// Same as above but input is 8 bit integer. +void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size); + +// Multiply all elements of vector with a scalar. 
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result); + +// Layer norm for each batch. +void MeanStddevNormalization(const float* input_vector, float* output_vector, + int v_size, int n_batch); + +// Saturate Add with rescale on both inputs. +void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, int32_t n_batch, + int32_t n_cell, int16_t* output); + +// Same as the function above, but provide a scratch buffer for the +// int8 x int8 -> int32 and a CpuBackendContext for the accumulator +// computation. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + int32_t* __restrict__ scratch, float* __restrict__ result, + CpuBackendContext* __restrict__ context); + +// Same as the function above except that can make use of cached row sums. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context); + +// Same as the function above, but provides separate scaling factor for the +// matrix and the vectors. The scaling factors are multiplied in the +// scaling_factor_scratch buffer. 
+inline void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float matrix_scaling_factor, + const float* vector_scaling_factors, int n_batch, + float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, float* scaling_factor_scratch, + CpuBackendContext* context) { + for (int b = 0; b < n_batch; ++b) { + scaling_factor_scratch[b] = + vector_scaling_factors[b] * matrix_scaling_factor; + } + MatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors, + scaling_factor_scratch, n_batch, result, + per_channel_scale, input_offset, scratch, + row_sums, compute_row_sums, context); +} + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. +// This product will be accumulated to the result buffer, +// Parameters: +// - input: batch vector of size n_batch * n_input +// - bias: vector of size b_input +// - input_to_gate_weights: matrix of size n_input * n_output +// - multiplier: scalar +// - shift: scalar +// - n_batch: the batch size +// - n_input: the input size +// - n_output: the output size +// - output_zp: the zero point of the output. +// - scratch: batch vector of size n_batch * n_output +// - output: the 16 bit output +// Notes: +// - this is used for gate matmul: for non-cifg it is for input, forget, +// cell, output gates; for cifg, it is for forget, cell, output gates. +// - multiplier and shift combined gives the scale. +// - assumes input zero point is 0. +// - scratch is created for optimization purpose only. 
+// TODO(b/152066492): this can be removed if some future optimization +// work makes it unnecessary. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context); + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. +// This product will be accumulated to the result buffer, +// Parameters: +// - input: batch vector of size n_batch * n_input +// - bias: vector of size b_input +// - input_to_gate_weights: matrix of size n_input * n_output +// - multiplier: scalar +// - shift: scalar +// - n_batch: the batch size +// - n_input: the input size +// - n_output: the output size +// - output_zp: the zero point of the output. +// - scratch: batch vector of size n_batch * n_output +// - output: the 8 bit output +// Notes: +// - this is used for projection matmul. +// - multiplier and shift combined gives the scale. +// - assumes input zero point is 0. +// - scratch is created for optimization purpose only. +// TODO(b/152066492): this can be removed if some future optimization +// work makes it unnecessary. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context); + +// Apply Rectified Linear to elements of a vector. 
+void ApplyReluToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply Rectified Linear 1 (cap to [-1;1]) to elements of a vector +void ApplyRelu1ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply Rectified Linear 6 (cap to [0;6]) to elements of a vector +void ApplyRelu6ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply signbit to elements of a vector +void ApplySignbitToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Unpack or inflate `src_buffer` by taking each element and splitting it as +// two elements into `dst_buffer`. +// Parameters: +// src_buffer : Densely packed buffer containing int4 values +// num_elements : Number of elements stored in the buffer. Note that this can +// be smaller than the size of `src_buffer` by 1 if it's odd, +// in which case the last nibble in `src_buffer` is ignored. +// This should be equal to the size of `dst_buffer`. +// dst_buffer : Buffer to unpack into. Should be allocated by the caller. +// Size should be at least `num_elements`. +// Notes: +// For example, given `src_buffer = {0x12, 0x34};`, calling this function +// will return `dst_buffer = {0x02, 0x01, 0x04, 0x03}`. 
+void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, + int8_t* dst_buffer); + +} // namespace tensor_utils + +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ diff --git a/src/tensorflow/lite/kernels/internal/quantization_util.cpp b/src/tensorflow/lite/kernels/internal/quantization_util.cpp index cf431cf..62045d6 100644 --- a/src/tensorflow/lite/kernels/internal/quantization_util.cpp +++ b/src/tensorflow/lite/kernels/internal/quantization_util.cpp @@ -52,6 +52,11 @@ constexpr uint32_t kFractionRoundingThreshold = 0x00200000; void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int* shift) { +#if TFLITE_SINGLE_ROUNDING + // Single-rounding MultiplyByQuantizedMultiplier only supports positive + // multipliers. + // TFLITE_DCHECK(double_multiplier >= 0); +#endif if (double_multiplier == 0.) { *quantized_multiplier = 0; *shift = 0; @@ -65,10 +70,10 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int64_t q_fixed = IntegerFrExp(double_multiplier, shift); #else // TFLITE_EMULATE_FLOAT const double q = std::frexp(double_multiplier, shift); - auto q_fixed = static_cast(TfLiteRound(q * (1ll << 31))); + auto q_fixed = static_cast(TfLiteRound(q * (1LL << 31))); #endif // TFLITE_EMULATE_FLOAT - TFLITE_CHECK(q_fixed <= (1ll << 31)); - if (q_fixed == (1ll << 31)) { + TFLITE_CHECK(q_fixed <= (1LL << 31)); + if (q_fixed == (1LL << 31)) { q_fixed /= 2; ++*shift; } @@ -87,6 +92,14 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, *shift = 0; q_fixed = 0; } +#if TFLITE_SINGLE_ROUNDING + // Single-rounding MultiplyByQuantizedMultiplier doesn't support a shift > 30, + // saturate it. 
+ if (*shift > 30) { + *shift = 30; + q_fixed = (1LL << 31) - 1; + } +#endif *quantized_multiplier = static_cast(q_fixed); } @@ -278,6 +291,12 @@ void PreprocessSoftmaxScaling(double beta, double input_scale, // result is double equivalent of Q0.31 (actually with more precision). Thus // this generates a Q(input_integer_bits).(31-input_integer_bits) // representation. +#if TFLITE_SINGLE_ROUNDING + const double max_real_multiplier = (1LL << 30) - 1.0; +#else + const double max_real_multiplier = (1LL << 31) - 1.0; +#endif + #ifdef TFLITE_EMULATE_FLOAT const double input_beta = IntegerDoubleMultiply(beta, input_scale); int shift; @@ -285,12 +304,14 @@ void PreprocessSoftmaxScaling(double beta, double input_scale, shift += (31 - input_integer_bits); double input_beta_real_multiplier = DoubleFromFractionAndShift(fraction, shift); - if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) { - input_beta_real_multiplier = (1ll << 31) - 1.0; + if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) > + 0) { + input_beta_real_multiplier = max_real_multiplier; } #else // TFLITE_EMULATE_FLOAT - const double input_beta_real_multiplier = std::min( - beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0); + const double input_beta_real_multiplier = + std::min(beta * input_scale * (1 << (31 - input_integer_bits)), + max_real_multiplier); #endif // TFLITE_EMULATE_FLOAT QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, @@ -324,8 +345,8 @@ int CalculateInputRadius(int input_integer_bits, int input_left_shift, #else // TFLITE_EMULATE_FLOAT const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) * - (1ll << (total_signed_bits - input_integer_bits)) / - (1ll << input_left_shift); + (1LL << (total_signed_bits - input_integer_bits)) / + (1LL << input_left_shift); // Tighten bound using floor. Suppose that we could use the exact value. // After scaling the difference, the result would be at the maximum. 
Thus we // must ensure that our value has lower magnitude. diff --git a/src/tensorflow/lite/kernels/internal/quantization_util.h b/src/tensorflow/lite/kernels/internal/quantization_util.h index 0ee914b..eb4e840 100644 --- a/src/tensorflow/lite/kernels/internal/quantization_util.h +++ b/src/tensorflow/lite/kernels/internal/quantization_util.h @@ -20,7 +20,6 @@ limitations under the License. #include #include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { @@ -103,6 +102,7 @@ QuantizationParams ChooseQuantizationParams(double rmin, double rmax) { return ChooseQuantizationParams(rmin, rmax, false); } +// LINT.IfChange // Converts a floating-point number to an integer. For all inputs x where // static_cast(x) is legal according to the C++ standard, the result // is identical to that cast (i.e. the result is x with its fractional part @@ -167,6 +167,7 @@ IntOut SafeCast(FloatIn x) { return x < 0 ? std::numeric_limits::min() : std::numeric_limits::max(); } +// LINT.ThenChange(//tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.h) // Decompose a double multiplier into a Q0.31 int32 representation of its // significand, and shift representation of NEGATIVE its exponent --- diff --git a/src/tensorflow/lite/kernels/internal/reference/add.h b/src/tensorflow/lite/kernels/internal/reference/add.h index 3da76d8..0df54cb 100644 --- a/src/tensorflow/lite/kernels/internal/reference/add.h +++ b/src/tensorflow/lite/kernels/internal/reference/add.h @@ -15,8 +15,14 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ -#include "fixedpoint/fixedpoint.h" +#include +#include +#include +#include + +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" namespace tflite { @@ -27,25 +33,14 @@ inline void Add(const ArithmeticParams& params, const RuntimeShape& input1_shape, const T* input1_data, const RuntimeShape& input2_shape, const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] + input2_data[i], params.quantized_activation_min, - params.quantized_activation_max); - } -} + T activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const float* input1_data, - const RuntimeShape& input2_shape, const float* input2_data, - const RuntimeShape& output_shape, float* output_data) { const int flat_size = MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; i++) { - auto x = input1_data[i] + input2_data[i]; + for (int i = 0; i < flat_size; ++i) { output_data[i] = ActivationFunctionWithMinMax( - x, params.float_activation_min, params.float_activation_max); + input1_data[i] + input2_data[i], activation_min, activation_max); } } @@ -202,59 +197,137 @@ inline void Add(const ArithmeticParams& params, } } -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> 
desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); +template +inline void AddBroadcast(const T* input_data, const T* broadcast_data, + T* output_data, size_t size, T activation_min, + T activation_max) { + for (size_t c = 0; c < size; ++c) { + output_data[c] = ActivationFunctionWithMinMax( + input_data[c] + broadcast_data[0], activation_min, activation_max); + } +} - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] + - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.float_activation_min, params.float_activation_max); - } - } +template <> +inline void AddBroadcast(const int32_t* input_data, + const int32_t* broadcast_data, + int32_t* output_data, size_t size, + int32_t activation_min, + int32_t activation_max) { + size_t c = 0; +#ifdef USE_NEON + const int32x4_t vmax = vdupq_n_s32(activation_max); + const int32x4_t vmin = vdupq_n_s32(activation_min); + const int32x4_t vb = vdupq_n_s32(broadcast_data[0]); + for (; c + 4 <= size; c += 4) { + const int32x4_t va = vld1q_s32(&input_data[c]); + int32x4_t vres = vaddq_s32(va, vb); + vres = vmaxq_s32(vmin, vres); + vres = vminq_s32(vmax, vres); + vst1q_s32(&output_data[c], vres); + } +#endif + for (; c < size; ++c) { + output_data[c] = ActivationFunctionWithMinMax( + input_data[c] + broadcast_data[0], activation_min, activation_max); + } +} + +template +void AddElementwise(const T* input1_data, const T* input2_data, T* output_data, + size_t size, T activation_min, T activation_max) { + for (size_t c = 0; c < size; ++c) { + output_data[c] = ActivationFunctionWithMinMax( + input1_data[c] + input2_data[c], activation_min, activation_max); + } +} + +template <> +inline void AddElementwise(const int32_t* input1_data, + const int32_t* input2_data, + int32_t* output_data, size_t size, + int32_t activation_min, + int32_t activation_max) { + size_t c = 0; +#ifdef USE_NEON + const int32x4_t vmax = vdupq_n_s32(activation_max); + const int32x4_t vmin = vdupq_n_s32(activation_min); + for (; c + 4 <= size; c += 4) { + const int32x4_t va = 
vld1q_s32(&input1_data[c]); + const int32x4_t vb = vld1q_s32(&input2_data[c]); + int32x4_t vres = vaddq_s32(va, vb); + vres = vmaxq_s32(vmin, vres); + vres = vminq_s32(vmax, vres); + vst1q_s32(&output_data[c], vres); + } +#endif + for (; c < size; ++c) { + output_data[c] = ActivationFunctionWithMinMax( + input1_data[c] + input2_data[c], activation_min, activation_max); + } +} + +template +inline void BroadcastAddRecursiveDimensions( + int dimension, size_t* input1_offset_p, size_t* input2_offset_p, + size_t* output_offset, size_t* compressed_input1_stride, + size_t* compressed_input2_stride, size_t* compressed_output_shape, + T activation_min, T activation_max, const T* input1_data, + const T* input2_data, T* output_data) { + if (dimension > 0) { + for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) { + size_t input1_offset_c = *input1_offset_p; + size_t input2_offset_c = *input2_offset_p; + BroadcastAddRecursiveDimensions( + dimension - 1, &input1_offset_c, &input2_offset_c, output_offset, + compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, activation_min, activation_max, input1_data, + input2_data, output_data); + *input1_offset_p += compressed_input1_stride[dimension]; + *input2_offset_p += compressed_input2_stride[dimension]; } + } else { + TFLITE_DCHECK(dimension == 0); + bool input1_is_broadcast = compressed_input1_stride[dimension] == 0; + bool input2_is_broadcast = compressed_input2_stride[dimension] == 0; + TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast)); + const T* input1_data_ptr = input1_data + *input1_offset_p; + const T* input2_data_ptr = input2_data + *input2_offset_p; + T* output_data_ptr = output_data + *output_offset; + if (input1_is_broadcast) { + // input1 is broadcast. 
+ AddBroadcast(input2_data_ptr, input1_data_ptr, output_data_ptr, + compressed_output_shape[dimension], activation_min, + activation_max); + *input2_offset_p += compressed_output_shape[dimension]; + } else if (input2_is_broadcast) { + // input2 is broadcast. + AddBroadcast(input1_data_ptr, input2_data_ptr, output_data_ptr, + compressed_output_shape[dimension], activation_min, + activation_max); + *input1_offset_p += compressed_output_shape[dimension]; + } else { + // Add element-wise. + AddElementwise(input1_data_ptr, input2_data_ptr, output_data_ptr, + compressed_output_shape[dimension], activation_min, + activation_max); + *input1_offset_p += compressed_output_shape[dimension]; + *input2_offset_p += compressed_output_shape[dimension]; + } + *output_offset += compressed_output_shape[dimension]; } } -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); +template +inline typename std::enable_if::value || dummy, void>::type +BroadcastAdd6DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + constexpr int kMaxBroadcastDim = 6; + T activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -267,19 +340,74 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, // We name our variables by their 
Tensorflow convention, but generate C code // nesting loops such that the innermost loop has the smallest stride for the // best cache behavior. - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] + - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.quantized_activation_min, - params.quantized_activation_max); - } - } + size_t compressed_input1_stride[kMaxBroadcastDim]; + size_t compressed_input2_stride[kMaxBroadcastDim]; + size_t compressed_output_shape[kMaxBroadcastDim]; + bool broadcastable_shape = ReduceDimensionsForBroadcast( + input1_shape, input2_shape, compressed_input1_stride, + compressed_input2_stride, compressed_output_shape); + // Skip broadcasting for degenerate shapes. + if (!broadcastable_shape) { + return; + } + + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastAddRecursiveDimensions( + kMaxBroadcastDim - 1, &input1_offset, &input2_offset, &output_offset, + compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, activation_min, activation_max, input1_data, + input2_data, output_data); +} + +// This function is used for 8-bit as well as for 16-bit, but the accumulator +// is 32-bit for both cases. The overflow does not happen due to the +// choice of the shift (20 or 15, accordingly - see add.cc for more comments). 
+template +inline void BroadcastAddRecursiveDimensions( + const ArithmeticParams& params, int dimension, size_t* input1_offset_p, + size_t* input2_offset_p, size_t* output_offset, + size_t* compressed_input1_stride, size_t* compressed_input2_stride, + size_t* compressed_output_shape, const T* input1_data, const T* input2_data, + T* output_data) { + for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) { + if (dimension > 0) { + size_t input1_offset_c = *input1_offset_p; + size_t input2_offset_c = *input2_offset_p; + BroadcastAddRecursiveDimensions( + params, dimension - 1, &input1_offset_c, &input2_offset_c, + output_offset, compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, input1_data, input2_data, output_data); + } else { + TFLITE_DCHECK(dimension == 0); + const int32_t input1_val = + params.input1_offset + input1_data[*input1_offset_p]; + const int32_t input2_val = + params.input2_offset + input2_data[*input2_offset_p]; + const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); + const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); + const int32_t scaled_input1_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input1_val, params.input1_multiplier, + params.input1_shift); + const int32_t scaled_input2_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input2_val, params.input2_multiplier, + params.input2_shift); + const int32_t raw_sum = scaled_input1_val + scaled_input2_val; + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + raw_sum, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[*output_offset] = static_cast(clamped_output); + ++(*output_offset); } + *input1_offset_p += compressed_input1_stride[dimension]; + *input2_offset_p += 
compressed_input2_stride[dimension]; } } @@ -287,16 +415,12 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, // is 32-bit for both cases. The overflow does not happen due to the // choice of the shift (20 or 15, accordingly - see add.cc for more comments). template -inline void BroadcastAdd4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); +inline typename std::enable_if::value, void>::type +BroadcastAdd6DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + constexpr int kMaxBroadcastDim = 6; // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -309,42 +433,33 @@ inline void BroadcastAdd4DSlow( // We name our variables by their Tensorflow convention, but generate C code // nesting loops such that the innermost loop has the smallest stride for the // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t shifted_input1_val = - input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = - input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, - params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, - params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } + size_t compressed_input1_stride[kMaxBroadcastDim]; + size_t compressed_input2_stride[kMaxBroadcastDim]; + size_t compressed_output_shape[kMaxBroadcastDim]; + bool broadcastable_shape = ReduceDimensionsForBroadcast( + input1_shape, input2_shape, compressed_input1_stride, + compressed_input2_stride, compressed_output_shape); + // Skip broadcasting for degenerate shapes. 
+ if (!broadcastable_shape) { + return; } + + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastAddRecursiveDimensions( + params, kMaxBroadcastDim - 1, &input1_offset, &input2_offset, + &output_offset, compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, input1_data, input2_data, output_data); +} + +template +inline void BroadcastAdd4DSlow( + const ArithmeticParams& params, const RuntimeShape& input1_shape, + const T* input1_data, const RuntimeShape& input2_shape, + const T* input2_data, const RuntimeShape& output_shape, T* output_data) { + return BroadcastAdd6DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data); } inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params, diff --git a/src/tensorflow/lite/kernels/internal/reference/add_n.h b/src/tensorflow/lite/kernels/internal/reference/add_n.h new file mode 100644 index 0000000..b6b5882 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/add_n.h @@ -0,0 +1,86 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ + +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_ops { + +// T is expected to be either float or int. +template +inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs, + const T* const* input_data, T* output_data) { + // All inputs and output should have the same shape, this is checked during + // Prepare stage. + const size_t size = input_shape.FlatSize(); + for (size_t i = 0; i < size; ++i) { + T x = 0; + for (size_t j = 0; j < num_inputs; ++j) { + x += input_data[j][i]; + } + output_data[i] = x; + } +} + +inline void AddN(const ArithmeticParams& params, + const RuntimeShape& input_shape, const size_t num_inputs, + const int8_t* const* input_data, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + // All inputs should have same zero-point and scale, this is checked during + // Prepare stage. + TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); + TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); + + // All inputs and output should have the same shape, this is checked during + // Prepare stage. 
+ const size_t size = input_shape.FlatSize(); + for (size_t i = 0; i < size; ++i) { + // accumulate in scaled_x before clamping to avoid overflow + const int32_t x = params.input1_offset; // x = 0 + const int32_t shifted_x = x * (1 << params.left_shift); + int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_x, params.input1_multiplier, params.input1_shift); + + for (size_t j = 0; j < num_inputs; ++j) { + const int32_t y = params.input1_offset + input_data[j][i]; + const int32_t shifted_y = y * (1 << params.left_shift); + int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_y, params.input1_multiplier, params.input1_shift); + scaled_x += scaled_y; + } + + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + scaled_x, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[i] = static_cast(clamped_output); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h b/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h index e6f34fd..8154fbf 100644 --- a/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h +++ b/src/tensorflow/lite/kernels/internal/reference/arg_min_max.h @@ -15,12 +15,23 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ +#include + #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { namespace reference_ops { +template +std::function GetComparefunction(bool is_arg_max) { + if (is_arg_max) { + return std::greater(); + } else { + return std::less(); + } +} + template void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, const T3* input2_data, const RuntimeShape& output_shape, @@ -62,6 +73,15 @@ void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, } } } + +template +void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, + const T3* input2_data, const RuntimeShape& output_shape, + T2* output_data, const bool is_arg_max) { + ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data, + GetComparefunction(is_arg_max)); +} + } // namespace reference_ops } // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/reference/batch_matmul.h b/src/tensorflow/lite/kernels/internal/reference/batch_matmul.h new file mode 100644 index 0000000..d836962 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/batch_matmul.h @@ -0,0 +1,280 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ + +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { +namespace batch_matmul { + +// Determine which dimension is the broadcast dimension. +inline int broadcast_dim(int lhs_dim, int rhs_dim) { + if (lhs_dim == rhs_dim) return lhs_dim; + if (lhs_dim == 1) return rhs_dim; + TFLITE_DCHECK_EQ(rhs_dim, 1); + return lhs_dim; +} + +// Compute the "extent" for iterating on this dimension. +// If we are broadcasting, then don't advance (i.e return 0). +inline int extent(const RuntimeShape& shape, int x) { + if (shape.Dims(x) == 1) { + return 0; + } + int prod = 1; + for (int i = x + 1; i < shape.DimensionsCount(); ++i) { + prod *= shape.Dims(i); + } + return prod; +} + +} // namespace batch_matmul + +template +inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data, + const RuntimeShape& rhs_shape, const Tb* rhs_data, + const RuntimeShape& output_shape, Tout* output_data) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = 
batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. + const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) + + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + for (int j = 0; j < rhs_cols; ++j) { + for (int i = 0; i < lhs_rows; ++i) { + Tout total = 0; + for (int k = 0; k < accum_depth; ++k) { + total += static_cast(lhs_ptr2[accum_depth * i + k]) * + static_cast(rhs_ptr2[j * accum_depth + k]); + } + int idx = lhs_rows * j + i; + out_ptr[idx] = total; + } + } + } + } + } +} + +inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data, + const RuntimeShape& rhs_shape, const int8_t* rhs_data, + const float* scaling_factors, + const int32_t* input_offset, int32_t* row_sums, + const RuntimeShape& output_shape, float* output_data, + bool* compute_row_sums, + const float* per_channel_scales) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), 
extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. + const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols; + const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols; + const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols; + const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows; + const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows; + const int woff_ext2 = lhs_ext2 == 0 ? 
0 : lhs_rows; + + if (!compute_row_sums || *compute_row_sums) { + int num_weights_matrices = 1; + for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) { + num_weights_matrices *= extended_lhs_shape.Dims(i); + } + tensor_utils::ReductionSumVector( + lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); + if (compute_row_sums) { + *compute_row_sums = false; + } + } + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0); + const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0); + const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1); + const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1); + const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1); + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2); + const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2); + const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2); + float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) + + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + for (int j = 0; j < rhs_cols; ++j) { + const float batch_scaling_factor = scale_ptr2[j]; + const float batch_offset = static_cast(ioff_ptr2[j]); + for (int i = 0; i < lhs_rows; ++i) { + int32_t total = 0; + for (int k = 0; k < accum_depth; ++k) { + total += + lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k]; + } + int32_t row_sum = woff_ptr2[i]; + total -= row_sum * batch_offset; + int idx = lhs_rows * j + i; + float scale = batch_scaling_factor; + if (per_channel_scales) { + 
scale *= per_channel_scales[i]; + } + out_ptr[idx] += scale * total; + } + } + } + } + } +} + +template +inline void BatchMatMul(const FullyConnectedParams& params, + const RuntimeShape& lhs_shape, const T* lhs_data, + const RuntimeShape& rhs_shape, const T* rhs_data, + const RuntimeShape& output_shape, T* output_data) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. 
+ const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + const int32_t input_offset = params.input_offset; + const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_multiplier = params.output_multiplier; + const int output_shift = params.output_shift; + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + T* out_ptr = output_data + + ((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + + for (int j = 0; j < rhs_cols; ++j) { + for (int i = 0; i < lhs_rows; ++i) { + AccumT total = 0; + for (int k = 0; k < accum_depth; ++k) { + AccumT lhs_val = lhs_ptr2[accum_depth * i + k]; + AccumT rhs_val = rhs_ptr2[accum_depth * j + k]; + total += (lhs_val + filter_offset) * (rhs_val + input_offset); + } + int32_t total_scaled = MultiplyByQuantizedMultiplier( + total, output_multiplier, output_shift); + total_scaled += output_offset; + total_scaled = std::max(total_scaled, output_activation_min); + total_scaled = std::min(total_scaled, output_activation_max); + const int idx = lhs_rows * j + i; + out_ptr[idx] = static_cast(total_scaled); + } + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ diff --git 
a/src/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h b/src/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h new file mode 100644 index 0000000..54c5260 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h @@ -0,0 +1,101 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ + +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +// TODO(b/135760455): Move this method anonymous namespace in a cc file. 
+inline RuntimeShape ExtendShapeBatchToSpace(const RuntimeShape& shape) { + if (shape.DimensionsCount() == 4) { + return shape; + } + RuntimeShape new_shape(4, 1); + new_shape.SetDim(0, shape.Dims(0)); + new_shape.SetDim(1, shape.Dims(1)); + new_shape.SetDim(3, shape.Dims(2)); + return new_shape; +} + +template +inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const int32_t* block_shape_data, + const RuntimeShape& unextended_input3_shape, + const int32_t* crops_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("BatchToSpaceND"); + TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3); + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(), + unextended_output_shape.DimensionsCount()); + + const RuntimeShape input1_shape = + ExtendShapeBatchToSpace(unextended_input1_shape); + const RuntimeShape output_shape = + ExtendShapeBatchToSpace(unextended_output_shape); + + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch_size = output_shape.Dims(0); + + const int depth = input1_shape.Dims(3); + const int input_width = input1_shape.Dims(2); + const int input_height = input1_shape.Dims(1); + const int input_batch_size = input1_shape.Dims(0); + + const int block_shape_height = block_shape_data[0]; + const int block_shape_width = + unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1; + const int crops_top = crops_data[0]; + const int crops_left = + unextended_input1_shape.DimensionsCount() == 4 ? 
crops_data[2] : 0; + for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) { + const int out_batch = in_batch % output_batch_size; + const int spatial_offset = in_batch / output_batch_size; + for (int in_h = 0; in_h < input_height; ++in_h) { + const int out_h = in_h * block_shape_height + + spatial_offset / block_shape_width - crops_top; + if (out_h < 0 || out_h >= output_height) { + continue; + } + for (int in_w = 0; in_w < input_width; ++in_w) { + const int out_w = in_w * block_shape_width + + spatial_offset % block_shape_width - crops_left; + + if (out_w < 0 || out_w >= output_width) { + continue; + } + T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0); + const T* in = + input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0); + memcpy(out, in, depth * sizeof(T)); + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/binary_function.h b/src/tensorflow/lite/kernels/internal/reference/binary_function.h index 1711940..0b124af 100644 --- a/src/tensorflow/lite/kernels/internal/reference/binary_function.h +++ b/src/tensorflow/lite/kernels/internal/reference/binary_function.h @@ -43,16 +43,27 @@ inline void BroadcastBinaryFunction4DSlow( NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); + const int* dims_data = + reinterpret_cast(output_shape.DimsDataUpTo5D()); for (int b = 0; b < output_shape.Dims(0); ++b) { + int out_idx_b = b * dims_data[1]; + int in_idx1_b = desc1.strides[0] * b; + int in_idx2_b = desc2.strides[0] * b; for (int y = 0; y < output_shape.Dims(1); ++y) { + int out_idx_y = (out_idx_b + y) * dims_data[2]; + int in_idx1_y = in_idx1_b + desc1.strides[1] * y; + int in_idx2_y = in_idx2_b + desc2.strides[1] * y; for (int x = 0; x < output_shape.Dims(2); ++x) { + int out_idx_x = (out_idx_y + x) * dims_data[3]; + int 
in1_idx = in_idx1_y + desc1.strides[2] * x; + int in2_idx = in_idx2_y + desc2.strides[2] * x; for (int c = 0; c < output_shape.Dims(3); ++c) { - auto out_idx = Offset(output_shape, b, y, x, c); - auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); - auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); + auto out_idx = out_idx_x + c; auto in1_val = input1_data[in1_idx]; auto in2_val = input2_data[in2_idx]; output_data[out_idx] = func(in1_val, in2_val); + in1_idx += desc1.strides[3]; + in2_idx += desc2.strides[3]; } } } diff --git a/src/tensorflow/lite/kernels/internal/reference/broadcast_args.h b/src/tensorflow/lite/kernels/internal/reference/broadcast_args.h new file mode 100644 index 0000000..d93c316 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/broadcast_args.h @@ -0,0 +1,56 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ + +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void BroadcastArgs(const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + // Gets data at the backward index i of the shape tensor. Returns 1 if the + // index is out of range. + auto get_shape_data = [](const RuntimeShape& shape, const T* data, + int backward_idx) -> T { + int forward_idx = shape.FlatSize() - 1 - backward_idx; + if (forward_idx < 0) return 1; + return data[forward_idx]; + }; + + int output_num_elements = output_shape.FlatSize(); + for (int i = 0; i < output_num_elements; ++i) { + int backward_i = output_num_elements - 1 - i; + int shape1_i = get_shape_data(input1_shape, input1_data, i); + int shape2_i = get_shape_data(input2_shape, input2_data, i); + if (shape1_i == 1) { + output_data[backward_i] = shape2_i; + } else if (shape2_i == 1) { + output_data[backward_i] = shape1_i; + } else { + TFLITE_CHECK_EQ(shape1_i, shape2_i); + output_data[backward_i] = shape1_i; + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/broadcast_to.h b/src/tensorflow/lite/kernels/internal/reference/broadcast_to.h new file mode 100644 index 0000000..f106b2b --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/broadcast_to.h @@ -0,0 +1,97 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +namespace tflite { +namespace reference_ops { +template +void BroadcastImpl(const NdArrayDesc& input_desc, const char* input_data, + const NdArrayDesc& output_desc, char* output_data, + int indexes[N], int dim, const int last_broadcasting_dim, + const int type_size) { + // Copy data from input to output. + if (dim == last_broadcasting_dim) { + int copy_size = output_desc.strides[dim] * type_size; + const char* data_src = + input_data + SubscriptToIndex(input_desc, indexes) * type_size; + char* data_dst = + output_data + SubscriptToIndex(output_desc, indexes) * type_size; + for (int i = 0; i < output_desc.extents[dim]; ++i, data_dst += copy_size) { + memcpy(data_dst, data_src, copy_size); + } + return; + } + + // Recursive call to find the next broadcasting. + for (indexes[dim] = 0; indexes[dim] < input_desc.extents[dim]; + ++indexes[dim]) { + BroadcastImpl(input_desc, input_data, output_desc, output_data, indexes, + dim + 1, last_broadcasting_dim, type_size); + } + + // Duplicate data in output tensor. 
+ indexes[dim] = 0; + if (input_desc.extents[dim] != output_desc.extents[dim]) { + int copy_size = output_desc.strides[dim] * type_size; + char* data_src = + output_data + SubscriptToIndex(output_desc, indexes) * type_size; + char* data_dst = data_src + copy_size; + for (int i = 1; i < output_desc.extents[dim]; ++i, data_dst += copy_size) { + memcpy(data_dst, data_src, copy_size); + } + } +} + +template +inline void BroadcastTo(const RuntimeShape& unextended_input_shape, + const char* input_data, + const RuntimeShape& unextended_output_shape, + char* output_data, TfLiteType data_type) { + NdArrayDesc input_desc; + NdArrayDesc output_desc; + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_input_shape), + &input_desc); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + // Get the last dimension has broadcasting. At this dimension, the data is + // copied from input tensor to output tensor. + int last_broadcast_dim = -1; + for (int i = N - 1; i >= 0; --i) { + if (input_desc.extents[i] != output_desc.extents[i]) { + last_broadcast_dim = i; + break; + } + } + + // If non-broadcasting, just copy data from input to output tensor. + if (last_broadcast_dim == -1) { + memcpy(output_data, input_data, + unextended_input_shape.FlatSize() * TfLiteTypeGetSize(data_type)); + return; + } + + // Broadcasting using memcpy. + int indexes[N] = {0}; + BroadcastImpl(input_desc, input_data, output_desc, output_data, indexes, 0, + last_broadcast_dim, TfLiteTypeGetSize(data_type)); +} +} // namespace reference_ops +} // namespace tflite +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/comparisons.cpp b/src/tensorflow/lite/kernels/internal/reference/comparisons.cpp new file mode 100644 index 0000000..36ce951 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/comparisons.cpp @@ -0,0 +1,41 @@ +/* Copyright 2023 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/comparisons.h" + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/runtime_shape.h" + +namespace tflite { +namespace reference_ops { + +BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess( + const RuntimeShape& unextended_input1_shape, + const RuntimeShape& unextended_input2_shape, + const RuntimeShape& unextended_output_shape) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + NdArrayDesc<4> desc1; + NdArrayDesc<4> desc2; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1, + desc2}; +} + +} // namespace reference_ops +} // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/reference/comparisons.h b/src/tensorflow/lite/kernels/internal/reference/comparisons.h index 6344bdc..e40e404 100644 --- a/src/tensorflow/lite/kernels/internal/reference/comparisons.h +++ b/src/tensorflow/lite/kernels/internal/reference/comparisons.h @@ -15,8 +15,12 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ -#include "tensorflow/lite/c/common.h" +#include + +#include "tensorflow/lite/core/c/common.h" +#include "tensorflow/lite/core/macros.h" #include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/runtime_shape.h" #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { @@ -112,20 +116,11 @@ struct BroadcastComparison4DSlowCommon { NdArrayDesc<4> desc2; }; -inline BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess( +TFLITE_NOINLINE +BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess( const RuntimeShape& unextended_input1_shape, const RuntimeShape& unextended_input2_shape, - const RuntimeShape& unextended_output_shape) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1, - desc2}; -} + const RuntimeShape& unextended_output_shape); template F> inline void BroadcastComparison4DSlowImpl( @@ -266,12 +261,12 @@ inline void BroadcastComparison4DSlowWithScaling( op_params, input1_shape, input1_data, input2_shape, input2_data, \ output_shape, output_data); \ } -TFLITE_COMPARISON_OP(Equal); -TFLITE_COMPARISON_OP(NotEqual); -TFLITE_COMPARISON_OP(Greater); -TFLITE_COMPARISON_OP(GreaterEqual); -TFLITE_COMPARISON_OP(Less); -TFLITE_COMPARISON_OP(LessEqual); +TFLITE_COMPARISON_OP(Equal) +TFLITE_COMPARISON_OP(NotEqual) +TFLITE_COMPARISON_OP(Greater) +TFLITE_COMPARISON_OP(GreaterEqual) +TFLITE_COMPARISON_OP(Less) +TFLITE_COMPARISON_OP(LessEqual) #undef TFLITE_COMPARISON_OP } // namespace reference_ops 
diff --git a/src/tensorflow/lite/kernels/internal/reference/concatenation.h b/src/tensorflow/lite/kernels/internal/reference/concatenation.h index 998bb09..9d2ecbe 100644 --- a/src/tensorflow/lite/kernels/internal/reference/concatenation.h +++ b/src/tensorflow/lite/kernels/internal/reference/concatenation.h @@ -16,6 +16,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/conv.h b/src/tensorflow/lite/kernels/internal/reference/conv.h index b912ac1..3c9f9fc 100644 --- a/src/tensorflow/lite/kernels/internal/reference/conv.h +++ b/src/tensorflow/lite/kernels/internal/reference/conv.h @@ -15,16 +15,15 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/internal/common.h" - +#include +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" namespace tflite { namespace reference_ops { - inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& filter_shape, const float* filter_data, const RuntimeShape& bias_shape, @@ -46,7 +45,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, (void)im2col_data; // only used in optimized code. (void)im2col_shape; // only used in optimized code. 
const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -55,14 +54,22 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_NE(groups, 0); + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; + TFLITE_DCHECK_NE(filters_per_group, 0); const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); + for (int batch = 0; batch < batches; ++batch) { for (int out_y = 0; out_y < output_height; ++out_y) { const int in_y_origin = (out_y * stride_height) - pad_height; for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; float total = 0.f; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -77,10 +84,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, if (!is_point_inside_image) { continue; } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - float input_value = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + float input_value = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; 
float filter_value = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; total += (input_value * filter_value); @@ -108,8 +116,8 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, uint8_t* output_data, const RuntimeShape& im2col_shape, uint8_t* im2col_data, void* cpu_backend_context) { (void)cpu_backend_context; // only used in optimized code. - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. const int stride_width = params.stride_width; const int stride_height = params.stride_height; const int dilation_width_factor = params.dilation_width_factor; @@ -129,7 +137,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -138,6 +146,10 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { @@ -146,6 +158,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& 
input_shape, for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; int32_t acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -161,9 +174,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; acc += @@ -209,7 +224,7 @@ inline void HybridConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -218,18 +233,24 @@ inline void HybridConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < 
batches; ++batch) { for (int out_y = 0; out_y < output_height; ++out_y) { for (int out_x = 0; out_x < output_width; ++out_x) { for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; const int in_x_origin = (out_x * stride_width) - pad_width; const int in_y_origin = (out_y * stride_height) - pad_height; int32_t acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { const int in_x = in_x_origin + dilation_width_factor * filter_x; const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -238,7 +259,8 @@ inline void HybridConvPerChannel( if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && (in_y < input_height)) { int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; + input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset(filter_shape, out_channel, filter_y, filter_x, in_channel)]; diff --git a/src/tensorflow/lite/kernels/internal/reference/cumsum.h b/src/tensorflow/lite/kernels/internal/reference/cumsum.h new file mode 100644 index 0000000..7cbc87c --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/cumsum.h @@ -0,0 +1,175 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ + +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" + +namespace tflite { +namespace reference_ops { + +template +inline void CumSum(const T* input_data, const RuntimeShape& shape, int32_t axis, + bool exclusive, bool reverse, T* output_data) { + const int32_t rank = shape.DimensionsCount(); + TFLITE_DCHECK_GE(rank, 1); + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, rank); + + size_t inner = 1; + size_t outer = 1; + size_t depth = 1; + for (int32_t i = 0; i < rank; i++) { + if (i < axis) + inner *= shape.Dims(i); + else if (i > axis) + outer *= shape.Dims(i); + else + depth = shape.Dims(i); + } + + for (size_t outer_index = 0; outer_index < outer; outer_index++) { + size_t outer_index_adj; + if (reverse) + outer_index_adj = (outer - 1) - outer_index; + else + outer_index_adj = outer_index; + for (size_t inner_index = 0; inner_index < inner; inner_index++) { + T accumulator = 0; + size_t inner_index_adj; + if (reverse) + inner_index_adj = (inner - 1) - inner_index; + else + inner_index_adj = inner_index; + for (size_t depth_index = 0; depth_index < depth; depth_index++) { + size_t depth_index_adj; + if (reverse) + depth_index_adj = (depth - 1) - depth_index; + else + depth_index_adj = depth_index; + + size_t index = outer_index_adj; + index += inner_index_adj * depth * outer; + index += depth_index_adj * outer; + + if (exclusive) { + output_data[index] = accumulator; + accumulator += input_data[index]; + } else { + accumulator += input_data[index]; + output_data[index] = accumulator; + } + } + } + } +} + +// +// Quantized INT8 CUMSUM +// +inline void CumSum(const 
ArithmeticParams& params, const int8_t* input_data, + const RuntimeShape& shape, int32_t axis, bool exclusive, + bool reverse, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + // All inputs should have same zero-point and scale, this is checked during + // Prepare stage. + TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); + TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); + + const int32_t rank = shape.DimensionsCount(); + TFLITE_DCHECK_GE(rank, 1); + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, rank); + + size_t inner = 1; + size_t outer = 1; + size_t depth = 1; + for (int32_t i = 0; i < rank; i++) { + if (i < axis) + inner *= shape.Dims(i); + else if (i > axis) + outer *= shape.Dims(i); + else + depth = shape.Dims(i); + } + + for (size_t outer_index = 0; outer_index < outer; outer_index++) { + size_t outer_index_adj; + if (reverse) + outer_index_adj = (outer - 1) - outer_index; + else + outer_index_adj = outer_index; + for (size_t inner_index = 0; inner_index < inner; inner_index++) { + int32_t accumulator = params.input1_offset; // accumulator = 0 + accumulator *= (1 << params.left_shift); + accumulator = MultiplyByQuantizedMultiplierSmallerThanOneExp( + accumulator, params.input1_multiplier, params.input1_shift); + + size_t inner_index_adj; + if (reverse) + inner_index_adj = (inner - 1) - inner_index; + else + inner_index_adj = inner_index; + + for (size_t depth_index = 0; depth_index < depth; depth_index++) { + size_t depth_index_adj; + if (reverse) + depth_index_adj = (depth - 1) - depth_index; + else + depth_index_adj = depth_index; + + size_t index = outer_index_adj; + index += inner_index_adj * depth * outer; + index += depth_index_adj * outer; + + const int32_t y = params.input1_offset + input_data[index]; + const 
int32_t shifted_y = y * (1 << params.left_shift); + const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_y, params.input1_multiplier, params.input1_shift); + + int32_t scaled_output; + if (exclusive) { + scaled_output = accumulator; + accumulator += scaled_y; + } else { + accumulator += scaled_y; + scaled_output = accumulator; + } + + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + scaled_output, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[index] = static_cast(clamped_output); + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/depth_to_space.h b/src/tensorflow/lite/kernels/internal/reference/depth_to_space.h new file mode 100644 index 0000000..23cff28 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/depth_to_space.h @@ -0,0 +1,79 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int input_depth = input_shape.Dims(3); + const int input_width = input_shape.Dims(2); + const int input_height = input_shape.Dims(1); + const int input_batch = input_shape.Dims(0); + + const int output_depth = output_shape.Dims(3); + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch = output_shape.Dims(0); + + const int32_t block_size = op_params.block_size; + + TFLITE_DCHECK_EQ(input_width * block_size, output_width); + TFLITE_DCHECK_EQ(input_height * block_size, output_height); + TFLITE_DCHECK_EQ(input_depth, output_depth * block_size * block_size); + TFLITE_DCHECK_EQ(input_batch, output_batch); + + for (int out_b = 0; out_b < output_batch; ++out_b) { + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + for (int out_d = 0; out_d < output_depth; ++out_d) { + const int in_d = + out_d + ((out_h % block_size) * block_size + out_w % block_size) * + output_depth; + + const int in_w = out_w / block_size; + const int in_h = out_h / block_size; + const int in_b = out_b; + + const int 
input_index = Offset(input_shape, in_b, in_h, in_w, in_d); + const int output_index = + Offset(output_shape, out_b, out_h, out_w, out_d); + + output_data[output_index] = input_data[input_index]; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h b/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h index 20bf83d..bb51a39 100644 --- a/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +++ b/src/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h @@ -17,7 +17,7 @@ limitations under the License. #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -68,6 +68,27 @@ inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier, return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); } +// Single-rounding MultiplyByQuantizedMultiplier +#if TFLITE_SINGLE_ROUNDING +template <> +inline int32_t DepthwiseConvRound( + int32_t x, int32_t quantized_multiplier, int shift) { + using gemmlowp::RoundingDivideByPOT; + using gemmlowp::SaturatingRoundingDoublingHighMul; + int left_shift = shift > 0 ? shift : 0; + int right_shift = shift > 0 ? 
0 : -shift; + return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul( + x * (1 << left_shift), quantized_multiplier), + right_shift); +} + +template <> +inline int32_t DepthwiseConvRound( + int32_t x, int32_t quantized_multiplier, int shift) { + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} +// Double-rounding MultiplyByQuantizedMultiplier +#else template <> inline int32_t DepthwiseConvRound( int32_t x, int32_t quantized_multiplier, int shift) { @@ -86,6 +107,7 @@ inline int32_t DepthwiseConvRound( rounding_offset) >> right_shift; } +#endif // TFLITE_SINGLE_ROUNDING template struct DepthwiseConvBasicKernel { diff --git a/src/tensorflow/lite/kernels/internal/reference/div.h b/src/tensorflow/lite/kernels/internal/reference/div.h new file mode 100644 index 0000000..df8da1b --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/div.h @@ -0,0 +1,247 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ + +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { + +namespace reference_ops { + +template +inline void DivCheckArithmeticParams(const ArithmeticParams& params) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + constexpr int32_t max_value = + static_cast(std::numeric_limits::max()); + TFLITE_DCHECK_GE(params.input1_offset, -max_value); + TFLITE_DCHECK_LE(params.input1_offset, max_value); + TFLITE_DCHECK_GE(params.input2_offset, -max_value); + TFLITE_DCHECK_LE(params.input2_offset, max_value); + TFLITE_DCHECK_GE(params.output_offset, -max_value); + TFLITE_DCHECK_LE(params.output_offset, max_value); +} + +// Element-wise div that can often be used for inner loop of broadcast Div as +// well as the non-broadcast Div. +template +inline void DivElementwise(int size, const ArithmeticParams& params, + const T* input1_data, const T* input2_data, + T* output_data) { + DivCheckArithmeticParams(params); + + for (int i = 0; i < size; ++i) { + int32_t input1_val = params.input1_offset + input1_data[i]; + int32_t input2_val = params.input2_offset + input2_data[i]; + TFLITE_DCHECK_NE(input2_val, 0); + if (input2_val < 0) { + // Invert signs to avoid a negative input2_val as input2_inv needs to be + // positive to be used as multiplier of MultiplyByQuantizedMultiplier. 
+ input1_val = -input1_val; + input2_val = -input2_val; + } + int recip_shift; + const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift); + const int headroom = CountLeadingSignBits(input1_val); + const int32_t unscaled_quotient = + MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, + headroom); + const int total_shift = params.output_shift - recip_shift - headroom; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplierSmallerThanOneExp( + unscaled_quotient, params.output_multiplier, total_shift); + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + output_data[i] = static_cast(clamped_output); + } +} + +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const uint8_t* input1_data, + const RuntimeShape& input2_shape, const uint8_t* input2_data, + const RuntimeShape& output_shape, uint8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + DivElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const int8_t* input1_data, + const RuntimeShape& input2_shape, const int8_t* input2_data, + const RuntimeShape& output_shape, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + DivElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +template +inline void BroadcastDivSlowQuantized( + const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape, + const T* input1_data, const RuntimeShape& unextended_input2_shape, + const T* input2_data, const 
RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); + + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + DivCheckArithmeticParams(params); + + auto div_func = [&](int indexes[N]) { + int32_t input1_val = + params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; + int32_t input2_val = + params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; + TFLITE_DCHECK_NE(input2_val, 0); + if (input2_val < 0) { + // Invert signs to avoid a negative input2_val as input2_inv needs to be + // positive to be used as multiplier of MultiplyByQuantizedMultiplier. 
+ input1_val = -input1_val; + input2_val = -input2_val; + } + int recip_shift; + const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift); + const int headroom = CountLeadingSignBits(input1_val); + const int32_t unscaled_quotient = + MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, + headroom); + const int total_shift = params.output_shift - recip_shift - headroom; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplierSmallerThanOneExp( + unscaled_quotient, params.output_multiplier, total_shift); + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + output_data[SubscriptToIndex(output_desc, indexes)] = + static_cast(clamped_output); + }; + NDOpsHelper(output_desc, div_func); +} + +template +inline void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const uint8_t* input1_data, + const RuntimeShape& unextended_input2_shape, + const uint8_t* input2_data, + const RuntimeShape& unextended_output_shape, + uint8_t* output_data) { + BroadcastDivSlowQuantized( + params, unextended_input1_shape, input1_data, unextended_input2_shape, + input2_data, unextended_output_shape, output_data); +} + +template +inline void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const int8_t* input1_data, + const RuntimeShape& unextended_input2_shape, + const int8_t* input2_data, + const RuntimeShape& unextended_output_shape, + int8_t* output_data) { + BroadcastDivSlowQuantized( + params, unextended_input1_shape, input1_data, unextended_input2_shape, + input2_data, unextended_output_shape, output_data); +} + +// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary +// dimensionality if the runtime code does a single loop over one dimension +// that handles broadcasting as the base case. 
The code generator would then +// generate max(D1, D2) nested for loops. +template +void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const T* input2_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + T output_activation_min; + T output_activation_max; + GetActivationParams(params, &output_activation_min, &output_activation_max); + + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); + + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest + // stride, typically 1 element). + // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. 
+ + auto div_func = [&](int indexes[N]) { + output_data[SubscriptToIndex(output_desc, indexes)] = + ActivationFunctionWithMinMax( + input1_data[SubscriptToIndex(desc1, indexes)] / + input2_data[SubscriptToIndex(desc2, indexes)], + output_activation_min, output_activation_max); + }; + NDOpsHelper(output_desc, div_func); +} + +template +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + T output_activation_min; + T output_activation_max; + GetActivationParams(params, &output_activation_min, &output_activation_max); + + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = ActivationFunctionWithMinMax( + input1_data[i] / input2_data[i], output_activation_min, + output_activation_max); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/elu.h b/src/tensorflow/lite/kernels/internal/reference/elu.h new file mode 100644 index 0000000..3dc9358 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/elu.h @@ -0,0 +1,37 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ + +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +inline void Elu(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + output_data[i] = val < 0.0f ? TfLiteExpm1(val) : val; + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/exp.h b/src/tensorflow/lite/kernels/internal/reference/exp.h new file mode 100644 index 0000000..9b33bf1 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/exp.h @@ -0,0 +1,38 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ + +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void Exp(const T* input_data, const size_t num_elements, + T* output_data) { + ruy::profiler::ScopeLabel label("Exp"); + for (size_t idx = 0; idx < num_elements; ++idx) { + output_data[idx] = std::exp(input_data[idx]); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/fill.h b/src/tensorflow/lite/kernels/internal/reference/fill.h new file mode 100644 index 0000000..16630e6 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/fill.h @@ -0,0 +1,38 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ + +#include + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void Fill(const RuntimeShape& value_shape, const T* value_data, + const RuntimeShape& output_shape, T* output_data) { + TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0); + const int flat_size = output_shape.FlatSize(); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = *value_data; + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/floor_div.h b/src/tensorflow/lite/kernels/internal/reference/floor_div.h new file mode 100644 index 0000000..e75d473 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/floor_div.h @@ -0,0 +1,35 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ + +#include +#include + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +T FloorDiv(T input1, T input2) { + return std::floor(std::divides()(static_cast(input1), + static_cast(input2))); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/floor_mod.h b/src/tensorflow/lite/kernels/internal/reference/floor_mod.h new file mode 100644 index 0000000..20ce18b --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/floor_mod.h @@ -0,0 +1,44 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ + +#include +#include + +namespace tflite { + +namespace reference_ops { + +template +T FloorMod(T input1, T input2) { + struct FloatMod { + float operator()(const float lhs, const float rhs) const { + return std::fmod(lhs, rhs); + } + }; + using ModFunc = typename std::conditional::value, + std::modulus, FloatMod>::type; + ModFunc mod_func; + T trunc_mod = mod_func(input1, input2); + return (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0)) + ? (trunc_mod + input2) + : trunc_mod; +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/fully_connected.h b/src/tensorflow/lite/kernels/internal/reference/fully_connected.h index d5ad9d6..cd67828 100644 --- a/src/tensorflow/lite/kernels/internal/reference/fully_connected.h +++ b/src/tensorflow/lite/kernels/internal/reference/fully_connected.h @@ -15,6 +15,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/hard_swish.h b/src/tensorflow/lite/kernels/internal/reference/hard_swish.h index cda1b5c..40504f9 100644 --- a/src/tensorflow/lite/kernels/internal/reference/hard_swish.h +++ b/src/tensorflow/lite/kernels/internal/reference/hard_swish.h @@ -12,10 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ -#include "ruy/profiler/instrumentation.h" // from @ruy +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -23,9 +25,9 @@ namespace tflite { namespace reference_ops { inline int16_t SaturatingLeftShift(int16_t value, int amount) { - int32_t result = static_cast(value) * (1 << amount); - result = std::min(result, std::numeric_limits::max()); - result = std::max(result, std::numeric_limits::min()); + int64_t result = static_cast(value) * (1 << amount); + result = std::min(result, std::numeric_limits::max()); + result = std::max(result, std::numeric_limits::min()); return result; } @@ -163,4 +165,4 @@ inline void HardSwish(const HardSwishParams& params, } // namespace reference_ops } // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/add.h index 10bee90..c2a0e0f 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/add.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ +#include +#include #include #include "tensorflow/lite/kernels/internal/common.h" @@ -34,30 +36,102 @@ inline void CheckArithmeticParams(const ArithmeticParams& params) { TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits::max()); } -inline void ElementWise( - int size, const ArithmeticParams& params, const int8_t* input1_data, - const int8_t* input2_data, int8_t* output_data, - void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { +// TODO: b/270589088 - move to a more appropriate file (b/270589088#comment2) +template +void BroadcastInput1(int size, const ArithmeticParams& params, + const T* input1_data, const T* input2_data, T* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + T (*binary_func)(T, T, const ArithmeticParams&)) { + CheckArithmeticParams(params); + for (int i = 0; i < size; ++i) { + output_data[i] = binary_func(input1_data[0], input2_data[i], params); + } +} + +template +void BroadcastInput2(int size, const ArithmeticParams& params, + const T* input1_data, const T* input2_data, T* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + T (*binary_func)(T, T, const ArithmeticParams&)) { + CheckArithmeticParams(params); + for (int i = 0; i < size; ++i) { + output_data[i] = binary_func(input1_data[i], input2_data[0], params); + } +} + +// TODO: b/270589088 - move to a more appropriate file (b/270589088#comment2) +template +void ElementWise(int size, const ArithmeticParams& params, const T* input1_data, + const T* input2_data, T* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + T (*binary_func)(T, T, const ArithmeticParams&)) { CheckArithmeticParams(params); for (int i = 0; i < size; ++i) { output_data[i] = binary_func(input1_data[i], input2_data[i], params); } } 
-inline void BroadcastBinaryFunction4DSlow( +template +inline void BroadcastAddRecursiveDimensions( + const ArithmeticParams& params, int dimension, size_t* input1_offset_p, + size_t* input2_offset_p, size_t* output_offset, + size_t* compressed_input1_stride, size_t* compressed_input2_stride, + size_t* compressed_output_shape, const T* input1_data, const T* input2_data, + T* output_data, void (*check_arithmetic_params)(const ArithmeticParams&), + T (*binary_func)(T, T, const ArithmeticParams&)) { + if (dimension > 0) { + for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) { + size_t input1_offset_c = *input1_offset_p; + size_t input2_offset_c = *input2_offset_p; + BroadcastAddRecursiveDimensions( + params, dimension - 1, &input1_offset_c, &input2_offset_c, + output_offset, compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, input1_data, input2_data, output_data, + check_arithmetic_params, binary_func); + *input1_offset_p += compressed_input1_stride[dimension]; + *input2_offset_p += compressed_input2_stride[dimension]; + } + } else { + TFLITE_DCHECK(dimension == 0); + bool input1_is_broadcast = compressed_input1_stride[dimension] == 0; + bool input2_is_broadcast = compressed_input2_stride[dimension] == 0; + TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast)); + const T* input1_data_ptr = input1_data + *input1_offset_p; + const T* input2_data_ptr = input2_data + *input2_offset_p; + T* output_data_ptr = output_data + *output_offset; + if (input1_is_broadcast) { + // input1 is broadcast. + BroadcastInput1(compressed_output_shape[dimension], params, + input1_data_ptr, input2_data_ptr, output_data_ptr, + check_arithmetic_params, binary_func); + *input2_offset_p += compressed_output_shape[dimension]; + } else if (input2_is_broadcast) { + // input2 is broadcast. 
+ BroadcastInput2(compressed_output_shape[dimension], params, + input1_data_ptr, input2_data_ptr, output_data_ptr, + check_arithmetic_params, binary_func); + *input1_offset_p += compressed_output_shape[dimension]; + } else { + // Add element-wise. + ElementWise(compressed_output_shape[dimension], params, + input1_data_ptr, input2_data_ptr, output_data_ptr, + check_arithmetic_params, binary_func); + *input1_offset_p += compressed_output_shape[dimension]; + *input2_offset_p += compressed_output_shape[dimension]; + } + *output_offset += compressed_output_shape[dimension]; + } +} + +// TODO: b/270589088 - move to a more appropriate file. (b/270589088#comment2) +template +void BroadcastBinaryFunction6DSlow( const ArithmeticParams& params, const RuntimeShape& input1_shape, - const int8_t* input1_data, const RuntimeShape& input2_shape, - const int8_t* input2_data, const RuntimeShape& output_shape, - int8_t* output_data, + const T* input1_data, const RuntimeShape& input2_shape, + const T* input2_data, const RuntimeShape& output_shape, T* output_data, void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); + T (*binary_func)(T, T, const ArithmeticParams&)) { + constexpr int kMaxBroadcastDim = 6; // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -70,17 +144,37 @@ inline void BroadcastBinaryFunction4DSlow( // We name our variables by their Tensorflow convention, but generate C code // nesting loops such that the innermost loop has the smallest stride for the // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func( - input1_data[SubscriptToIndex(desc1, b, y, x, c)], - input2_data[SubscriptToIndex(desc2, b, y, x, c)], params); - } - } - } + size_t compressed_input1_stride[kMaxBroadcastDim]; + size_t compressed_input2_stride[kMaxBroadcastDim]; + size_t compressed_output_shape[kMaxBroadcastDim]; + bool broadcastable_shape = ReduceDimensionsForBroadcast( + input1_shape, input2_shape, compressed_input1_stride, + compressed_input2_stride, compressed_output_shape); + // Skip broadcasting for degenerate shapes. + if (!broadcastable_shape) { + return; } + + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastAddRecursiveDimensions( + params, kMaxBroadcastDim - 1, &input1_offset, &input2_offset, + &output_offset, compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, input1_data, input2_data, output_data, + check_arithmetic_params, binary_func); +} + +template +void BroadcastBinaryFunction4DSlow( + const ArithmeticParams& params, const RuntimeShape& input1_shape, + const T* input1_data, const RuntimeShape& input2_shape, + const T* input2_data, const RuntimeShape& output_shape, T* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + T (*binary_func)(T, T, const ArithmeticParams&)) { + BroadcastBinaryFunction6DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data, + check_arithmetic_params, binary_func); } inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) { @@ -126,6 +220,18 @@ inline void Add(const ArithmeticParams& params, AddElementwise(flat_size, params, input1_data, input2_data, output_data); } +inline void 
BroadcastAdd6DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const int8_t* input1_data, + const RuntimeShape& input2_shape, + const int8_t* input2_data, + const RuntimeShape& output_shape, + int8_t* output_data) { + BroadcastBinaryFunction6DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data, + CheckArithmeticParams, AddFunc); +} + inline void BroadcastAdd4DSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, const int8_t* input1_data, @@ -133,7 +239,7 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, const int8_t* input2_data, const RuntimeShape& output_shape, int8_t* output_data) { - BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape, + BroadcastBinaryFunction6DSlow(params, input1_shape, input1_data, input2_shape, input2_data, output_shape, output_data, CheckArithmeticParams, AddFunc); } diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h index 3a4164d..eac0057 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -48,7 +50,7 @@ inline void ConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -59,6 +61,12 @@ inline void ConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_NE(groups, 0); + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; + TFLITE_DCHECK_NE(filters_per_group, 0); const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { @@ -67,6 +75,7 @@ inline void ConvPerChannel( for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; int32_t acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -82,9 +91,11 @@ inline void ConvPerChannel( continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; 
in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; // Accumulate with 32 bits accumulator. @@ -124,14 +135,16 @@ inline void ConvPerChannel( } } + // Fixed-point per-channel-quantization convolution reference kernel. // 16-bit data and 8-bit filter +template inline void ConvPerChannel( const ConvParams& params, const int32_t* output_multiplier, const int32_t* output_shift, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& filter_shape, const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, + const AccumScalar* bias_data, const RuntimeShape& output_shape, int16_t* output_data) { // Get parameters. const int stride_width = params.stride_width; @@ -151,7 +164,7 @@ inline void ConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -162,6 +175,10 @@ inline void ConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; 
++batch) { @@ -170,7 +187,8 @@ inline void ConvPerChannel( for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - std::int64_t acc = 0; + auto group = out_channel / filters_per_group; + AccumScalar acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; for (int filter_x = 0; filter_x < filter_width; ++filter_x) { @@ -185,9 +203,11 @@ inline void ConvPerChannel( continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; // Accumulate with 64 bits accumulator. diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h index f0ca09c..7676fce 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h index 2bc3e79..3a74402 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h @@ -15,22 +15,30 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_integer_ops { -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { +// For per-channel functions, since it is defined in quantization spec that +// weights are symmetric +// (https://www.tensorflow.org/lite/performance/quantization_spec#symmetric_vs_asymmetric), +// zero_point (params.weights_offset) is always 0. +// However, for per-tensor functions, params.weights_offset is still applied for +// backward compatibility. 
+template +void FullyConnectedPerChannel( + const FullyConnectedParams& params, const int32_t* output_multiplier, + const int* output_shift, const RuntimeShape& input_shape, + const InputType* input_data, const RuntimeShape& filter_shape, + const WeightType* filter_data, const RuntimeShape& bias_shape, + const BiasType* bias_data, const RuntimeShape& output_shape, + OutputType* output_data) { const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); @@ -44,60 +52,70 @@ inline void FullyConnected( const int accum_depth = filter_shape.Dims(filter_dim_count - 1); for (int b = 0; b < batches; ++b) { for (int out_c = 0; out_c < output_depth; ++out_c) { - int32_t acc = 0; + BiasType acc = 0; for (int d = 0; d < accum_depth; ++d) { int32_t input_val = input_data[b * accum_depth + d]; int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * (input_val + input_offset); + acc += filter_val * (input_val + input_offset); } if (bias_data) { acc += bias_data[out_c]; } - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc); + int32_t acc_scaled = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_c], output_shift[out_c]); + acc_scaled += output_offset; + acc_scaled = std::max(acc_scaled, output_activation_min); + acc_scaled = std::min(acc_scaled, output_activation_max); + output_data[out_c + output_depth * b] = + static_cast(acc_scaled); } 
} } -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { +template +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, + const InputType* input_data, + const RuntimeShape& filter_shape, + const WeightType* filter_data, + const RuntimeShape& bias_shape, const BiasType* bias_data, + const RuntimeShape& output_shape, OutputType* output_data) { + const int32_t input_offset = params.input_offset; const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; const int32_t output_multiplier = params.output_multiplier; const int output_shift = params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); + TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); TFLITE_DCHECK_LE(output_activation_min, output_activation_max); const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = output_shape.Dims(0); - const int output_depth = output_shape.Dims(1); + const int output_dim_count = output_shape.DimensionsCount(); + const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); + const int output_depth = output_shape.Dims(output_dim_count - 1); TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); const int accum_depth = filter_shape.Dims(filter_dim_count - 1); for (int b = 0; b < batches; ++b) { for (int out_c = 0; out_c < output_depth; ++out_c) { - int64_t acc = 0; + BiasType acc = 0; for (int d = 0; d < accum_depth; ++d) { int32_t input_val = input_data[b * accum_depth + d]; 
int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * input_val; + acc += (filter_val + filter_offset) * (input_val + input_offset); } if (bias_data) { acc += bias_data[out_c]; } int32_t acc_scaled = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); + acc_scaled += output_offset; acc_scaled = std::max(acc_scaled, output_activation_min); acc_scaled = std::min(acc_scaled, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc_scaled); + output_data[out_c + output_depth * b] = + static_cast(acc_scaled); } } } diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h index 31f2de9..164a836 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h index d1a15bd..16eff13 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h @@ -15,7 +15,9 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ +#include #include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -58,23 +60,36 @@ inline void Logistic(int32_t input_zero_point, int32_t input_range_radius, } } -inline void Logistic(int32_t input_multiplier, int32_t input_size, - const int16_t* ptr_input_data, int16_t* ptr_output_data) { +inline void Logistic(int32_t input_multiplier, int32_t input_left_shift, + int32_t input_size, const int16_t* ptr_input_data, + int16_t* ptr_output_data) { // We use the LUT for sigmoid and take into account, that // tanh(x) = 2*sigmoid(2*x) - 1 - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; + // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. + // In case of general parameter scale, multiplier 3 is taken into account + // in TanhPrepare function and it is included in + // input_multiplier already. + + TFLITE_DCHECK_GE(input_left_shift, 0); + if (input_multiplier == 0) { // power of two case + input_multiplier = 3 << input_left_shift; + input_left_shift = 0; + } + + int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0; for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; + int32_t input_data = + ((*ptr_input_data) * input_multiplier + round) >> input_left_shift; - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7] and - // we do interpolation on unsigned values. - uint32_t abs_input_data = 3 * abs(input_data); + // We do interpolation on unsigned values. + uint32_t abs_input_data = abs(input_data); // We divide by 2 power of 9, because // we need to divide by 2 in power of 7 for // the input conversion + 1/4 from the scale above. + // Define uh as uint32_t type not to make this function overflow. 
uint32_t uh = abs_input_data >> 9; uint32_t result; diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h index bd48427..7e3f690 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h @@ -1,10 +1,10 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -15,63 +15,4 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -template -inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier, - int32_t shift, const RuntimeShape& unextended_input_shape, - const integer_type* input_data, int32_t input_zero_point, - const RuntimeShape& unextended_output_shape, - integer_type* output_data, int32_t output_zero_point) { - // Current implementation only supports dimension equals 4 and simultaneous - // reduction over width and height. 
- TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4); - TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - const int output_batch = output_shape.Dims(0); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int output_depth = output_shape.Dims(3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int num_elements_in_axis = input_width * input_height; - - TFLITE_CHECK_EQ(op_params.axis_count, 2); - TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - TFLITE_CHECK_EQ(output_height, 1); - TFLITE_CHECK_EQ(output_width, 1); - - static constexpr int32_t kMinInt = std::numeric_limits::min(); - static constexpr int32_t kMaxInt = std::numeric_limits::max(); - - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - int32_t acc = 0; - for (int in_h = 0; in_h < input_height; ++in_h) { - for (int in_w = 0; in_w < input_width; ++in_w) { - acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] - - input_zero_point; - } - } - acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); - acc = acc > 0 ? 
(acc + num_elements_in_axis / 2) / num_elements_in_axis - : (acc - num_elements_in_axis / 2) / num_elements_in_axis; - acc += output_zero_point; - acc = std::min(std::max(acc, kMinInt), kMaxInt); - output_data[Offset(output_shape, out_b, 0, 0, out_d)] = - static_cast(acc); - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h index b80838a..3de3361 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h @@ -15,17 +15,22 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ -#include "fixedpoint/fixedpoint.h" -#include "ruy/profiler/instrumentation.h" // from @ruy +#include + +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_integer_ops { -template -inline void MulElementwise(int size, const ArithmeticParams& params, - const T* input1_data, const T* input2_data, - T* output_data) { +// Maximum dimension supported by the broadcast mul operation. 
+constexpr int kMaxMulBroadcastDim = 6; + +template +void MulElementwise(int size, const ArithmeticParams& params, + const InputType* input1_data, const InputType* input2_data, + OutputType* output_data) { for (int i = 0; i < size; ++i) { const int32_t input1_val = params.input1_offset + input1_data[i]; const int32_t input2_val = params.input2_offset + input2_data[i]; @@ -37,7 +42,7 @@ inline void MulElementwise(int size, const ArithmeticParams& params, const int32_t clamped_output = std::min(params.quantized_activation_max, std::max(params.quantized_activation_min, unclamped_result)); - output_data[i] = static_cast(clamped_output); + output_data[i] = static_cast(clamped_output); } } @@ -86,46 +91,104 @@ inline void Mul(const ArithmeticParams& params, } template -inline void BroadcastMul4DSlow( +inline void BroadcastMul6DSlow( const ArithmeticParams& params, const RuntimeShape& input1_shape, const T* input1_data, const RuntimeShape& input2_shape, const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("BroadcastMul4DSlow"); + ruy::profiler::ScopeLabel label("BroadcastMul6DSlow"); - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; + NdArrayDesc desc1; + NdArrayDesc desc2; // The input shapes are extended as part of NdArrayDesc initialization. 
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, &desc2); const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = std::min( - params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); + RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape); + // Cache output shape dimensions. 
+ int32_t extended_output_shape_dims[kMaxMulBroadcastDim]; + std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(), + sizeof(extended_output_shape_dims)); + + size_t input1_offset_a = 0; + size_t input2_offset_a = 0; + size_t output_offset_a = 0; + for (int a = 0; a < extended_output_shape_dims[0]; ++a) { + size_t input1_offset_d = input1_offset_a; + size_t input2_offset_d = input2_offset_a; + size_t output_offset_d = output_offset_a; + for (int d = 0; d < extended_output_shape_dims[1]; ++d) { + size_t input1_offset_b = input1_offset_d; + size_t input2_offset_b = input2_offset_d; + size_t output_offset_b = output_offset_d; + for (int b = 0; b < extended_output_shape_dims[2]; ++b) { + size_t input1_offset_y = input1_offset_b; + size_t input2_offset_y = input2_offset_b; + size_t output_offset_y = output_offset_b; + for (int y = 0; y < extended_output_shape_dims[3]; ++y) { + size_t input1_offset_x = input1_offset_y; + size_t input2_offset_x = input2_offset_y; + size_t output_offset_x = output_offset_y; + for (int x = 0; x < extended_output_shape_dims[4]; ++x) { + size_t input1_offset_c = input1_offset_x; + size_t input2_offset_c = input2_offset_x; + size_t output_offset_c = output_offset_x; + for (int c = 0; c < extended_output_shape_dims[5]; ++c) { + const int32_t input1_val = + params.input1_offset + input1_data[input1_offset_c]; + const int32_t input2_val = + params.input2_offset + input2_data[input2_offset_c]; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplier(input1_val * input2_val, + params.output_multiplier, + params.output_shift); + const int32_t clamped_output = std::min( + params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + output_data[output_offset_c] = static_cast(clamped_output); + input1_offset_c += desc1.strides[5]; + input2_offset_c += desc2.strides[5]; + ++output_offset_c; + } + input1_offset_x += desc1.strides[4]; + input2_offset_x += 
desc2.strides[4]; + output_offset_x += extended_output_shape_dims[5]; + } + input1_offset_y += desc1.strides[3]; + input2_offset_y += desc2.strides[3]; + output_offset_y += + extended_output_shape_dims[4] * extended_output_shape_dims[5]; } + input1_offset_b += desc1.strides[2]; + input2_offset_b += desc2.strides[2]; + output_offset_b += extended_output_shape_dims[3] * + extended_output_shape_dims[4] * + extended_output_shape_dims[5]; } + input1_offset_d += desc1.strides[1]; + input2_offset_d += desc2.strides[1]; + output_offset_d += + extended_output_shape_dims[2] * extended_output_shape_dims[3] * + extended_output_shape_dims[4] * extended_output_shape_dims[5]; } + input1_offset_a += desc1.strides[0]; + input2_offset_a += desc2.strides[0]; + output_offset_a += + extended_output_shape_dims[1] * extended_output_shape_dims[2] * + extended_output_shape_dims[3] * extended_output_shape_dims[4] * + extended_output_shape_dims[5]; } } +template +inline void BroadcastMul4DSlow( + const ArithmeticParams& params, const RuntimeShape& input1_shape, + const T* input1_data, const RuntimeShape& input2_shape, + const T* input2_data, const RuntimeShape& output_shape, T* output_data) { + BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data); +} + } // namespace reference_integer_ops } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h index 17944bc..4dc31d9 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h @@ -15,13 +15,15 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ +#include #include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_integer_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8_t* input_data, const RuntimeShape& output_shape, int8_t* output_data) { @@ -66,6 +68,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? (acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -77,6 +80,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, @@ -136,7 +140,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& output_shape, @@ -182,6 +186,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? 
(acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -193,6 +198,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h index 81ff34f..93b0bb8 100644 --- a/src/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h @@ -15,9 +15,10 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ +#include #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -65,19 +66,25 @@ inline void Tanh(int32_t input_multiplier, int32_t input_left_shift, // We use the LUT for sigmoid and take into account, that // tanh(x) = 2*sigmoid(2*x) - 1 - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; + // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. + // In case of general parameter scale, multiplier 3 is taken into account + // in TanhPrepare function and it is included in + // input_multiplier already. + + if (input_multiplier == 0) { // power of two case + input_multiplier = 3 << input_left_shift; + input_left_shift = 0; + } + + int32_t round = (input_left_shift > 0) ? 
1 << (input_left_shift - 1) : 0; int flat_size = MatchingFlatSize(input_shape, output_shape); for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; - - if (input_left_shift == 1) { - input_data <<= 1; - } + int32_t input_data = + ((*ptr_input_data) * input_multiplier + round) >> input_left_shift; - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. - uint32_t abs_input_data = 3 * abs(input_data); + uint32_t abs_input_data = abs(input_data); uint32_t uh = abs_input_data >> 8; int32_t result; diff --git a/src/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h b/src/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h new file mode 100644 index 0000000..40f99ce --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h @@ -0,0 +1,224 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ + +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_integer_ops { + +// Fixed-point per-channel-quantization transpose convolution reference kernel. 
+inline void TransposeConv( + const ConvParams& params, const int32_t* output_multiplier, + const int32_t* output_shift, const RuntimeShape& input_shape, + const int8_t* input_data, const RuntimeShape& filter_shape, + const int8_t* filter_data, const RuntimeShape& bias_shape, + const int32_t* bias_data, const RuntimeShape& output_shape, + int8_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, + int32_t* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t input_offset = params.input_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float 
version. + memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + const int8_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + const int8_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + (input_value + input_offset) * filter_value; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_channel], output_shift[out_channel]); + acc += output_offset; + acc = std::max(acc, output_activation_min); + acc = std::min(acc, output_activation_max); + output_data[Offset(output_shape, 
batch, out_y, out_x, out_channel)] = + static_cast(acc); + } + } + } + } +} + +// int16_t input (zero_point=0), int8_t filter, int32 or int64 accumulator +template +inline void TransposeConv( + const ConvParams& params, const int32_t* output_multiplier, + const int32_t* output_shift, const RuntimeShape& input_shape, + const int16_t* input_data, const RuntimeShape& filter_shape, + const int8_t* filter_data, const RuntimeShape& bias_shape, + const Scalar* bias_data, const RuntimeShape& output_shape, + int16_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, + Scalar* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. 
+ + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float version. + memset(scratch_buffer, 0, num_elements * sizeof(Scalar)); + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. 
+ if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + const int32_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + const int32_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + input_value * filter_value; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + Scalar acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + int32_t scaled_acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_channel], output_shift[out_channel]); + scaled_acc = std::max(scaled_acc, output_activation_min); + scaled_acc = std::min(scaled_acc, output_activation_max); + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + static_cast(scaled_acc); + } + } + } + } +} + +} // namespace reference_integer_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/l2normalization.h b/src/tensorflow/lite/kernels/internal/reference/l2normalization.h index 7587d2b..e5c91bf 100644 --- a/src/tensorflow/lite/kernels/internal/reference/l2normalization.h +++ b/src/tensorflow/lite/kernels/internal/reference/l2normalization.h @@ -18,7 +18,7 @@ limitations under the License. 
#include #include -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/types.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/leaky_relu.h b/src/tensorflow/lite/kernels/internal/reference/leaky_relu.h new file mode 100644 index 0000000..06f691a --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/leaky_relu.h @@ -0,0 +1,69 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ + +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_ops { + +inline void LeakyRelu(const tflite::LeakyReluParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + // Note that alpha might be > 1 or < 0, so we don't use std::max here. + output_data[i] = val > 0 ? 
val : val * params.alpha; + } +} + +template +inline void QuantizeLeakyRelu(const LeakyReluParams& params, + const RuntimeShape& input_shape, + const T* input_data, + const RuntimeShape& output_shape, + T* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + static const int32_t quantized_min = std::numeric_limits::min(); + static const int32_t quantized_max = std::numeric_limits::max(); + for (int i = 0; i < flat_size; ++i) { + const int32_t input_value = input_data[i] - params.input_offset; + int32_t unclamped_output; + if (input_value >= 0) { + unclamped_output = params.output_offset + + MultiplyByQuantizedMultiplier( + input_value, params.output_multiplier_identity, + params.output_shift_identity); + } else { + unclamped_output = params.output_offset + + MultiplyByQuantizedMultiplier( + input_value, params.output_multiplier_alpha, + params.output_shift_alpha); + } + const T clamped_output = + std::min(quantized_max, std::max(quantized_min, unclamped_output)); + output_data[i] = static_cast(clamped_output); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/log_softmax.h b/src/tensorflow/lite/kernels/internal/reference/log_softmax.h new file mode 100644 index 0000000..67e6e66 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/log_softmax.h @@ -0,0 +1,256 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ + +#include +#include +#include + +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_ops { + +inline void LogSoftmax(const SoftmaxParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + + for (int i = 0; i < outer_size; ++i) { + // Find max element value which we'll use to ensure numerical stability + // taking advantage of the following equality: + // log(exp(x[i])/sum(exp(x[i]))) == log(exp(x[i]+C)/sum(exp(x[i]+C))) + float max = std::numeric_limits::lowest(); + for (int c = 0; c < depth; ++c) { + max = std::max(max, input_data[i * depth + c]); + } + + // Compute sum. + float sum = 0.f; + for (int c = 0; c < depth; ++c) { + sum += std::exp(input_data[i * depth + c] - max); + } + + // Compute result. 
+ const float log_sum = std::log(sum); + for (int c = 0; c < depth; ++c) { + output_data[i * depth + c] = input_data[i * depth + c] - max - log_sum; + } + } +} + +inline void LogSoftmax(const SoftmaxParams& params, + const RuntimeShape& input_shape, + const uint8_t* input_data, + const RuntimeShape& output_shape, uint8_t* output_data) { + const int32_t input_multiplier = params.input_multiplier; + const int32_t input_left_shift = params.input_left_shift; + const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor; + const int32_t reverse_scaling_right_shift = + params.reverse_scaling_right_shift; + const int diff_min = params.diff_min; + // The representation chosen for the input to the exp() function is Q5.26. + // We need to leave extra space since values that we skip might be as large + // as -32 before multiplying by input_beta_multiplier, and therefore as + // large as -16 afterwards. Note that exp(-8) is definitely not + // insignificant to accumulation, but exp(-16) definitely is. 
+ static constexpr int kScaledDiffIntegerBits = 5; + static constexpr int kAccumulationIntegerBits = 12; + static constexpr int kOutputIntegerBits = 4; + using FixedPointScaledDiff = + gemmlowp::FixedPoint; + using FixedPointAccum = + gemmlowp::FixedPoint; + + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + + for (int i = 0; i < outer_size; ++i) { + uint8_t max_in_row = 0; + for (int c = 0; c < depth; ++c) { + max_in_row = std::max(max_in_row, input_data[i * depth + c]); + } + + FixedPointAccum sum_of_exps = FixedPointAccum::Zero(); + for (int c = 0; c < depth; ++c) { + int32_t input_diff = + static_cast(input_data[i * depth + c]) - max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_rescaled = + MultiplyByQuantizedMultiplierGreaterThanOne( + input_diff, input_multiplier, input_left_shift); + const FixedPointScaledDiff scaled_diff_f8 = + FixedPointScaledDiff::FromRaw(input_diff_rescaled); + sum_of_exps = sum_of_exps + gemmlowp::Rescale( + exp_on_negative_values(scaled_diff_f8)); + } + } + + const int32_t fixed_log_sum_of_exps = + log_x_for_x_greater_than_or_equal_to_1( + sum_of_exps) + .raw(); + + // rescaled_diff_min is smallest representable in + // Q(kScaledDiffIntegerBits).(31-kScaledDiffIntegerBits) plus the + // log-sub-exps that will be subtracted in the loop. + // + // The thresholds diff_min, etc are negative. + const int rescaled_diff_min = + fixed_log_sum_of_exps + std::numeric_limits::lowest(); + const int adjusted_diff_min = + std::max(static_cast( + diff_min - 1), // Note use of > below instead of >= above. 
+ MultiplyByQuantizedMultiplierSmallerThanOneExp( + rescaled_diff_min, reverse_scaling_divisor, + -reverse_scaling_right_shift)); + + for (int c = 0; c < depth; ++c) { + int32_t input_diff = + static_cast(input_data[i * depth + c]) - max_in_row; + if (input_diff > adjusted_diff_min) { + const int32_t input_diff_rescaled = + MultiplyByQuantizedMultiplierGreaterThanOne( + input_diff, input_multiplier, input_left_shift); + int32_t unsat_output = + gemmlowp::RoundingDivideByPOT( + (input_diff_rescaled - fixed_log_sum_of_exps), + 31 - kScaledDiffIntegerBits - kOutputIntegerBits) + + 255; + + output_data[i * depth + c] = static_cast( + std::max(std::min(unsat_output, static_cast(255)), + static_cast(0))); + } else { + // Set output to smallest value. + output_data[i * depth + c] = 0; + } + } + } +} + +template +inline void LogSoftmaxQuantized(const SoftmaxParams& params, + const size_t outer_size, const size_t depth, + const RuntimeShape& input_shape, + const T* input_data, + const RuntimeShape& output_shape, + T* output_data) { + const int32_t input_multiplier = params.input_multiplier; + const int32_t input_left_shift = params.input_left_shift; + const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor; + const int32_t reverse_scaling_right_shift = + params.reverse_scaling_right_shift; + const int diff_min = params.diff_min; + + static constexpr T kMinT8 = std::numeric_limits::min(); + static constexpr T kMaxT8 = std::numeric_limits::max(); + static constexpr int32_t kMinInt32 = std::numeric_limits::min(); + + // All IntegerBits must agree with Prepare function. + // Input is chosen as Q5.26 so exp(-1 * 2^5 * 2^-1) = exp(-16) is negligible. 
+ static constexpr int kInputIntegerBits = 5; + static constexpr int kAccumulationIntegerBits = 12; + static constexpr int kOutputIntegerBits = 4; + using F5 = gemmlowp::FixedPoint; + using F12 = gemmlowp::FixedPoint; + + for (size_t outer_index = 0; outer_index < outer_size; ++outer_index) { + T max_in_row = kMinT8; + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + max_in_row = + std::max(max_in_row, input_data[outer_index * depth + inner_index]); + } + + // Accumulator "sum_of_exps_in_q12" is safe from overflowing in 2^12 steps. + F12 sum_of_exps_in_q12 = F12::FromRaw(0); + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + int32_t input_diff = + static_cast(input_data[outer_index * depth + inner_index]) - + max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier( + input_diff, input_multiplier, input_left_shift); + sum_of_exps_in_q12 = + sum_of_exps_in_q12 + + gemmlowp::Rescale( + exp_on_negative_values(F5::FromRaw(input_diff_in_q5))); + } + } + + const int32_t log_sum_of_exps_in_q5 = + log_x_for_x_greater_than_or_equal_to_1( + sum_of_exps_in_q12) + .raw(); + + // Potentially reduced the valid range. shifted_log_sum_of_exps_in_q5 is + // smallest representable in Q5.26 plus the log_sum_of_exps. + const int32_t shifted_log_sum_of_exps_in_q5 = + log_sum_of_exps_in_q5 + kMinInt32; + const int32_t adjusted_diff_min = + std::max(static_cast(diff_min - 1), + MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5, + reverse_scaling_divisor, + -reverse_scaling_right_shift)); + + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + int32_t input_diff = + static_cast(input_data[outer_index * depth + inner_index]) - + max_in_row; + // Note use of > below instead of >= above. + if (input_diff > adjusted_diff_min) { + const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier( + input_diff, input_multiplier, input_left_shift); + + // Rescale and downcast. 
+ int32_t output_in_q27 = + gemmlowp::RoundingDivideByPOT( + (input_diff_in_q5 - log_sum_of_exps_in_q5), + 31 - kInputIntegerBits - kOutputIntegerBits) + + kMaxT8; + + output_in_q27 = + std::max(std::min(output_in_q27, static_cast(kMaxT8)), + static_cast(kMinT8)); + output_data[outer_index * depth + inner_index] = + static_cast(output_in_q27); + } else { + output_data[outer_index * depth + inner_index] = kMinT8; + } + } + } +} + +inline void LogSoftmax(const SoftmaxParams& params, const size_t outer_size, + const size_t depth, const RuntimeShape& input_shape, + const int8_t* input_data, + const RuntimeShape& output_shape, int8_t* output_data) { + LogSoftmaxQuantized(params, outer_size, depth, input_shape, input_data, + output_shape, output_data); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/logistic.h b/src/tensorflow/lite/kernels/internal/reference/logistic.h index 64b7133..96b5415 100644 --- a/src/tensorflow/lite/kernels/internal/reference/logistic.h +++ b/src/tensorflow/lite/kernels/internal/reference/logistic.h @@ -17,7 +17,7 @@ limitations under the License. #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/lstm_cell.h b/src/tensorflow/lite/kernels/internal/reference/lstm_cell.h new file mode 100644 index 0000000..17b113e --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/lstm_cell.h @@ -0,0 +1,422 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ + +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/reference/concatenation.h" +#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +inline void LstmCell( + const LstmCellParams& params, const RuntimeShape& unextended_input_shape, + const float* input_data, const RuntimeShape& unextended_prev_activ_shape, + const float* prev_activ_data, const RuntimeShape& weights_shape, + const float* weights_data, const RuntimeShape& unextended_bias_shape, + const float* bias_data, const RuntimeShape& unextended_prev_state_shape, + const float* prev_state_data, + const RuntimeShape& unextended_output_state_shape, float* output_state_data, + const RuntimeShape& unextended_output_activ_shape, float* output_activ_data, + const RuntimeShape& unextended_concat_temp_shape, float* concat_temp_data, + const RuntimeShape& unextended_activ_temp_shape, float* activ_temp_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4); + 
TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape prev_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape); + const RuntimeShape bias_shape = + RuntimeShape::ExtendedShape(4, unextended_bias_shape); + const RuntimeShape prev_state_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_state_shape); + const RuntimeShape output_state_shape = + RuntimeShape::ExtendedShape(4, unextended_output_state_shape); + const RuntimeShape output_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_output_activ_shape); + const RuntimeShape concat_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape); + const RuntimeShape activ_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape); + TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); + + const int weights_dim_count = weights_shape.DimensionsCount(); + const int batches = + MatchingDim(input_shape, 0, prev_activ_shape, 0, prev_state_shape, 0, + output_state_shape, 0, output_activ_shape, 0); + const int height = + MatchingDim(input_shape, 1, prev_activ_shape, 1, prev_state_shape, 1, + output_state_shape, 1, output_activ_shape, 1); + const int width = + MatchingDim(input_shape, 2, prev_activ_shape, 2, prev_state_shape, 2, + output_state_shape, 2, output_activ_shape, 2); + const int input_depth = input_shape.Dims(3); + const int prev_activ_depth = prev_activ_shape.Dims(3); + const int total_input_depth = prev_activ_depth + input_depth; + TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1), + total_input_depth); + TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1); + const int intern_activ_depth = + 
MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3); + TFLITE_DCHECK_EQ(weights_shape.FlatSize(), + intern_activ_depth * total_input_depth); + TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0); + const int output_depth = + MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape, + 3, output_activ_shape, 3); + TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4); + + // Concatenate prev_activ and input data together + float const* concat_input_arrays_data[2] = {input_data, prev_activ_data}; + const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape, + &prev_activ_shape}; + tflite::ConcatenationParams concat_params; + concat_params.axis = 3; + concat_params.inputs_count = 2; + Concatenation(concat_params, concat_input_arrays_shapes, + concat_input_arrays_data, concat_temp_shape, concat_temp_data); + + // Fully connected + tflite::FullyConnectedParams fc_params; + fc_params.float_activation_min = std::numeric_limits::lowest(); + fc_params.float_activation_max = std::numeric_limits::max(); + FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape, + weights_data, bias_shape, bias_data, activ_temp_shape, + activ_temp_data); + + // Memory state update (the LSTM "guts") + for (int b = 0; b < batches; ++b) { + for (int w = 0; w < width; ++w) { + for (int h = 0; h < height; ++h) { + for (int c = 0; c < output_depth; ++c) { + const float input_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 0 * output_depth + c)])); + const float new_input = std::tanh(activ_temp_data[Offset( + activ_temp_shape, b, h, w, 1 * output_depth + c)]); + const float forget_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 2 * output_depth + c)])); + const float output_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 3 * output_depth + c)])); + const float new_state = + input_gate * new_input + + forget_gate * + 
prev_state_data[Offset(prev_state_shape, b, h, w, c)]; + output_state_data[Offset(output_state_shape, b, h, w, c)] = new_state; + output_activ_data[Offset(output_activ_shape, b, h, w, c)] = + output_gate * std::tanh(new_state); + } + } + } + } +} + +// Quantized LSTM cell implementation. +// The quantization of the input, output arrays is as follows: +// - The input activations are quantized as uint8 on the interval +// [-1, 127/128]. +// The rationale for that is that is the natural interval for output +// activations (see next point) and these need to be concatenated together. +// We could accommodate different ranges by re-scaling, but we empirically +// found that setting the input activations range to be [-1, 127/128] in the +// first place, removing the need for re-scaling, greatly improves accuracy. +// - The output activations are quantized as uint8 on the interval +// [-1, 127/128]. +// The rationale for that is that the definition of a LSTM cell makes them +// intrinsically constrained in [-1, 1]; tweaking that to [-1, 127/128] +// makes for simpler, more accurate fixed-point arithmetic. +// - The output-at-previous-timestep state array is obviously quantized as +// the output activations. +// - The internal LSTM memory (not the output-at-previous-timestep, the other +// internal state array) is int16-quantized and may use any power-of-two, +// symmetric range i.e. [-2^N, 2^N * 32767/32768] for any N, which we call +// StateIntegerBits below, see the below discussion of that template +// parameter ("The StateIntegerBits template parameter"). +// - The output of the internal fully-connected node is int16-quantized +// on the interval [-8, 8 * 32767/32768], the rationale for which is +// explained just below ("Why [-8, 8] for fully-connected output?"). 
+// +// +// === The StateIntegerBits template parameter === +// +// The StateIntegerBits template parameter controls the fixed-point format used +// to represent the internal memory of the LSTM cell (not the +// output-at-previous-timestep, the other internal state array). It's currently +// a template parameter so that the model can control that. The most typical +// value for StateIntegerBits is 4. Other plausible values are anywhere between +// 3 and 5. We might eventually standardize on a single supported value, e.g. 4, +// and drop that template parameter. The reason why it can't be a runtime +// parameter is that this controls the fixed-point format used, i.e. we need to +// generate actually different code based on it. In particular, we generate code +// for a fixed-point tanh() implementation for that format, which internally +// uses a fixed-point exp() implementation, which internally uses a +// barrel-shifter with a number of steps that depends on StateIntegerBits. +// Another consequence of that is that a higher value of StateIntegerBits +// results in a more expensive implementation (more barrel shifter steps +// needed). +// +// +// === Why [-8, 8] for fully-connected output? === +// +// This array is only fed to Logistic and Tanh functions, for which +// the quantized implementation will want to use fixed-point arithmetic, +// requiring a power-of-two representation interval. Thus, we should right +// away quantize this array to a power-of-two interval; otherwise, +// implementation will need to rescale that, losing any benefit that a tighter +// representation interval might otherwise yield, while introducing some +// numerical error and computational overhead. 
+//
+// Now, Logistic and Tanh
+// are nearly constant (nearly equal to their horizontal asymptotes)
+// outside of a small bounded interval around 0:
+//
+// Logistic(4) = 1 - 1.8e-2 Tanh(4) = 1 - 6.7e-4
+// Logistic(8) = 1 - 3.4e-4 Tanh(8) = 1 - 2.3e-7
+// Logistic(16) = 1 - 1.1e-7 Tanh(16) = 1 - 2.5e-14
+//
+// From this, we see that clamping to [-4, 4] would be too inaccurate
+// (the error of 1.8e-2 on Logistic would be felt even in 8bit precision)
+// while clamping to [-16, 16] would make no difference even in float32.
+// However, for a fixed-point implementation in 16-bit integers, using 5
+// integer bits to represent the [-16, 16] range would leave only 11
+// fractional bits, giving an increment of 2^-11 = 4.9e-4 between consecutive
+// representable values. Notice that is higher than the
+// worst-case clamping error with clamping to [-8, 8]: 3.4e-4 for Logistic.
+// Using [-8, 8] thus seems like the better compromise overall, enjoying
+// an increment of 2.4e-4 between representable values and a worst-case
+// clamping error of 3.4e-4, both better than the increment of 4.9e-4 with
+// [-16, 16].
+//
+// Moreover, all other things being equal, it is nice to choose the narrower
+// representation range, as that makes the implementation of fixed-point
+// math functions a little cheaper (each integer bit requires an additional
+// barrel-shifter step in the implementation of exp(-x)). That is further
+// reason to prefer [-8, 8] over [-16, 16]. The choice of [-16, 16] would make
+// sense for 32-bit float or 32-bit fixed-point quantization, but we are
+// aiming for 16-bit fixed-point quantization of these internal nodes here.
+// +template +inline void LstmCell(const LstmCellParams& params, + const RuntimeShape& unextended_input_shape, + const uint8_t* input_data_uint8, + const RuntimeShape& unextended_prev_activ_shape, + const uint8_t* prev_activ_data_uint8, + const RuntimeShape& weights_shape, + const uint8_t* weights_data_uint8, + const RuntimeShape& unextended_bias_shape, + const int32_t* bias_data_int32, + const RuntimeShape& unextended_prev_state_shape, + const int16_t* prev_state_data_int16, + const RuntimeShape& unextended_output_state_shape, + int16_t* output_state_data_int16, + const RuntimeShape& unextended_output_activ_shape, + uint8_t* output_activ_data_uint8, + const RuntimeShape& unextended_concat_temp_shape, + uint8_t* concat_temp_data_uint8, + const RuntimeShape& unextended_activ_temp_shape, + int16_t* activ_temp_data_int16, void* gemmlowp_context) { + (void)gemmlowp_context; // only used in optimized code. + int32_t weights_zero_point = params.weights_zero_point; + int32_t accum_multiplier = params.accum_multiplier; + int accum_shift = params.accum_shift; + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape prev_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape); + const RuntimeShape bias_shape = + RuntimeShape::ExtendedShape(4, unextended_bias_shape); + const RuntimeShape prev_state_shape = + RuntimeShape::ExtendedShape(4, 
unextended_prev_state_shape); + const RuntimeShape output_state_shape = + RuntimeShape::ExtendedShape(4, unextended_output_state_shape); + const RuntimeShape output_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_output_activ_shape); + const RuntimeShape concat_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape); + const RuntimeShape activ_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape); + TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); + + // Gather dimensions information, and perform consistency checks. + const int weights_dim_count = weights_shape.DimensionsCount(); + const int outer_size = MatchingFlatSizeSkipDim( + input_shape, 3, prev_activ_shape, prev_state_shape, output_state_shape, + output_activ_shape); + const int input_depth = input_shape.Dims(3); + const int prev_activ_depth = prev_activ_shape.Dims(3); + const int total_input_depth = prev_activ_depth + input_depth; + TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1), + total_input_depth); + const int intern_activ_depth = + MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3); + TFLITE_DCHECK_EQ(weights_shape.FlatSize(), + intern_activ_depth * total_input_depth); + TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1); + TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0); + const int output_depth = + MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape, + 3, output_activ_shape, 3); + TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4); + const int fc_batches = FlatSizeSkipDim(activ_temp_shape, 3); + const int fc_output_depth = + MatchingDim(weights_shape, weights_dim_count - 2, activ_temp_shape, 3); + const int fc_accum_depth = total_input_depth; + TFLITE_DCHECK_EQ(fc_output_depth, 4 * output_depth); + + // Depth-concatenate prev_activ and input data together. 
+ uint8_t const* concat_input_arrays_data[2] = {input_data_uint8, + prev_activ_data_uint8}; + const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape, + &prev_activ_shape}; + tflite::ConcatenationParams concat_params; + concat_params.axis = 3; + concat_params.inputs_count = 2; + Concatenation(concat_params, concat_input_arrays_shapes, + concat_input_arrays_data, concat_temp_shape, + concat_temp_data_uint8); + + // Implementation of the fully connected node inside the LSTM cell. + // The operands are 8-bit integers, the accumulators are internally 32bit + // integers, and the output is 16-bit fixed-point with 3 integer bits so + // the output range is [-2^3, 2^3] == [-8, 8]. The rationale for that + // is explained in the function comment above. + for (int b = 0; b < fc_batches; ++b) { + for (int out_c = 0; out_c < fc_output_depth; ++out_c) { + // Internal accumulation. + // Initialize accumulator with the bias-value. + int32_t accum = bias_data_int32[out_c]; + // Accumulation loop. + for (int d = 0; d < fc_accum_depth; ++d) { + int16_t input_val = + concat_temp_data_uint8[b * fc_accum_depth + d] - 128; + int16_t weights_val = + weights_data_uint8[out_c * fc_accum_depth + d] - weights_zero_point; + accum += input_val * weights_val; + } + // Down-scale the final int32 accumulator to the scale used by our + // (16-bit, using 3 integer bits) fixed-point format. The quantized + // multiplier and shift here have been pre-computed offline + // (e.g. by toco). + accum = + MultiplyByQuantizedMultiplier(accum, accum_multiplier, accum_shift); + // Saturate, cast to int16, and store to the temporary activations array. + accum = std::max(-32768, std::min(32767, accum)); + activ_temp_data_int16[out_c + fc_output_depth * b] = accum; + } + } + + // Rest of the LSTM cell: tanh and logistic math functions, and some adds + // and muls, all done in 16-bit fixed-point. 
+ for (int b = 0; b < outer_size; ++b) { + for (int c = 0; c < output_depth; ++c) { + // Define the fixed-point data types that we will use here. All use + // int16 as the underlying integer type i.e. all are 16-bit fixed-point. + // They only differ by the number of integral vs. fractional bits, + // determining the range of values that they can represent. + // + // F0 uses 0 integer bits, range [-1, 1]. + // This is the return type of math functions such as tanh, logistic, + // whose range is in [-1, 1]. + using F0 = gemmlowp::FixedPoint; + // F3 uses 3 integer bits, range [-8, 8]. + // This is the range of the previous fully-connected node's output, + // which is our input here. + using F3 = gemmlowp::FixedPoint; + // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits, + // 2^StateIntegerBits]. It's used to represent the internal state, whose + // number of integer bits is currently dictated by the model. See comment + // on the StateIntegerBits template parameter above. + using FS = gemmlowp::FixedPoint; + // Implementation of input gate, using fixed-point logistic function. + F3 input_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 0 * output_depth + c]); + F0 input_gate_output = gemmlowp::logistic(input_gate_input); + // Implementation of input modulation gate, using fixed-point tanh + // function. + F3 input_modulation_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 1 * output_depth + c]); + F0 input_modulation_gate_output = + gemmlowp::tanh(input_modulation_gate_input); + // Implementation of forget gate, using fixed-point logistic function. + F3 forget_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 2 * output_depth + c]); + F0 forget_gate_output = gemmlowp::logistic(forget_gate_input); + // Implementation of output gate, using fixed-point logistic function. 
+ F3 output_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 3 * output_depth + c]); + F0 output_gate_output = gemmlowp::logistic(output_gate_input); + // Implementation of internal multiplication nodes, still in fixed-point. + F0 input_times_input_modulation = + input_gate_output * input_modulation_gate_output; + FS prev_state = FS::FromRaw(prev_state_data_int16[b * output_depth + c]); + FS prev_state_times_forget_state = forget_gate_output * prev_state; + // Implementation of internal addition node, saturating. + FS new_state = gemmlowp::SaturatingAdd( + gemmlowp::Rescale(input_times_input_modulation), + prev_state_times_forget_state); + // Implementation of last internal Tanh node, still in fixed-point. + // Since a Tanh fixed-point implementation is specialized for a given + // number or integer bits, and each specialization can have a substantial + // code size, and we already used above a Tanh on an input with 3 integer + // bits, and per the table in the above function comment there is no + // significant accuracy to be lost by clamping to [-8, +8] for a + // 3-integer-bits representation, let us just do that. This helps people + // porting this to targets where code footprint must be minimized. + F3 new_state_f3 = gemmlowp::Rescale<3>(new_state); + F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3); + // Store the new internal state back to memory, as 16-bit integers. + // Note: here we store the original value with StateIntegerBits, not + // the rescaled 3-integer-bits value fed to tanh. + output_state_data_int16[b * output_depth + c] = new_state.raw(); + // Down-scale the output activations to 8-bit integers, saturating, + // and store back to memory. 
+ int16_t rescaled_output_activ = + gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8); + int16_t clamped_output_activ = std::max( + -128, std::min(127, rescaled_output_activ)); + output_activ_data_uint8[b * output_depth + c] = + 128 + clamped_output_activ; + } + } +} + +} // namespace reference_ops +} // namespace tflite +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/mul.h b/src/tensorflow/lite/kernels/internal/reference/mul.h index 0578b81..fca74a3 100644 --- a/src/tensorflow/lite/kernels/internal/reference/mul.h +++ b/src/tensorflow/lite/kernels/internal/reference/mul.h @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,12 +15,18 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ +#include +#include + #include "tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_ops { +// Maximum dimension supported by the broadcast mul operation. +constexpr int kMaxMulBroadcastDim = 6; + // Element-wise mul that can often be used for inner loop of broadcast Mul as // well as the non-broadcast Mul. 
inline void MulElementwise(int size, const ArithmeticParams& params, @@ -51,14 +57,28 @@ inline void Mul(const ArithmeticParams& params, GetActivationParams(params, &output_activation_min, &output_activation_max); const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( + output_data[i] = ActivationFunctionWithMinMax( input1_data[i] * input2_data[i], output_activation_min, output_activation_max); } } +inline void Mul(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const std::complex* input1_data, + const RuntimeShape& input2_shape, + const std::complex* input2_data, + const RuntimeShape& output_shape, + std::complex* output_data) { + const int flat_size = + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = input1_data[i] * input2_data[i]; + } +} + inline void Mul(const ArithmeticParams& params, const RuntimeShape& input1_shape, const uint8_t* input1_data, const RuntimeShape& input2_shape, const uint8_t* input2_data, @@ -66,98 +86,179 @@ inline void Mul(const ArithmeticParams& params, TFLITE_DCHECK_LE(params.quantized_activation_min, params.quantized_activation_max); const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); MulElementwise(flat_size, params, input1_data, input2_data, output_data); } -inline void BroadcastMul4DSlow(const ArithmeticParams& params, +template +void BroadcastMulRecursiveDimensions( + const ArithmeticParams& params, int dimension, const T* input1_data, + const T* input2_data, T* output_data, size_t* input1_offset_p, + size_t* input2_offset_p, size_t* output_offset, + const NdArrayDesc& desc1, + const NdArrayDesc& desc2, + const int32_t 
extended_output_shape_dims[kMaxMulBroadcastDim], + F binary_func) { + if (dimension == kMaxMulBroadcastDim - 1) { + for (int c = 0; c < extended_output_shape_dims[dimension]; ++c) { + const T input1_val = input1_data[*input1_offset_p]; + const T input2_val = input2_data[*input2_offset_p]; + output_data[*output_offset] = binary_func(params, input1_val, input2_val); + *input1_offset_p += desc1.strides[dimension]; + *input2_offset_p += desc2.strides[dimension]; + ++(*output_offset); + } + } else { + for (int a = 0; a < extended_output_shape_dims[dimension]; ++a) { + size_t input1_offset_c = *input1_offset_p; + size_t input2_offset_c = *input2_offset_p; + BroadcastMulRecursiveDimensions( + params, dimension + 1, input1_data, input2_data, output_data, + &input1_offset_c, &input2_offset_c, output_offset, desc1, desc2, + extended_output_shape_dims, binary_func); + *input1_offset_p += desc1.strides[dimension]; + *input2_offset_p += desc2.strides[dimension]; + } + } +} + +inline void BroadcastMul6DSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, const uint8_t* input1_data, const RuntimeShape& input2_shape, const uint8_t* input2_data, const RuntimeShape& output_shape, uint8_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; + NdArrayDesc desc1; + NdArrayDesc desc2; NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, &desc2); const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t unclamped_result = - params.output_offset + 
- MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = std::min( - params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } - } -} + RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape); + // Cache output shape dimensions. + int32_t extended_output_shape_dims[kMaxMulBroadcastDim]; + std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(), + sizeof(extended_output_shape_dims)); -template -void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& unextended_input1_shape, - const T* input1_data, - const RuntimeShape& unextended_input2_shape, - const T* input2_data, - const RuntimeShape& unextended_output_shape, - T* output_data) { - T output_activation_min; - T output_activation_max; - GetActivationParams(params, &output_activation_min, &output_activation_max); - - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastMulRecursiveDimensions( + params, 0, input1_data, input2_data, output_data, &input1_offset, + &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims, + [](const ArithmeticParams& params, const uint8_t input1_val, + const uint8_t input2_val) { + const int32_t offsetted_input1_val = params.input1_offset + input1_val; + const int32_t offsetted_input2_val = params.input2_offset + input2_val; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplier( + offsetted_input1_val * offsetted_input2_val, + 
params.output_multiplier, params.output_shift); + const int32_t clamped_output = std::min( + params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + return static_cast(clamped_output); + }); +} - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; +template +inline typename std::enable_if< + !is_small_integer::value || enable_for_short_integers, void>::type +BroadcastMul6DSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const T* input2_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6); + NdArrayDesc desc1; + NdArrayDesc desc2; NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); + const RuntimeShape extended_output_shape = + RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape); + // Cache output shape dimensions. + int32_t extended_output_shape_dims[kMaxMulBroadcastDim]; + std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(), + sizeof(extended_output_shape_dims)); // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). + // trailing dimension changing most rapidly (channels has the smallest + // stride, typically 1 element). // // In generated C code, we store arrays with the dimensions reversed. The // first dimension has smallest stride. // // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - output_data[Offset(output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] * - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - output_activation_min, output_activation_max); - } - } - } - } + // nesting loops such that the innermost loop has the smallest stride for + // the best cache behavior. + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastMulRecursiveDimensions( + params, 0, input1_data, input2_data, output_data, &input1_offset, + &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims, + [](const ArithmeticParams& params, const T input1_val, + const T input2_val) { + T output_activation_min; + T output_activation_max; + GetActivationParams(params, &output_activation_min, + &output_activation_max); + return ActivationFunctionWithMinMax(input1_val * input2_val, + output_activation_min, + output_activation_max); + }); +} + +inline void BroadcastMul6DSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const std::complex* input1_data, + const RuntimeShape& unextended_input2_shape, + const std::complex* input2_data, + const RuntimeShape& unextended_output_shape, + std::complex* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6); + + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + const RuntimeShape extended_output_shape = + RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape); + // Cache output shape dimensions. 
+ int32_t extended_output_shape_dims[kMaxMulBroadcastDim]; + std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(), + sizeof(extended_output_shape_dims)); + + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastMulRecursiveDimensions( + params, 0, input1_data, input2_data, output_data, &input1_offset, + &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims, + [](const ArithmeticParams& params, const std::complex input1_val, + const std::complex input2_val) { + return input1_val * input2_val; + }); +} + +template +inline void BroadcastMul4DSlow( + const ArithmeticParams& params, const RuntimeShape& input1_shape, + const T* input1_data, const RuntimeShape& input2_shape, + const T* input2_data, const RuntimeShape& output_shape, T* output_data) { + return BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data); } } // namespace reference_ops diff --git a/src/tensorflow/lite/kernels/internal/reference/pad.h b/src/tensorflow/lite/kernels/internal/reference/pad.h index 2a040ce..2758944 100644 --- a/src/tensorflow/lite/kernels/internal/reference/pad.h +++ b/src/tensorflow/lite/kernels/internal/reference/pad.h @@ -24,8 +24,8 @@ namespace tflite { namespace reference_ops { -// TFLite Pad supports activation tensors with up to 4 dimensions. -constexpr int PadKernelMaxDimensionCount() { return 4; } +// TFLite Pad supports activation tensors with up to 5 dimensions. +constexpr int PadKernelMaxDimensionCount() { return 5; } // There are two versions of pad: Pad and PadV2. In PadV2 there is a second // scalar input that provides the padding value. 
Therefore pad_value_ptr can be @@ -46,8 +46,8 @@ inline void PadImpl(const tflite::PadParams& op_params, TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount()); TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount()); - // Runtime calls are currently fixed at 4 dimensions. Copy inputs so we can - // pad them to 4 dims (yes, we are "padding the padding"). + // Runtime calls are currently fixed at 5 dimensions. Copy inputs so we can + // pad them to 5 dims (yes, we are "padding the padding"). int left_padding_copy[PadKernelMaxDimensionCount()]; for (int i = 0; i < PadKernelMaxDimensionCount(); i++) { left_padding_copy[i] = 0; @@ -67,39 +67,46 @@ inline void PadImpl(const tflite::PadParams& op_params, } const int output_batch = ext_output_shape.Dims(0); - const int output_height = ext_output_shape.Dims(1); - const int output_width = ext_output_shape.Dims(2); - const int output_depth = ext_output_shape.Dims(3); + const int output_plane = ext_output_shape.Dims(1); + const int output_height = ext_output_shape.Dims(2); + const int output_width = ext_output_shape.Dims(3); + const int output_depth = ext_output_shape.Dims(4); const int left_b_padding = left_padding_copy[0]; - const int left_h_padding = left_padding_copy[1]; - const int left_w_padding = left_padding_copy[2]; - const int left_d_padding = left_padding_copy[3]; + const int left_p_padding = left_padding_copy[1]; + const int left_h_padding = left_padding_copy[2]; + const int left_w_padding = left_padding_copy[3]; + const int left_d_padding = left_padding_copy[4]; const int right_b_padding = right_padding_copy[0]; - const int right_h_padding = right_padding_copy[1]; - const int right_w_padding = right_padding_copy[2]; - const int right_d_padding = right_padding_copy[3]; + const int right_p_padding = right_padding_copy[1]; + const int right_h_padding = right_padding_copy[2]; + const int right_w_padding = right_padding_copy[3]; + const int right_d_padding = 
right_padding_copy[4]; const T pad_value = *pad_value_ptr; const T* in_ptr = input_data; T* out_ptr = output_data; for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_h = 0; out_h < output_height; ++out_h) { - for (int out_w = 0; out_w < output_width; ++out_w) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - if (out_b < left_b_padding || - out_b >= output_batch - right_b_padding || - out_h < left_h_padding || - out_h >= output_height - right_h_padding || - out_w < left_w_padding || - out_w >= output_width - right_w_padding || - out_d < left_d_padding || - out_d >= output_depth - right_d_padding) { - *out_ptr++ = pad_value; - } else { - *out_ptr++ = *in_ptr++; + for (int out_p = 0; out_p < output_plane; ++out_p) { + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + for (int out_d = 0; out_d < output_depth; ++out_d) { + if (out_b < left_b_padding || + out_b >= output_batch - right_b_padding || + out_p < left_p_padding || + out_p >= output_plane - right_p_padding || + out_h < left_h_padding || + out_h >= output_height - right_h_padding || + out_w < left_w_padding || + out_w >= output_width - right_w_padding || + out_d < left_d_padding || + out_d >= output_depth - right_d_padding) { + *out_ptr++ = pad_value; + } else { + *out_ptr++ = *in_ptr++; + } } } } diff --git a/src/tensorflow/lite/kernels/internal/reference/pooling.h b/src/tensorflow/lite/kernels/internal/reference/pooling.h index 0872f52..fe17484 100644 --- a/src/tensorflow/lite/kernels/internal/reference/pooling.h +++ b/src/tensorflow/lite/kernels/internal/reference/pooling.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" @@ -23,7 +25,7 @@ limitations under the License. namespace tflite { namespace reference_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -66,6 +68,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; const float average = total / filter_count; output_data[Offset(output_shape, batch, out_y, out_x, channel)] = ActivationFunctionWithMinMax(average, params.float_activation_min, @@ -74,9 +77,10 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8_t* input_data, const RuntimeShape& output_shape, @@ -122,6 +126,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; acc = (acc + filter_count / 2) / filter_count; acc = std::max(acc, params.quantized_activation_min); acc = std::min(acc, params.quantized_activation_max); @@ -131,6 +136,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cpp b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cpp new file mode 100644 index 0000000..bfa0bac --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cpp @@ -0,0 +1,819 @@ +/* Copyright 2019 
The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include +#include +#include +#include +#include +#include + +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { +namespace tensor_utils { + +namespace { +const int32_t kInt16Max = std::numeric_limits::max(); +const int32_t kInt16Min = std::numeric_limits::min(); +} // namespace + +// LINT.IfChange(portable_symmetric_quantize_floats) +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor) { + auto minmax = std::minmax_element(values, values + size); + *min_value = *minmax.first; + *max_value = *minmax.second; + + PortableSymmetricQuantizeFloats(values, size, quantized_values, *min_value, + *max_value, scaling_factor); +} + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor) { + const int32_t kScale = 127; + const float range = std::max(std::abs(min_value), 
std::abs(max_value)); + if (range == 0) { + memset(quantized_values, 0, size * sizeof(int8_t)); + *scaling_factor = 1; + return; + } + *scaling_factor = range / kScale; + const float scaling_factor_inv = kScale / range; + for (int i = 0; i < size; ++i) { + const int32_t quantized_value = + static_cast(TfLiteRound(values[i] * scaling_factor_inv)); + // Clamp: just in case some odd numeric offset. + quantized_values[i] = static_cast( + std::min(kScale, std::max(-kScale, quantized_value))); + } +} +// LINT.ThenChange(//tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.cc:portable_symmetric_quantize_floats) + +void PortableAsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, + float* scaling_factor, int32_t* offset) { + const int32_t kMinScale = -128; + const int32_t kMaxScale = 127; + const double qmin_double = kMinScale; + const double qmax_double = kMaxScale; + const auto minmax = std::minmax_element(values, values + size); + const double rmin = static_cast(std::min(0.0f, *minmax.first)); + const double rmax = static_cast(std::max(0.0f, *minmax.second)); + if (rmin == rmax) { + memset(quantized_values, 0, size * sizeof(int8_t)); + *scaling_factor = 1; + *offset = 0; + return; + } else { + double scale = (rmax - rmin) / (qmax_double - qmin_double); + const double zero_point_from_min = qmin_double - rmin / scale; + const double zero_point_from_max = qmax_double - rmax / scale; + const double zero_point_from_min_error = + std::abs(qmin_double) + std::abs(rmin / scale); + const double zero_point_from_max_error = + std::abs(qmax_double) + std::abs(rmax / scale); + const double zero_point_double = + zero_point_from_min_error < zero_point_from_max_error + ? 
zero_point_from_min + : zero_point_from_max; + int8_t nudged_zero_point = 0; + if (zero_point_double <= qmin_double) { + nudged_zero_point = kMinScale; + } else if (zero_point_double >= qmax_double) { + nudged_zero_point = kMaxScale; + } else { + nudged_zero_point = static_cast(round(zero_point_double)); + } + *scaling_factor = scale; + *offset = nudged_zero_point; + } + const float scaling_factor_inv = 1.0f / *scaling_factor; + for (int i = 0; i < size; ++i) { + const int32_t quantized_value = static_cast( + TfLiteRound(*offset + values[i] * scaling_factor_inv)); + quantized_values[i] = + std::min(kMaxScale, std::max(kMinScale, quantized_value)); + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix, + int m_rows, int m_cols, + const float* vector, + int n_batch, float* result) { + float* result_in_batch = result; + for (int b = 0; b < n_batch; b++) { + const float* matrix_ptr = matrix; + for (int r = 0; r < m_rows; r++) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + b * m_cols; + for (int c = 0; c < m_cols; c++) { + dot_prod += *matrix_ptr++ * *vector_in_batch++; + } + *result_in_batch += dot_prod; + ++result_in_batch; + } + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result) { + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + // Get the address of the first row. + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + // Initialize the dot product sum for the row to 0. + int32_t dotprod = 0; +#if defined(__GNUC__) + // Prefetch the row to cache. 
+ __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + for (int col = 0; col < m_cols; ++col, ++row_ptr) { + dotprod += (*row_ptr) * (vectors[col]); + } // for col + *result += dotprod * batch_scaling_factor; + ++result; + } // for row + } // for batch +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context) { + if (input_offset == nullptr) { + PortableMatrixBatchVectorMultiplyAccumulate( + matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result); + return; + } + if (!compute_row_sums || *compute_row_sums) { + PortableReductionSumVector(matrix, row_sums, m_rows, m_cols); + if (compute_row_sums) { + *compute_row_sums = false; + } + } + + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + const int32_t batch_offset = input_offset[batch]; + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + int32_t dotprod = 0; + float scale = batch_scaling_factor; + if (per_channel_scale) { + scale *= per_channel_scale[row]; + } +#if defined(__GNUC__) + // Prefetch the row to cache. 
+ __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + for (int col = 0; col < m_cols; ++col, ++row_ptr) { + dotprod += (*row_ptr) * vectors[col]; + } // for col + dotprod -= row_sums[row] * batch_offset; + *result += dotprod * scale; + ++result; + } // for row + } // for batch +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result) { + const int kBlockSize = 4; + TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; batch++) { + const float* matrix_ptr = matrix; + for (int row = 0; row < m_rows; row++) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + batch * m_cols; + for (int i = segments[row]; i < segments[row + 1]; i++) { + const int block_start_index = indices[i] * kBlockSize; + const float* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++; + } + } + result[batch * m_rows + row] += dot_prod; + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t* per_channel_scale, + const int32_t* per_channel_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result) { + const int kBlockSize = 16; + TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; ++batch) { + const int8_t* matrix_ptr = 
matrix; + for (int row = 0; row < m_rows; ++row) { + int32_t dot_prod = 0; + const int8_t* vector_in_batch = vector + batch * m_cols; + for (int i = segments[row]; i < segments[row + 1]; ++i) { + const int block_start_index = indices[i] * kBlockSize; + const int8_t* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr * *vector_block_in_batch_ptr++; + dot_prod += *matrix_ptr++ * input_offset; + } + } + const int32_t bias_value = bias_vector != nullptr ? bias_vector[row] : 0; + dot_prod = MultiplyByQuantizedMultiplier( + dot_prod + bias_value, + per_channel_scale ? per_channel_scale[row] : output_multiplier, + per_channel_shift ? per_channel_shift[row] : output_shift); + dot_prod += output_offset; + result[batch * m_rows + row] = + static_cast(ActivationFunctionWithMinMax( + dot_prod, output_activation_min, output_activation_max)); + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result) { + const int kBlockSize = 16; + TFLITE_DCHECK_EQ( // NOLINT + m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; batch++) { + const float* matrix_ptr = matrix; + const uint8_t* ledger_ptr = ledger; + for (int row = 0; row < m_rows; row++) { + float dot_prod = 0.0f; + int num_nonzero_blocks = *ledger_ptr++; + if (num_nonzero_blocks > 0) { + const float* vector_in_batch = vector + batch * m_cols; + for (int i = 0; i < num_nonzero_blocks; i++) { + const int block_start_index = *ledger_ptr++ * kBlockSize; + const float* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++; + } + } + } + result[batch * m_rows + row] += dot_prod; + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( 
+ const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result, + const float* per_channel_scale) { + static const int kBlockSize = 16; + TFLITE_DCHECK_EQ( // NOLINT + m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + const uint8_t* ledger_ptr = ledger; + // Get the address of the first row. + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + // Initialize the dot product sum for the row to 0. + int32_t dotprod = 0; +#if defined(__GNUC__) + // Prefetch the row to cache. + __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + int num_nonzero_blocks = *ledger_ptr++; + for (int i = 0; i < num_nonzero_blocks; i++) { + const int block_start_index = *ledger_ptr++ * kBlockSize; + const int8_t* vector_block_ptr = vectors + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dotprod += (*row_ptr++) * (*vector_block_ptr++); + } // for block + } // for num_nonzero_blocks + float scaling_factor = batch_scaling_factor; + if (per_channel_scale) { + scaling_factor *= per_channel_scale[row]; + } + result[batch * m_rows + row] += dotprod * scaling_factor; + } // for row + } // for batch +} + +template +void PortableMatrixBatchVectorMultiplyAccumulateImpl( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + T* output) { + const int16_t output_max = std::numeric_limits::max(); + const int16_t output_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_output; ++row) { + int32_t acc = bias[row]; + for (int col = 0; col < n_input; ++col) { + int8_t input_val = 
input[batch * n_input + col]; + int8_t weights_val = input_to_gate_weights[row * n_input + col]; + acc += input_val * weights_val; + } + acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); + acc += output_zp; + acc += output[batch * n_output + row]; + if (acc > output_max) { + acc = output_max; + } + if (acc < output_min) { + acc = output_min; + } + output[batch * n_output + row] = static_cast(acc); + } + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulateImpl( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, output); +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulateImpl( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, output); +} + +void PortableMatrixBatchVectorMultiply(const int8_t* input, + int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, + int32_t n_cell, int8_t* gate_output, + int8_t gate_output_zp) { + const int32_t int8_max = std::numeric_limits::max(); + const int32_t int8_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_cell; ++row) { + int32_t acc = 0; + for (int col = 0; col < n_input; ++col) { + int32_t input_val = input[batch * n_input + 
col]; + int8_t weights_val = input_to_gate_weights[row * n_input + col]; + acc += (input_val - input_zeropoint) * weights_val; + } + acc = MultiplyByQuantizedMultiplier(acc, input_to_gate_effective_scale_a, + input_to_gate_effective_scale_b); + acc += gate_output_zp; + if (acc > int8_max) { + acc = int8_max; + } + if (acc < int8_min) { + acc = int8_min; + } + gate_output[batch * n_cell + row] = static_cast(acc); + } + } +} + +void PortableMatrixBatchVectorMultiply( + const int16_t* hidden, const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden, + int32_t n_output, int32_t output_zp, int8_t* proj_output) { + const int16_t int8_max = std::numeric_limits::max(); + const int16_t int8_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_output; ++row) { + int64_t acc = gate_bias[row]; + for (int col = 0; col < n_hidden; ++col) { + int16_t input_val = hidden[batch * n_hidden + col]; + int8_t weights_val = hidden_to_output_weights[row * n_hidden + col]; + int64_t curr = acc; + acc += input_val * weights_val; + if (input_val * weights_val > 0 && acc < curr) { + acc = std::numeric_limits::max(); + } + if (input_val * weights_val < 0 && acc > curr) { + acc = std::numeric_limits::min(); + } + } + acc = MultiplyByQuantizedMultiplier(acc, proj_effective_scale_a, + proj_effective_scale_b); + acc += output_zp; + if (acc > int8_max) { + acc = int8_max; + } + if (acc < int8_min) { + acc = int8_min; + } + proj_output[batch * n_output + row] = acc; + } + } +} + +void PortableApplyLayerNorm(const int16_t* input, + const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output) { + // The square of std::pow(2, 10), which is the extra factor that makes sure + // normalized values has enough 
resolution. + static const int kTwoToPower20 = 1 << 20; + for (int i = 0; i < n_batch; ++i) { + int64_t sum = 0; + int64_t sum_sq = 0; + for (int j = 0; j < n_input; ++j) { + const int32_t index = i * n_input + j; + int32_t val = static_cast(input[index]); + sum += val; + sum_sq += val * val; + } + int32_t mean = + static_cast(static_cast(sum) * 1024 / n_input); + // TODO(b/173994730): Avoids overflow but only works for POT n_input. + int32_t temp = kTwoToPower20 / n_input; + int64_t variance = + sum_sq * temp - static_cast(mean) * static_cast(mean); + int32_t variance2 = static_cast(variance / kTwoToPower20); + if (variance2 < 1) { + variance2 = variance_limit; + } + int32_t stddev_inverse_a; + int stddev_inverse_b; + GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1, + &stddev_inverse_a, &stddev_inverse_b); + + for (int j = 0; j < n_input; ++j) { + const int32_t index = i * n_input + j; + int32_t val = static_cast(input[index]); + int32_t shifted = 1024 * val - mean; + int32_t rescaled = MultiplyByQuantizedMultiplier( + shifted, stddev_inverse_a, stddev_inverse_b); + // TODO(jianlijianli): Saturate this. + int64_t val3 = rescaled * layer_norm_weights[j] + bias[j]; + int32_t val4 = + static_cast((val3 > 0 ? 
val3 + 512 : val3 - 512) / 1024); + int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a, + layer_norm_scale_b + 12); + val5 = std::min(std::max(kInt16Min, val5), kInt16Max); + output[index] = static_cast(val5); + } + } +} + +void PortableApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + const float layer_norm_scale = + layer_norm_scale_a * + std::pow(2.0, static_cast(layer_norm_scale_b - 31)); + const float bias_scale = + static_cast(std::pow(2.0, -10)) * layer_norm_scale; + + for (int batch = 0; batch < n_batch; ++batch) { + float sum = 0.0f; + float sum_sq = 0.0f; + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float value = static_cast(input[index]); + sum += value; + sum_sq += value * value; + } + const float mean = sum / n_input; + float stddev_inv = 0.0f; + const float variance = sum_sq / n_input - mean * mean; + if (variance == 0) { + stddev_inv = 1.0f / std::sqrt(1e-8f); + } else { + stddev_inv = 1.0f / std::sqrt(variance); + } + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float normalized_value = + (static_cast(input[index]) - mean) * stddev_inv; + const float weighted_normalized_value = + normalized_value * layer_norm_weights[i] * layer_norm_scale + + bias[i] * bias_scale; + const int32_t quant_output = static_cast(round( + weighted_normalized_value * static_cast(std::pow(2, 12)))); + output[index] = std::min(int16_max, std::max(int16_min, quant_output)); + } + } +} + +void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix, + int32_t scalar, int32_t n_row, + int32_t n_col, int32_t* output) { + for (int i = 0; i < n_row; ++i) { + int32_t row_sum = 0; + for (int j = 0; j < n_col; 
++j) { + row_sum += *matrix++; + } + output[i] += row_sum * scalar; + } +} + +void PortableApplySigmoid(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int c = 0; c < n_input; c++) { + using F3 = gemmlowp::FixedPoint; + using F0 = gemmlowp::FixedPoint; + const int index = batch * n_input + c; + F3 sigmoid_input = F3::FromRaw(input[index]); + F0 sigmoid_output = gemmlowp::logistic(sigmoid_input); + output[index] = sigmoid_output.raw(); + } + } +} + +void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float float_input = + input[index] * static_cast(std::pow(2, -12)); + const float float_output = 1.0f / (1.0f + std::exp(-float_input)); + const int32_t quant_output = static_cast( + float_output * static_cast(std::pow(2, 15))); + const int32_t quant_output_clamped = + std::min(int16_max, std::max(int16_min, quant_output)); + output[index] = static_cast(quant_output_clamped); + } + } +} + +template +void PortableApplyTanhImpl(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + using FX = gemmlowp::FixedPoint; + using F0 = gemmlowp::FixedPoint; + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + FX tanh_input = FX::FromRaw(input[index]); + F0 tanh_output = gemmlowp::tanh(tanh_input); + output[index] = tanh_output.raw(); + } + } +} + +void PortableApplyTanh(int32_t integer_bits, const int16_t* input, + int32_t n_batch, int32_t n_input, int16_t* output) { + assert(integer_bits <= 6); +#define DISPATCH_TANH(i) \ + case i: \ + PortableApplyTanhImpl(input, n_batch, n_input, output); \ + 
break; + switch (integer_bits) { + DISPATCH_TANH(0); + DISPATCH_TANH(1); + DISPATCH_TANH(2); + DISPATCH_TANH(3); + DISPATCH_TANH(4); + DISPATCH_TANH(5); + DISPATCH_TANH(6); + default: + return; + } +#undef DISPATCH_TANH +} + +void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int32_t integer_bits, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + const double two = 2.0; + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float float_input = + input[index] * std::pow(two, static_cast(integer_bits)); + const float float_output = std::tanh(float_input); + const int32_t quant_output = static_cast( + float_output * static_cast(std::pow(2, 15))); + const int32_t quant_output_clamped = + std::min(int16_max, std::max(int16_min, quant_output)); + output[index] = static_cast(quant_output_clamped); + } + } +} + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int shift, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const int16_t a = input_1[index]; + const int16_t b = input_2[index]; + const int32_t value = static_cast(a) * static_cast(b); + output[index] = + static_cast(gemmlowp::RoundingDivideByPOT(value, shift)); + } + } +} + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const int16_t a = input_1[index]; + const int16_t b = input_2[index]; + int32_t value = static_cast(a) * static_cast(b); + value = MultiplyByQuantizedMultiplier(value, multiplier, shift); + 
value += output_zp; + value = std::min(std::max(static_cast(-128), value), + static_cast(127)); + + output[index] = static_cast(value); + } + } +} + +void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + int32_t sum = input_1[index] + input_2[index]; + const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum)); + output[index] = static_cast(sum_clamped); + } + } +} + +float PortableVectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size) { + float result = 0.0; + for (int v = 0; v < v_size; v++) { + result += *vector1++ * *vector2++; + } + return result; +} + +namespace { +inline int32_t VectorVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size) { + int32_t result = 0; + for (int v = 0; v < v_size; v++) { + result += *vector1++ * *vector2++; + } + return result; +} +} // namespace + +void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, + int v_size, int n_batch, + int32_t* result) { + for (int b = 0; b < n_batch; b++) { + result[b] = VectorVectorDotProduct(vector1, vector2, v_size); + vector1 += v_size; + vector2 += v_size; + } +} + +void PortableVectorBatchVectorCwiseProductAccumulate( + const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch, + int32_t multiplier, int shift, int16_t* result) { + for (int b = 0; b < n_batch; b++) { + for (int v = 0; v < v_size; v++) { + int32_t prod = vector[v] * *batch_vector++; + prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift); + int32_t output = prod + *result; + output = std::max(std::min(static_cast(32767), output), + static_cast(-32768)); + *result++ = output; + } + } +} + +void PortableSub1Vector(const float* vector, int v_size, float* result) { + for (int v = 0; v < v_size; v++) { + *result++ 
= 1.0f - *vector++; + } +} + +void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result) { + static const int16_t kOne = 32767; + for (int v = 0; v < v_size; v++) { + *result++ = kOne - *vector++; + } +} + +void PortableVectorScalarMultiply(const int8_t* vector, const int v_size, + const float scale, float* result) { + for (int v = 0; v < v_size; ++v) { + *result++ = scale * *vector++; + } +} + +void PortableMeanStddevNormalization(const float* __restrict__ input_vector, + float* __restrict__ output_vector, + int v_size, int n_batch) { + for (int batch = 0; batch < n_batch; ++batch) { + float sum = 0.0f; + for (int i = 0; i < v_size; ++i) { + sum += input_vector[i]; + } + const float mean = sum / v_size; + float sum_diff_sq = 0.0f; + for (int i = 0; i < v_size; ++i) { + const float diff = input_vector[i] - mean; + sum_diff_sq += diff * diff; + } + const float variance = sum_diff_sq / v_size; + constexpr float kNormalizationConstant = 1e-8f; + const float stddev_inv = + 1.0f / std::sqrt(variance + kNormalizationConstant); + for (int i = 0; i < v_size; ++i) { + output_vector[i] = (input_vector[i] - mean) * stddev_inv; + } + input_vector += v_size; + output_vector += v_size; + } +} + +void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, + int32_t n_batch, int32_t n_cell, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + for (int i = 0; i < n_batch * n_cell; ++i) { + int32_t x = static_cast(input[i]) - static_cast(input_zp); + int32_t h = + static_cast(recurrent[i]) - static_cast(recurrent_zp); + int32_t x_scaled = MultiplyByQuantizedMultiplier(x, input_effective_scale_a, + input_effective_scale_b); + int32_t h_scaled = MultiplyByQuantizedMultiplier( + h, 
recurrent_effective_scale_a, recurrent_effective_scale_b); + int32_t y = h_scaled + x_scaled; + if (y > int16_max) { + y = int16_max; + } + if (y < int16_min) { + y = int16_min; + } + output[i] = static_cast(y); + } +} + +} // namespace tensor_utils +} // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h new file mode 100644 index 0000000..7c623f7 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h @@ -0,0 +1,336 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ + +#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { +namespace tensor_utils { + +// Check if all entries of a vector are zero for float. +bool IsZeroVector(const float* vector, int v_size) { + return PortableIsZeroVector(vector, v_size); +} + +// Check if all entries of a vector are zero for int8_t. 
+bool IsZeroVector(const int8_t* vector, int v_size) { + return PortableIsZeroVector(vector, v_size); +} + +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min, float* max, + float* scaling_factor) { + PortableSymmetricQuantizeFloats(values, size, quantized_values, min, max, + scaling_factor); +} + +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor) { + PortableSymmetricQuantizeFloats(values, size, quantized_values, min_value, + max_value, scaling_factor); +} + +void AsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* scaling_factor, + int32_t* offset) { + PortableAsymmetricQuantizeFloats(values, size, quantized_values, + scaling_factor, offset); +} + +void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, + int m_cols, const float* vector, + int n_batch, float* result) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + n_batch, result); +} + +void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix, + const int m_rows, const int m_cols, + const int8_t* __restrict__ vector, + const float* scaling_factors, + int n_batch, + float* __restrict__ result) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + scaling_factors, n_batch, result); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, + per_channel_scale, input_offset, scratch, 
row_sums, compute_row_sums, + context); +} + +void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix, + const int m_rows, const int m_cols, + const int8_t* __restrict__ vector, + const float* scaling_factors, + int n_batch, int32_t* scratch, + float* __restrict__ result, + CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + scaling_factors, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + matrix, segments, indices, m_rows, m_cols, vector, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate( + matrix, ledger, m_rows, m_cols, vector, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t* per_channel_scale, + const int32_t* per_channel_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + + int8_t* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + matrix, segments, indices, m_rows, m_cols, vector, bias_vector, n_batch, + input_offset, output_multiplier, output_shift, per_channel_scale, + per_channel_shift, 
output_offset, output_activation_min, + output_activation_max, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result, + const float* per_channel_scale) { + PortableSparseMatrixBatchVectorMultiplyAccumulate( + matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, result, + per_channel_scale); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, scratch, output, context); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, scratch, output, context); +} + +void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar, + int32_t n_row, int32_t n_col, + int32_t* output) { + PortableMatrixScalarMultiplyAccumulate(matrix, scalar, n_row, n_col, output); +} + +void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, int32_t n_cell, + int8_t* gate_output, int8_t gate_output_zp) { + 
PortableMatrixBatchVectorMultiply( + input, input_zeropoint, input_to_gate_weights, + input_to_gate_effective_scale_a, input_to_gate_effective_scale_b, n_batch, + n_input, n_cell, gate_output, gate_output_zp); +} + +void MatrixBatchVectorMultiply(const int16_t* hidden, + const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, + int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, + int32_t n_hidden, int32_t n_output, + int32_t output_zp, int8_t* proj_output) { + PortableMatrixBatchVectorMultiply(hidden, hidden_to_output_weights, + proj_effective_scale_a, + proj_effective_scale_b, gate_bias, n_batch, + n_hidden, n_output, output_zp, proj_output); +} + +void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output) { + PortableApplyLayerNorm(input, layer_norm_weights, bias, layer_norm_scale_a, + layer_norm_scale_b, variance_limit, n_batch, n_input, + output); +} + +void ApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output) { + PortableApplyLayerNormFloat(input, layer_norm_weights, layer_norm_scale_a, + layer_norm_scale_b, bias, n_batch, n_input, + output); +} + +void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output) { + PortableApplySigmoid(input, n_batch, n_input, output); +} + +void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output) { + PortableApplySigmoidFloat(input, n_batch, n_input, output); +} + +void ApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + PortableApplyTanh(integer_bits, input, n_batch, n_input, output); +} + +void ApplyTanhFloat(const int16_t* input, int32_t 
n_batch, int32_t n_input, + int32_t integer_bits, int16_t* output) { + PortableApplyTanhFloat(input, n_batch, n_input, integer_bits, output); +} + +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int16_t* output) { + PortableCwiseMul(input_1, input_2, n_batch, n_input, shift, output); +} + +void CwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output) { + PortableCwiseMul(input_1, input_2, multiplier, shift, n_batch, n_input, + output_zp, output); +} + +void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output) { + PortableCwiseAdd(input_1, input_2, n_batch, n_input, output); +} + +void CwiseClipping(float* vector, const int v_size, + const float clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void CwiseClipping(int16_t* vector, const int v_size, + const int16_t clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void CwiseClipping(int8_t* vector, const int v_size, + const int8_t clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size, + const int16_t* batch_vector, + int n_batch, int32_t multiplier, + int shift, int16_t* result) { + PortableVectorBatchVectorCwiseProductAccumulate( + vector, v_size, batch_vector, n_batch, multiplier, shift, result); +} + +float VectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size) { + return PortableVectorVectorDotProduct(vector1, vector2, v_size); +} + +void BatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size, + int n_batch, int32_t* result) { + PortableBatchVectorBatchVectorDotProduct(vector1, vector2, v_size, n_batch, + result); +} + +void Sub1Vector(const float* vector, int v_size, float* 
result) { + PortableSub1Vector(vector, v_size, result); +} + +void Sub1Vector(const int16_t* vector, int v_size, int16_t* result) { + PortableSub1Vector(vector, v_size, result); +} + +// Multiply all elements of vector with a scalar. +void VectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result) { + PortableVectorScalarMultiply(vector, v_size, scale, result); +} + +void ReductionSumVector(const float* input_vector, float* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void MeanStddevNormalization(const float* input_vector, float* output_vector, + int v_size, int n_batch) { + PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch); +} + +void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, int32_t n_batch, + int32_t n_cell, int16_t* output) { + PortableTwoGateSaturatingAdd( + input, input_zp, recurrent, recurrent_zp, input_effective_scale_a, + input_effective_scale_b, recurrent_effective_scale_a, + recurrent_effective_scale_b, n_batch, n_cell, output); +} + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h 
b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h new file mode 100644 index 0000000..5e228bb --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h @@ -0,0 +1,248 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ + +#include +#include + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. 
+class CpuBackendContext; + +namespace tensor_utils { + +template +bool PortableIsZeroVector(const T* vector, int v_size) { + for (int i = 0; i < v_size; ++i) { + if (vector[i] != 0) { + return false; + } + } + return true; +} + +// LINT.IfChange(portable_symmetric_quantize_floats) +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor); + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor); +// LINT.ThenChange(//tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.h:portable_symmetric_quantize_floats) + +void PortableAsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, + float* scaling_factor, int32_t* offset); + +// Multiply a matrix by a batch vector, and store results in a batch-size +// vector. +void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix, + int m_rows, int m_cols, + const float* vector, + int n_batch, float* result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vector, const float* scaling_factors, + int n_batch, int32_t* scratch, 
float* __restrict__ result, + CpuBackendContext* context); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + int32_t output_shift, const int32_t* per_channel_scale, + const int32_t* per_channel_shift, int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result, + const float* per_channel_scale); + +// Dot product of two vectors. 
+float PortableVectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size); + +void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, + int v_size, int n_batch, + int32_t* result); + +void PortableVectorBatchVectorCwiseProductAccumulate( + const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch, + int32_t multiplier, int shift, int16_t* result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiply(const int8_t* input, + int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, + int32_t n_cell, int8_t* gate_output, + int8_t gate_output_zp); + +void PortableMatrixBatchVectorMultiply( + const int16_t* hidden, const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden, + int32_t n_output, int32_t output_zp, int8_t* proj_output); + +void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix, + int32_t scalar, int32_t n_row, + int32_t n_col, int32_t* output); + +void PortableApplyLayerNorm(const int16_t* input, + const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + 
int n_batch, int n_input, int16_t* output); + +void PortableApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output); + +void PortableApplySigmoid(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +void PortableApplyTanh(int32_t integer_bits, const int16_t* input, + int32_t n_batch, int32_t n_input, int16_t* output); + +void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int32_t integer_bits, + int16_t* output); + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int shift, int16_t* output); + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output); + +void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int16_t* output); + +template +void PortableCwiseClipping(T* vector, const int v_size, + const T& clipping_value) { + for (int i = 0; i < v_size; i++) { + vector[i] = std::max(std::min(clipping_value, vector[i]), + static_cast(-clipping_value)); + } +} + +// Batch vector initialization with another vector. +void PortableVectorBatchVectorAssign(const float* vector, int v_size, + int n_batch, float* batch_vector); + +// Compute "1.0f - elements of vector" (used in CIFG). +void PortableSub1Vector(const float* vector, int v_size, float* result); + +void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result); + +// Multiply all elements of vector with a scalar. 
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result); + +// Reduce-sum on a vector: +// input_vector: pointer to input vector. +// output_vector: pointer to vector. +// output_size: output vector size. +// reduction_size: number of consecutive elements from input vector which are +// added to get one element of output. +template +void PortableReductionSumVector(const INPUT* input_vector, + OUTPUT* output_vector, int output_size, + int reduction_size) { + for (int o = 0; o < output_size; o++) { + OUTPUT result = 0; + for (int r = 0; r < reduction_size; r++) { + result += input_vector[r]; + } + output_vector[o] = result; + input_vector += reduction_size; + } +} + +// Layer norm for each batch. +void PortableMeanStddevNormalization(const float* __restrict__ input_vector, + float* __restrict__ output_vector, + int v_size, int n_batch); + +// Saturate Add. +void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, + int32_t n_batch, int32_t n_cell, + int16_t* output); + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/prelu.h b/src/tensorflow/lite/kernels/internal/reference/prelu.h index 02db517..aa9901d 100644 --- a/src/tensorflow/lite/kernels/internal/reference/prelu.h +++ b/src/tensorflow/lite/kernels/internal/reference/prelu.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ +#include + #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/types.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h b/src/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h index 40f779c..bda2769 100644 --- a/src/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +++ b/src/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ +#include + #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { diff --git a/src/tensorflow/lite/kernels/internal/reference/quantize.h b/src/tensorflow/lite/kernels/internal/reference/quantize.h index 6f3f9ae..f304b64 100644 --- a/src/tensorflow/lite/kernels/internal/reference/quantize.h +++ b/src/tensorflow/lite/kernels/internal/reference/quantize.h @@ -17,6 +17,7 @@ limitations under the License. #include #include +#include #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" @@ -49,6 +50,39 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params, } } +// Quantizes per-channel. +template +inline void PerChannelQuantize( + const tflite::PerChannelQuantizationParams& op_params, + const RuntimeShape& input_shape, const InputT* input_data, + const RuntimeShape& output_shape, OutputT* output_data) { + // Ensure flat size is same. 
+ MatchingFlatSize(input_shape, output_shape); + + const int32_t* zero_point = op_params.zero_point; + const float* scale = op_params.scale; + const int32_t quantized_dimension = op_params.quantized_dimension; + const int32_t num_dims = input_shape.DimensionsCount(); + const int32_t* dims_data = input_shape.DimsData(); + std::vector current_dim(num_dims, 0); + static constexpr int32_t min_val = std::numeric_limits::min(); + static constexpr int32_t max_val = std::numeric_limits::max(); + + do { + size_t offset = + ReducedOutputOffset(num_dims, reinterpret_cast(dims_data), + current_dim.data(), 0, nullptr); + const InputT val = input_data[offset]; + const int channel = current_dim[quantized_dimension]; + int32_t unclamped = static_cast(TfLiteRound( + val / static_cast(scale[channel]))) + + zero_point[channel]; + int32_t clamped = std::min(std::max(unclamped, min_val), max_val); + output_data[offset] = static_cast(clamped); + } while (NextIndex(num_dims, reinterpret_cast(dims_data), + current_dim.data())); +} + } // namespace reference_ops } // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/reference/reduce.h b/src/tensorflow/lite/kernels/internal/reference/reduce.h index a7c86dd..ab4745f 100644 --- a/src/tensorflow/lite/kernels/internal/reference/reduce.h +++ b/src/tensorflow/lite/kernels/internal/reference/reduce.h @@ -15,7 +15,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ -#include "ruy/profiler/instrumentation.h" // from @ruy +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/max.h" @@ -23,6 +25,25 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/types.h" +// Check if the reduction at index is the first one along the dimensions given +// in axis. +inline bool IsFirstReduction(const int* index, const int num_axis, + const int* axis) { + if (num_axis == 0) { + return true; + } + + TFLITE_DCHECK(index != nullptr); + TFLITE_DCHECK(axis != nullptr); + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) { + if (index[axis[axis_idx]] != 0) { + return false; + } + } + + return true; +} + namespace tflite { namespace reference_ops { @@ -35,8 +56,7 @@ inline bool Reduce(const In* input_data, const int* input_dims, const int* output_dims, const int input_num_dims, const int output_num_dims, const int* axis, const int num_axis, int* input_iter, - Out reducer(const Out current, const In in), - Out* output_data) { + Out reducer(Out current, const In in), Out* output_data) { // Reset input iterator. for (int idx = 0; idx < input_num_dims; ++idx) { input_iter[idx] = 0; @@ -53,6 +73,37 @@ inline bool Reduce(const In* input_data, const int* input_dims, return true; } +// Similar to above Reduce function but takes two reducer functions. +// The 'reducer_first' is called with the first value of the reduction, +// 'reducer_next' is then called for all the others. +template +inline bool Reduce(const In* input_data, const int* input_dims, + const int* output_dims, const int input_num_dims, + const int output_num_dims, const int* axis, + const int num_axis, int* input_iter, + const std::function& reducer_first, + const std::function& reducer_next, + Out* output_data) { + // Reset input iterator. + for (int idx = 0; idx < input_num_dims; ++idx) { + input_iter[idx] = 0; + } + // Iterate through input_data. 
+ do { + size_t input_offset = + ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr); + size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, + input_iter, num_axis, axis); + if (IsFirstReduction(input_iter, num_axis, axis)) { + output_data[output_offset] = reducer_first(input_data[input_offset]); + } else { + output_data[output_offset] = + reducer_next(output_data[output_offset], input_data[input_offset]); + } + } while (NextIndex(input_num_dims, input_dims, input_iter)); + return true; +} + // This method parses the input 'axis' to remove duplicates and handle negative // values, and returns a valid 'out_axis' inline bool ResolveAxis(const int num_dims, const int* axis, @@ -111,7 +162,8 @@ inline bool InitTensorDataForReduce(const int* dims, const int num_dims, for (int idx = 0; idx < num_dims; ++idx) { size_t current = static_cast(dims[idx]); // Overflow prevention. - if (num_elements > std::numeric_limits::max() / current) { + if (current > 0 && + num_elements > std::numeric_limits::max() / current) { return false; } num_elements *= current; @@ -132,17 +184,20 @@ inline bool ReduceGeneric(const T* input_data, const int* input_dims, bool keep_dims, int* temp_index, int* resolved_axis, T init_value, T reducer(const T current, const T in)) { - // Return early when input shape has zero dim. - for (int i = 0; i < input_num_dims; ++i) { - if (input_dims[i] == 0) return true; - } - // Reset output data. if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value, output_data)) { return false; } + // Return early when input shape has zero dim. This is done after initializing + // data for output tensor because there are cases that the input tensor is + // empty but output tensor is not. In that case, output tensor should be + // filled with init_value. + for (int i = 0; i < input_num_dims; ++i) { + if (input_dims[i] == 0) return true; + } + // Resolve axis. 
int num_resolved_axis = 0; if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, @@ -213,11 +268,11 @@ inline bool Mean(const T* input_data, const int* input_dims, return true; } -template inline void Mean(const tflite::MeanParams& op_params, const RuntimeShape& unextended_input_shape, - const T* input_data, - const RuntimeShape& unextended_output_shape, T* output_data) { + const float* input_data, + const RuntimeShape& unextended_output_shape, + float* output_data) { ruy::profiler::ScopeLabel label("Mean4D"); // Current implementation only supports dimension equals 4 and simultaneous @@ -257,78 +312,21 @@ inline void Mean(const tflite::MeanParams& op_params, } } -inline void Mean(const tflite::MeanParams& op_params, - const RuntimeShape& unextended_input_shape, - const uint8_t* input_data, int32_t input_zero_point, - float input_scale, const RuntimeShape& unextended_output_shape, - uint8_t* output_data, int32_t output_zero_point, - float output_scale) { - ruy::profiler::ScopeLabel label("Mean4D/Uint8"); - - // Current implementation only supports dimension equals 4 and simultaneous - // reduction over width and height. 
- TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4); - TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - const int output_batch = output_shape.Dims(0); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int output_depth = output_shape.Dims(3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const float num_elements_in_axis = input_width * input_height; - - TFLITE_CHECK_EQ(op_params.axis_count, 2); - TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - TFLITE_CHECK_EQ(output_height, 1); - TFLITE_CHECK_EQ(output_width, 1); - - constexpr int32_t kMinValue = std::numeric_limits::min(); - constexpr int32_t kMaxValue = std::numeric_limits::max(); - - int32_t bias = - output_zero_point - - static_cast(input_zero_point * input_scale / output_scale); - double real_scale = - static_cast(input_scale / (num_elements_in_axis * output_scale)); - - int32_t multiplier; - int shift; - QuantizeMultiplier(real_scale, &multiplier, &shift); - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - int32_t acc = 0; - for (int in_h = 0; in_h < input_height; ++in_h) { - for (int in_w = 0; in_w < input_width; ++in_w) { - acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)]; - } - } - acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); - acc += bias; - acc = std::min(std::max(acc, kMinValue), kMaxValue); - output_data[Offset(output_shape, out_b, 0, 0, out_d)] = - static_cast(acc); - } - } -} - // Computes the mean of elements across dimensions given in axis. 
// It does so in two stages, first calculates the sum of elements along the axis // then divides it by the number of element in axis for quantized values. template inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, - float input_scale, const int* input_dims, - const int input_num_dims, T* output_data, - int32_t output_zero_point, float output_scale, + const int* input_dims, const int input_num_dims, + T* output_data, int32_t output_multiplier, + int output_shift, int32_t output_zero_point, const int* output_dims, const int output_num_dims, const int* axis, const int num_axis_dimensions, bool keep_dims, int* temp_index, int* resolved_axis, U* temp_sum, bool compute_sum) { + const int32_t kMinValue = std::numeric_limits::min(); + const int32_t kMaxValue = std::numeric_limits::max(); const bool uint8_case = std::is_same::value; const bool int16_case = std::is_same::value; if (uint8_case) { @@ -353,6 +351,14 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, temp_sum[idx] = U(); } + // Return early when input shape has zero dim. This is done after initializing + // data for output tensor because there are cases that the input tensor is + // empty but output tensor is not. In that case, output tensor should be + // filled with init_value. + for (int i = 0; i < input_num_dims; ++i) { + if (input_dims[i] == 0) return true; + } + // Resolve axis. int num_resolved_axis = 0; if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, @@ -367,44 +373,117 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, } // Calculate mean by dividing output_data by num of aggregated element. - size_t num_elements_in_axis = 1; + int64_t num_elements_in_axis = 1; for (int idx = 0; idx < num_resolved_axis; ++idx) { size_t current = static_cast(input_dims[resolved_axis[idx]]); // Overflow prevention. 
- if (current > (std::numeric_limits::max() / num_elements_in_axis)) { + if (current > static_cast(std::numeric_limits::max() / + num_elements_in_axis)) { return false; } num_elements_in_axis *= current; } - if (num_elements_in_axis > 0) { - const float scale = input_scale / output_scale; - if (compute_sum) { - // TODO(b/116341117): Eliminate float and do this completely in 8bit. - const float bias = -input_zero_point * scale * num_elements_in_axis; - for (size_t idx = 0; idx < num_outputs; ++idx) { - const U value = - static_cast(TfLiteRound(temp_sum[idx] * scale + bias)) + - output_zero_point; - output_data[idx] = static_cast(value); - } - } else { - const float bias = -input_zero_point * scale; - for (size_t idx = 0; idx < num_outputs; ++idx) { - float float_mean = static_cast(temp_sum[idx]) / - static_cast(num_elements_in_axis); - float result = TfLiteMin( - TfLiteRound(float_mean * scale + bias) + output_zero_point, - static_cast(std::numeric_limits::max())); - result = TfLiteMax(result, - static_cast(std::numeric_limits::min())); - output_data[idx] = static_cast(result); - } - } + if (num_elements_in_axis == 0) { + return true; + } + + // Readapt output rescaling when calculating the mean to integrate a + // 1/num_elements_in_axis multiplier. + if (!compute_sum) { + TFLITE_DCHECK_GE(num_elements_in_axis, 0); + int shift = + 63 - CountLeadingZeros(static_cast(num_elements_in_axis)); + // To avoid any overflow risk 'shift' should be <= 32 and to satisfy + // 'MultiplyByQuantizedMultiplier' pre-conditions 'output_shift - shift' + // should be >= -31. Clamp the value at the price of some precision loss. 
+ shift = std::min(shift, 32); + shift = std::min(shift, 31 + output_shift); + output_multiplier = static_cast( + (static_cast(output_multiplier) << shift) / + num_elements_in_axis); + output_shift = output_shift - shift; + } + + for (size_t idx = 0; idx < num_outputs; ++idx) { + const U shifted_sum = + static_cast(temp_sum[idx] - input_zero_point * num_elements_in_axis); + int32_t output = MultiplyByQuantizedMultiplier( + shifted_sum, output_multiplier, output_shift) + + output_zero_point; + output = std::min(std::max(output, kMinValue), kMaxValue); + output_data[idx] = static_cast(output); } return true; } +template +inline bool QuantizedMeanOrSumExtraArgs( + const T* input_data, int32_t input_zero_point, float input_scale, + const int* input_dims, const int input_num_dims, T* output_data, + float output_scale, int32_t output_multiplier, int output_shift, + int32_t output_zero_point, const int* output_dims, + const int output_num_dims, const int* axis, const int num_axis_dimensions, + bool keep_dims, int* temp_index, int* resolved_axis, U* temp_sum, + bool compute_sum) { + return QuantizedMeanOrSum( + input_data, input_zero_point, input_dims, input_num_dims, output_data, + output_multiplier, output_shift, output_zero_point, output_dims, + output_num_dims, axis, num_axis_dimensions, keep_dims, temp_index, + resolved_axis, temp_sum, compute_sum); +} + +template +inline bool QuantizedReduceProd(const T* input_data, int32_t input_zero_point, + const RuntimeShape& input_shape, T* output_data, + int32_t output_zero_point, + const RuntimeShape& output_shape, + const int* axis, + const int64_t num_axis_dimensions, + bool keep_dims, int* temp_index, + int* resolved_axis, int32_t* temp_prod, + int32_t scaling_multiplier, int scaling_shift) { + const int32_t kMinValue = std::numeric_limits::min(); + const int32_t kMaxValue = std::numeric_limits::max(); + + // Resolve axis. 
+ int num_resolved_axis = 0; + if (!ResolveAxis(input_shape.DimensionsCount(), axis, num_axis_dimensions, + resolved_axis, &num_resolved_axis)) { + return false; + } + + // Calculate the reduced product by rescaling each multiplication step to + // avoid an overflow. + auto reducer_first = [&](T in) -> int32_t { return in - input_zero_point; }; + + auto reducer_next = [&](int32_t current, T in) -> int32_t { + const int64_t result = + static_cast(current) * (in - input_zero_point); + return MultiplyByQuantizedMultiplier(result, scaling_multiplier, + scaling_shift); + }; + + if (!Reduce( + input_data, input_shape.DimsData(), output_shape.DimsData(), + input_shape.DimensionsCount(), output_shape.DimensionsCount(), + resolved_axis, num_resolved_axis, temp_index, reducer_first, + reducer_next, temp_prod)) { + return false; + } + + for (int i = 0; i < output_shape.FlatSize(); i++) { + int32_t result = + MultiplyByQuantizedMultiplier(static_cast(temp_prod[i]), + scaling_multiplier, scaling_shift) + + output_zero_point; + result = std::min(std::max(result, kMinValue), kMaxValue); + output_data[i] = static_cast(result); + } + + return true; +} + } // namespace reference_ops } // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/reference/requantize.h b/src/tensorflow/lite/kernels/internal/reference/requantize.h index d1e6778..83879fb 100644 --- a/src/tensorflow/lite/kernels/internal/reference/requantize.h +++ b/src/tensorflow/lite/kernels/internal/reference/requantize.h @@ -15,7 +15,9 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ -#include "ruy/profiler/instrumentation.h" // from @ruy +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/types.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/resize_bilinear.h b/src/tensorflow/lite/kernels/internal/reference/resize_bilinear.h new file mode 100644 index 0000000..bf9a88a --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/resize_bilinear.h @@ -0,0 +1,233 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ + +#include +#include +#include +#include + +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +inline void ComputeInterpolationValues(const float value, const float scale, + const bool half_pixel_centers, + int32_t input_size, float* scaled_value, + int32_t* lower_bound, + int32_t* upper_bound) { + if (half_pixel_centers) { + *scaled_value = (value + 0.5f) * scale - 0.5f; + } else { + *scaled_value = value * scale; + } + float scaled_value_floor = std::floor(*scaled_value); + *lower_bound = std::max(static_cast(scaled_value_floor), + static_cast(0)); + *upper_bound = + std::min(static_cast(std::ceil(*scaled_value)), input_size - 1); +} + +template +inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_size_shape, + const int32_t* output_size_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + // If half_pixel_centers is True, align_corners must be False. 
+ TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners); + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_size_shape = + RuntimeShape::ExtendedShape(4, unextended_output_size_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + int32_t batches = MatchingDim(input_shape, 0, output_shape, 0); + int32_t input_height = input_shape.Dims(1); + int32_t input_width = input_shape.Dims(2); + int32_t depth = MatchingDim(input_shape, 3, output_shape, 3); + + TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2); + int32_t output_height = + output_size_data[Offset(output_size_shape, 0, 0, 0, 0)]; + int32_t output_width = + output_size_data[Offset(output_size_shape, 0, 0, 0, 1)]; + + float height_scale = static_cast(input_height) / output_height; + float width_scale = static_cast(input_width) / output_width; + if (op_params.align_corners && output_height > 1) { + height_scale = static_cast(input_height - 1) / (output_height - 1); + } + if (op_params.align_corners && output_width > 1) { + width_scale = static_cast(input_width - 1) / (output_width - 1); + } + const float rounding_offset = std::numeric_limits::is_integer ? 
.5f : .0f; + + for (int b = 0; b < batches; ++b) { + for (int y = 0; y < output_height; ++y) { + float input_y; + int32_t y0, y1; + ComputeInterpolationValues(y, height_scale, op_params.half_pixel_centers, + input_height, &input_y, &y0, &y1); + for (int x = 0; x < output_width; ++x) { + float input_x; + int32_t x0, x1; + ComputeInterpolationValues(x, width_scale, op_params.half_pixel_centers, + input_width, &input_x, &x0, &x1); + for (int c = 0; c < depth; ++c) { + T interpolation = + static_cast(input_data[Offset(input_shape, b, y0, x0, c)] * + (1 - (input_y - y0)) * (1 - (input_x - x0)) + + input_data[Offset(input_shape, b, y1, x0, c)] * + (input_y - y0) * (1 - (input_x - x0)) + + input_data[Offset(input_shape, b, y0, x1, c)] * + (1 - (input_y - y0)) * (input_x - x0) + + input_data[Offset(input_shape, b, y1, x1, c)] * + (input_y - y0) * (input_x - x0) + + rounding_offset); + output_data[Offset(output_shape, b, y, x, c)] = interpolation; + } + } + } + } +} + +inline void ComputeInterpolationValuesInteger( + const int32_t value, const int32_t scale_10, const bool half_pixel_centers, + int32_t input_size, int32_t* scaled_value, int32_t* lower_bound, + int32_t* upper_bound) { + if (half_pixel_centers) { + *scaled_value = value * scale_10 + scale_10 / 2 - (1 << 9); + } else { + *scaled_value = value * scale_10; + } + constexpr int32_t zero = 0; + *lower_bound = std::max(*scaled_value / (1 << 10), zero); + *upper_bound = + std::min((*scaled_value + (1 << 10) - 1) / (1 << 10), input_size - 1); +} + +// Same as above but doesn't use any floating-point for the resize +template +inline void ResizeBilinearInteger( + const tflite::ResizeBilinearParams& op_params, + const RuntimeShape& unextended_input_shape, const T* input_data, + const RuntimeShape& unextended_output_size_shape, + const int32_t* output_size_data, + const RuntimeShape& unextended_output_shape, T* output_data) { + // If half_pixel_centers is True, align_corners must be False. 
+ TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners); + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_size_shape = + RuntimeShape::ExtendedShape(4, unextended_output_size_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0); + const int32_t input_height = input_shape.Dims(1); + const int32_t input_width = input_shape.Dims(2); + const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3); + + TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2); + const int32_t output_height = + output_size_data[Offset(output_size_shape, 0, 0, 0, 0)]; + const int32_t output_width = + output_size_data[Offset(output_size_shape, 0, 0, 0, 1)]; + + int32_t height_scale_10 = + ((1 << 10) * input_height + output_height / 2) / output_height; + int32_t width_scale_10 = + ((1 << 10) * input_width + output_width / 2) / output_width; + if (op_params.align_corners && output_height > 1) { + height_scale_10 = + ((1 << 10) * (input_height - 1) + (output_height - 1) / 2) / + (output_height - 1); + } + if (op_params.align_corners && output_width > 1) { + width_scale_10 = ((1 << 10) * (input_width - 1) + (output_width - 1) / 2) / + (output_width - 1); + } + + for (int b = 0; b < batches; ++b) { + for (int y = 0; y < output_height; ++y) { + int32_t input_y, y0, y1; + ComputeInterpolationValuesInteger(y, height_scale_10, + op_params.half_pixel_centers, + input_height, &input_y, &y0, &y1); + for (int x = 0; x < output_width; ++x) { + int32_t 
input_x, x0, x1; + ComputeInterpolationValuesInteger(x, width_scale_10, + op_params.half_pixel_centers, + input_width, &input_x, &x0, &x1); + for (int c = 0; c < depth; ++c) { + const int64_t output_20_ll = + static_cast( + input_data[Offset(input_shape, b, y0, x0, c)]) * + ((1 << 10) - (input_y - (1 << 10) * y0)) * + ((1 << 10) - (input_x - (1 << 10) * x0)); + const int64_t output_20_lu = + static_cast( + input_data[Offset(input_shape, b, y1, x0, c)]) * + (input_y - (1 << 10) * y0) * + ((1 << 10) - (input_x - (1 << 10) * x0)); + const int64_t output_20_rl = + static_cast( + input_data[Offset(input_shape, b, y0, x1, c)]) * + ((1 << 10) - (input_y - (1 << 10) * y0)) * + (input_x - (1 << 10) * x0); + const int64_t output_20_ru = + static_cast( + input_data[Offset(input_shape, b, y1, x1, c)]) * + (input_y - (1 << 10) * y0) * (input_x - (1 << 10) * x0); + const int64_t output_20 = + output_20_ll + output_20_lu + output_20_rl + output_20_ru; +#if TFLITE_SINGLE_ROUNDING + const int64_t round = 1 << 19; + const T interpolation = static_cast((output_20 + round) >> 20); +#else + const int64_t round = (output_20 > 0) ? (1 << 19) : -(1 << 19); + const T interpolation = + static_cast((output_20 + round) / (1 << 20)); +#endif // TFLITE_SINGLE_ROUNDING + output_data[Offset(output_shape, b, y, x, c)] = interpolation; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h b/src/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h index 95550ab..bf0b757 100644 --- a/src/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +++ b/src/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h @@ -15,6 +15,7 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ +#include #include #include "tensorflow/lite/kernels/internal/cppmath.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/select.h b/src/tensorflow/lite/kernels/internal/reference/select.h new file mode 100644 index 0000000..3c0d312 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/select.h @@ -0,0 +1,151 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ + +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void Select(const RuntimeShape& input_condition_shape, + const D* input_condition_data, const RuntimeShape& input_x_shape, + const T* input_x_data, const RuntimeShape& input_y_shape, + const T* input_y_data, const RuntimeShape& output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("Select"); + int64_t flatsize; + // Allow select operator executions on mixed scalar tensors and one element + // tensors. 
+ if (input_condition_shape.FlatSize() == 1 && input_x_shape.FlatSize() == 1 && + input_y_shape.FlatSize() == 1 && output_shape.FlatSize() == 1) { + flatsize = 1; + } else { + flatsize = MatchingFlatSize(input_condition_shape, input_x_shape, + input_y_shape, output_shape); + } + for (int64_t i = 0; i < flatsize; ++i) { + output_data[i] = + input_condition_data[i] ? input_x_data[i] : input_y_data[i]; + } +} + +template +void RankOneSelect(const RuntimeShape& input_condition_shape, + const D* input_condition_data, + const RuntimeShape& input_x_shape, const T* input_x_data, + const RuntimeShape& input_y_shape, const T* input_y_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("Select/RankOneSelect"); + const int64_t outer_size = input_condition_shape.FlatSize(); + int64_t inner_size; + if (input_condition_shape.DimensionsCount() == 0) { + inner_size = MatchingFlatSize(input_x_shape, input_y_shape, output_shape); + } else { + TFLITE_DCHECK_EQ( + MatchingDim(input_x_shape, 0, input_y_shape, 0, output_shape, 0), + outer_size); + inner_size = + MatchingFlatSizeSkipDim(input_x_shape, 0, input_y_shape, output_shape); + } + + int64_t offset = 0; + for (int64_t i = 0; i < outer_size; i++) { + const T* input_data = input_condition_data[i] ? 
input_x_data : input_y_data; + memcpy(output_data + offset, input_data + offset, inner_size * sizeof(T)); + offset += inner_size; + } +} + +template +void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape, + const D* input_condition_data, + const RuntimeShape& input_x_shape, + const T* input_x_data, + const RuntimeShape& input_y_shape, + const T* input_y_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("Select/BroadcastSelectSlow"); + TFLITE_DCHECK_LE(input_condition_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(input_x_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(input_y_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 5); + + NdArrayDesc<5> desc_condition; + NdArrayDesc<5> desc_x; + NdArrayDesc<5> desc_y; + NdArrayDesc<5> desc_output; + const RuntimeShape extended_output_shape = + RuntimeShape::ExtendedShape(5, output_shape); + CopyDimsToDesc(extended_output_shape, &desc_output); + NdArrayDescsForElementwiseBroadcast(input_condition_shape, input_x_shape, + input_y_shape, &desc_condition, &desc_x, + &desc_y); + + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest + // stride, typically 1 element). + // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. + // + // We name our variables by their Tensorflow convention, but generate C code + // nesting loops such that the innermost loop has the smallest stride for + // the best cache behavior. 
+ for (int n = 0; n < desc_output.extents[0]; ++n) { + int out_idx_n = desc_output.extents[1] * n; + int cond_idx_n = desc_condition.strides[0] * n; + int in_idx1_n = desc_x.strides[0] * n; + int in_idx2_n = desc_y.strides[0] * n; + for (int b = 0; b < desc_output.extents[1]; ++b) { + int out_idx_b = (out_idx_n + b) * desc_output.extents[2]; + int cond_idx_b = cond_idx_n + desc_condition.strides[1] * b; + int in_idx1_b = in_idx1_n + desc_x.strides[1] * b; + int in_idx2_b = in_idx2_n + desc_y.strides[1] * b; + for (int y = 0; y < desc_output.extents[2]; ++y) { + int out_idx_y = (out_idx_b + y) * desc_output.extents[3]; + int cond_idx_y = cond_idx_b + desc_condition.strides[2] * y; + int in_idx1_y = in_idx1_b + desc_x.strides[2] * y; + int in_idx2_y = in_idx2_b + desc_y.strides[2] * y; + for (int x = 0; x < desc_output.extents[3]; ++x) { + int out_idx = (out_idx_y + x) * desc_output.extents[4]; + int cond_idx = cond_idx_y + desc_condition.strides[3] * x; + int in_idx1 = in_idx1_y + desc_x.strides[3] * x; + int in_idx2 = in_idx2_y + desc_y.strides[3] * x; + for (int c = 0; c < desc_output.extents[4]; ++c) { + output_data[out_idx] = input_condition_data[cond_idx] + ? input_x_data[in_idx1] + : input_y_data[in_idx2]; + out_idx++; + cond_idx += desc_condition.strides[4]; + in_idx1 += desc_x.strides[4]; + in_idx2 += desc_y.strides[4]; + } + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/slice.h b/src/tensorflow/lite/kernels/internal/reference/slice.h new file mode 100644 index 0000000..cb73ea0 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/slice.h @@ -0,0 +1,80 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ + +#include "tensorflow/lite/kernels/internal/portable_tensor.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, + const RuntimeShape& output_shape, + SequentialTensorWriter* writer) { + const RuntimeShape ext_shape = RuntimeShape::ExtendedShape(5, input_shape); + TFLITE_DCHECK_LE(op_params.begin_count, 5); + TFLITE_DCHECK_LE(op_params.size_count, 5); + const int begin_count = op_params.begin_count; + const int size_count = op_params.size_count; + // We front-pad the begin and size vectors. + int start[5]; + int stop[5]; + for (int i = 0; i < 5; ++i) { + int padded_i = 5 - i; + start[i] = + begin_count < padded_i ? 0 : op_params.begin[begin_count - padded_i]; + stop[i] = + (size_count < padded_i || op_params.size[size_count - padded_i] == -1) + ? 
ext_shape.Dims(i) + : start[i] + op_params.size[size_count - padded_i]; + } + + for (int i0 = start[0]; i0 < stop[0]; ++i0) { + for (int i1 = start[1]; i1 < stop[1]; ++i1) { + for (int i2 = start[2]; i2 < stop[2]; ++i2) { + for (int i3 = start[3]; i3 < stop[3]; ++i3) { + for (int i4 = start[4]; i4 < stop[4]; ++i4) { + writer->Write(Offset(ext_shape, i0, i1, i2, i3, i4)); + } + } + } + } + } +} + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, const T* input_data, + const RuntimeShape& output_shape, T* output_data) { + SequentialTensorWriter writer(input_data, output_data); + return Slice(op_params, input_shape, output_shape, &writer); +} + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, const TfLiteTensor* input, + const RuntimeShape& output_shape, TfLiteTensor* output) { + SequentialTensorWriter writer(input, output); + return Slice(op_params, input_shape, output_shape, &writer); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/softmax.h b/src/tensorflow/lite/kernels/internal/reference/softmax.h index 1b3f118..6eca694 100644 --- a/src/tensorflow/lite/kernels/internal/reference/softmax.h +++ b/src/tensorflow/lite/kernels/internal/reference/softmax.h @@ -15,9 +15,10 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ +#include #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" @@ -114,6 +115,9 @@ inline void Softmax(const SoftmaxParams& params, FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal( sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit)); + const int exponent = num_bits_over_unit + 31 - (sizeof(OutputT) * 8); + TFLITE_CHECK(0 <= exponent && exponent <= 31); + for (int c = 0; c < depth; ++c) { int32_t input_diff = static_cast(input_data[i * depth + c]) - max_in_row; @@ -126,8 +130,7 @@ inline void Softmax(const SoftmaxParams& params, FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8); int32_t unsat_output = gemmlowp::RoundingDivideByPOT( - (shifted_scale * exp_in_0).raw(), - num_bits_over_unit + 31 - (sizeof(OutputT) * 8)); + (shifted_scale * exp_in_0).raw(), exponent); const int32_t shifted_output = unsat_output + @@ -159,7 +162,7 @@ inline int16_t SoftMaxCalculateExp(const SoftmaxParams& params, std::min(std::max(sym_scaled_diff, static_cast(-32768)), static_cast(32767)); // apply the exp() LUT activation function - return generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut); + return LUTLookup(sat_sym_scaled_diff, params.exp_lut); } // Quantized softmax with int16_t input and int16_t output. 
inline void SoftmaxInt16(const SoftmaxParams& params, @@ -207,8 +210,8 @@ inline void SoftmaxInt16(const SoftmaxParams& params, std::min(std::max(sym_shifted_sum, static_cast(-32768)), static_cast(32767))); // apply 1/(1 + x) LUT activation function - int16_t reciprocal_scale_Q015 = generic_int16_table_lookup( - sat_sym_shifted_sum, params.one_over_one_plus_x_lut); + int16_t reciprocal_scale_Q015 = + LUTLookup(sat_sym_shifted_sum, params.one_over_one_plus_x_lut); // Rescale the exp_result with reciprocal // range of output is [0, 32767] correspond to [0.0, 1.0] diff --git a/src/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h b/src/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h new file mode 100644 index 0000000..dcef04a --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h @@ -0,0 +1,109 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ + +#include + +#include "third_party/ruy/ruy/profiler/instrumentation.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +// TODO(b/135760455): Move this method anonymous namespace in a cc file. 
+inline RuntimeShape ExtendShapeSpaceToBatch(const RuntimeShape& shape) { + if (shape.DimensionsCount() == 4) { + return shape; + } + RuntimeShape new_shape(4, 1); + new_shape.SetDim(0, shape.Dims(0)); + new_shape.SetDim(1, shape.Dims(1)); + new_shape.SetDim(3, shape.Dims(2)); + return new_shape; +} + +template +inline void SpaceToBatchND(const SpaceToBatchParams& params, + const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const int32_t* block_shape_data, + const RuntimeShape& unextended_input3_shape, + const int32_t* paddings_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("SpaceToBatchND"); + TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3); + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(), + unextended_output_shape.DimensionsCount()); + + // Extends the input/output shape from 3D to 4D if needed, NHC -> NH1C. + const RuntimeShape input1_shape = + ExtendShapeSpaceToBatch(unextended_input1_shape); + const RuntimeShape output_shape = + ExtendShapeSpaceToBatch(unextended_output_shape); + + const int depth = input1_shape.Dims(3); + const int input_width = input1_shape.Dims(2); + const int input_height = input1_shape.Dims(1); + const int input_batch_size = input1_shape.Dims(0); + + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch_size = output_shape.Dims(0); + + const int block_shape_height = block_shape_data[0]; + const int block_shape_width = + unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1; + const int padding_top = paddings_data[0]; + const int padding_left = + unextended_input1_shape.DimensionsCount() == 4 ? paddings_data[2] : 0; + + // For uint8 quantized, the correct padding "zero value" is the output offset. 
+ const int32_t pad_value = params.output_offset; + for (int out_b = 0; out_b < output_batch_size; ++out_b) { + int input_batch = out_b % input_batch_size; + int shift_w = (out_b / input_batch_size) % block_shape_width; + int shift_h = (out_b / input_batch_size) / block_shape_width; + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + T* out = output_data + Offset(output_shape, out_b, out_h, out_w, 0); + if (out_h * block_shape_height + shift_h < padding_top || + out_h * block_shape_height + shift_h >= + padding_top + input_height || + out_w * block_shape_width + shift_w < padding_left || + out_w * block_shape_width + shift_w >= padding_left + input_width) { + // This may not execute correctly when pad_value != 0 and T != uint8. + memset(out, pad_value, depth * sizeof(T)); + } else { + const T* in = + input1_data + + Offset(input1_shape, input_batch, + (out_h * block_shape_height + shift_h) - padding_top, + (out_w * block_shape_width + shift_w) - padding_left, 0); + memcpy(out, in, depth * sizeof(T)); + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/space_to_depth.h b/src/tensorflow/lite/kernels/internal/reference/space_to_depth.h new file mode 100644 index 0000000..7ad4654 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/space_to_depth.h @@ -0,0 +1,80 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ + +#include + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int input_depth = input_shape.Dims(3); + const int input_width = input_shape.Dims(2); + const int input_height = input_shape.Dims(1); + const int input_batch = input_shape.Dims(0); + + const int output_depth = output_shape.Dims(3); + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch = output_shape.Dims(0); + + const int32_t block_size = op_params.block_size; + + TFLITE_DCHECK_EQ(input_width, output_width * block_size); + TFLITE_DCHECK_EQ(input_height, output_height * block_size); + TFLITE_DCHECK_EQ(input_depth * block_size * block_size, output_depth); + TFLITE_DCHECK_EQ(input_batch, output_batch); + + for (int in_b = 0; in_b < 
input_batch; ++in_b) { + for (int in_h = 0; in_h < input_height; ++in_h) { + for (int in_w = 0; in_w < input_width; ++in_w) { + for (int in_d = 0; in_d < input_depth; ++in_d) { + const int out_d = + in_d + ((in_h % block_size) * block_size + in_w % block_size) * + input_depth; + const int out_w = in_w / block_size; + const int out_h = in_h / block_size; + const int out_b = in_b; + + const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d); + const int output_index = + Offset(output_shape, out_b, out_h, out_w, out_d); + + output_data[output_index] = input_data[input_index]; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/strided_slice.h b/src/tensorflow/lite/kernels/internal/reference/strided_slice.h index 24aa798..afbb4c8 100644 --- a/src/tensorflow/lite/kernels/internal/reference/strided_slice.h +++ b/src/tensorflow/lite/kernels/internal/reference/strided_slice.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/portable_tensor.h" @@ -30,9 +31,8 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, const RuntimeShape& unextended_input_shape, const RuntimeShape& unextended_output_shape, SequentialTensorWriter* writer) { - using strided_slice::LoopCondition; - using strided_slice::StartForAxis; - using strided_slice::StopForAxis; + ruy::profiler::ScopeLabel label("StridedSlice"); + // Note that the output_shape is not used herein. 
tflite::StridedSliceParams params_copy = op_params; @@ -47,41 +47,71 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, // requires (ie. all shapes must be 5D and are given backwards). strided_slice::StridedSlicePadIndices(¶ms_copy, 5); - const int start_0 = StartForAxis(params_copy, input_shape, 0); - const int stop_0 = StopForAxis(params_copy, input_shape, 0, start_0); - const int start_1 = StartForAxis(params_copy, input_shape, 1); - const int stop_1 = StopForAxis(params_copy, input_shape, 1, start_1); - const int start_2 = StartForAxis(params_copy, input_shape, 2); - const int stop_2 = StopForAxis(params_copy, input_shape, 2, start_2); - const int start_3 = StartForAxis(params_copy, input_shape, 3); - const int stop_3 = StopForAxis(params_copy, input_shape, 3, start_3); - const int start_4 = StartForAxis(params_copy, input_shape, 4); - const int stop_4 = StopForAxis(params_copy, input_shape, 4, start_4); - - for (int offset_0 = start_0 * input_shape.Dims(1), - end_0 = stop_0 * input_shape.Dims(1), - step_0 = params_copy.strides[0] * input_shape.Dims(1); - !LoopCondition(offset_0, end_0, params_copy.strides[0]); - offset_0 += step_0) { - for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2), - end_1 = (offset_0 + stop_1) * input_shape.Dims(2), - step_1 = params_copy.strides[1] * input_shape.Dims(2); - !LoopCondition(offset_1, end_1, params_copy.strides[1]); - offset_1 += step_1) { - for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3), - end_2 = (offset_1 + stop_2) * input_shape.Dims(3), - step_2 = params_copy.strides[2] * input_shape.Dims(3); - !LoopCondition(offset_2, end_2, params_copy.strides[2]); - offset_2 += step_2) { - for (int offset_3 = (offset_2 + start_3) * input_shape.Dims(4), - end_3 = (offset_2 + stop_3) * input_shape.Dims(4), - step_3 = params_copy.strides[3] * input_shape.Dims(4); - !LoopCondition(offset_3, end_3, params_copy.strides[3]); - offset_3 += step_3) { - for (int offset_4 = offset_3 + start_4, 
end_4 = offset_3 + stop_4; - !LoopCondition(offset_4, end_4, params_copy.strides[4]); - offset_4 += params_copy.strides[4]) { - writer->Write(offset_4); + const int start_0 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 0); + const int stop_0 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 0, start_0); + const int start_1 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 1); + const int stop_1 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 1, start_1); + const int start_2 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 2); + const int stop_2 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 2, start_2); + const int start_3 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 3); + const int stop_3 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 3, start_3); + const int start_4 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 4); + const int stop_4 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 4, start_4); + + auto lc = [&](int end, int stride, int index) { + if (stride < 0) { + return index > end; + } else { + return index < end; + } + }; + // With a static_cast it is not possible to initialize + // a variable of type 'const int *' + // with an rvalue of type 'const int32_t *' (aka 'const long *'). + // reinterpret_cast is required to handle this casting. 
+ const int* shape = reinterpret_cast(input_shape.DimsData()); + const int* stride = reinterpret_cast(params_copy.strides); + const bool inner_stride_is_1 = params_copy.strides[4] == 1; + + for (int offset_0 = start_0; lc(stop_0, stride[0], offset_0); + offset_0 += stride[0]) { + for (int offset_1 = start_1; lc(stop_1, stride[1], offset_1); + offset_1 += stride[1]) { + for (int offset_2 = start_2; lc(stop_2, stride[2], offset_2); + offset_2 += stride[2]) { + for (int offset_3 = start_3; lc(stop_3, stride[3], offset_3); + offset_3 += stride[3]) { + // When the stride is 1, the inner loop is equivalent to the + // optimized slice inner loop. Otherwise, it is identical to the + // strided_slice reference implementation inner loop. + if (inner_stride_is_1) { + const int len = stop_4 - start_4; + int index = start_4 + offset_3 * shape[4] + + offset_2 * shape[3] * shape[4] + + offset_1 * shape[2] * shape[3] * shape[4] + + offset_0 * shape[1] * shape[2] * shape[3] * shape[4]; + if (len > 0) { + writer->WriteN(index, len); + } + } else { + for (int offset_4 = start_4; lc(stop_4, stride[4], offset_4); + offset_4 += stride[4]) { + int index = offset_4 + offset_3 * shape[4] + + offset_2 * shape[3] * shape[4] + + offset_1 * shape[2] * shape[3] * shape[4] + + offset_0 * shape[1] * shape[2] * shape[3] * shape[4]; + writer->Write(index); + } } } } diff --git a/src/tensorflow/lite/kernels/internal/reference/sub.h b/src/tensorflow/lite/kernels/internal/reference/sub.h index 5f4fd19..32482e9 100644 --- a/src/tensorflow/lite/kernels/internal/reference/sub.h +++ b/src/tensorflow/lite/kernels/internal/reference/sub.h @@ -18,9 +18,10 @@ limitations under the License. 
#include #include +#include #include -#include "ruy/profiler/instrumentation.h" // from @ruy +#include "third_party/ruy/ruy/profiler/instrumentation.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -29,157 +30,179 @@ namespace tflite { namespace reference_ops { -inline void SubNonBroadcast(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] - input2_data[i], params.float_activation_min, - params.float_activation_max); +template +struct SubImpl { + template + static void BroadcastInput1(const ArithmeticParams& params, + const T* input1_data, const T* input2_data, + T* output_data, size_t size, F binary_func) { + for (size_t c = 0; c < size; ++c) { + output_data[c] = binary_func(input1_data[0], input2_data[c], params); + } } -} -inline void SubNonBroadcast(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] - input2_data[i], params.quantized_activation_min, - params.quantized_activation_max); + template + static void BroadcastInput2(const ArithmeticParams& params, + const T* input1_data, const T* input2_data, + T* output_data, size_t size, F binary_func) { + for (size_t c = 0; c < size; ++c) { + output_data[c] = 
binary_func(input1_data[c], input2_data[0], params); + } } -} - -// TODO(b/151345304): We can implement BroadcastSub on buffers of arbitrary -// dimensionality if the runtime code does a single loop over one dimension -// that handles broadcasting as the base case. The code generator would then -// generate max(D1, D2) nested for loops. -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/float"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.float_activation_min, params.float_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} + template + static void ElementWise(const ArithmeticParams& params, const T* input1_data, + const T* input2_data, T* output_data, size_t size, + F binary_func) { + for (size_t c = 0; c < size; ++c) { + output_data[c] = binary_func(input1_data[c], input2_data[c], params); + } + } +}; + +template <> +struct SubImpl { + template + static void BroadcastInput1(const ArithmeticParams& params, + const int32_t* input1_data, + const int32_t* input2_data, int32_t* output_data, + size_t size, F binary_func) { + size_t c = 0; + int32_t activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); +#ifdef USE_NEON + const int32x4_t vmax = vdupq_n_s32(activation_max); + const int32x4_t vmin = vdupq_n_s32(activation_min); + const int32x4_t va = vdupq_n_s32(input1_data[0]); + for (; c + 4 <= size; c += 4) { + const int32x4_t vb = vld1q_s32(&input2_data[c]); + int32x4_t vres = vsubq_s32(va, vb); + vres = vmaxq_s32(vmin, vres); + vres = vminq_s32(vmax, vres); + vst1q_s32(&output_data[c], vres); + } +#endif + for (; c < size; ++c) { + output_data[c] = binary_func(input1_data[0], input2_data[c], params); + } + } -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const uint8_t* input1_data, - const RuntimeShape& input2_shape, - const uint8_t* input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - 
NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); + template + static void BroadcastInput2(const ArithmeticParams& params, + const int32_t* input1_data, + const int32_t* input2_data, int32_t* output_data, + size_t size, F binary_func) { + size_t c = 0; + int32_t activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); +#ifdef USE_NEON + const int32x4_t vmax = vdupq_n_s32(activation_max); + const int32x4_t vmin = vdupq_n_s32(activation_min); + const int32x4_t vb = vdupq_n_s32(input2_data[0]); + for (; c + 4 <= size; c += 4) { + const int32x4_t va = vld1q_s32(&input1_data[c]); + int32x4_t vres = vsubq_s32(va, vb); + vres = vmaxq_s32(vmin, vres); + vres = vminq_s32(vmax, vres); + vst1q_s32(&output_data[c], vres); + } +#endif + for (; c < size; ++c) { + output_data[c] = binary_func(input1_data[c], input2_data[0], params); + } + } - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); + template + static void ElementWise(const ArithmeticParams& params, + const int32_t* input1_data, + const int32_t* input2_data, int32_t* output_data, + size_t size, F binary_func) { + size_t c = 0; + int32_t activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); +#ifdef USE_NEON + int32x4_t vmax = vdupq_n_s32(activation_max); + int32x4_t vmin = vdupq_n_s32(activation_min); + for (; c + 4 <= size; c += 4) { + const int32x4_t va = vld1q_s32(&input1_data[c]); + const int32x4_t vb = vld1q_s32(&input2_data[c]); + int32x4_t vres = vsubq_s32(va, vb); + vres = vmaxq_s32(vmin, vres); + vres = vminq_s32(vmax, vres); + vst1q_s32(&output_data[c], vres); + } +#endif + for (; c < size; ++c) { + output_data[c] = binary_func(input1_data[c], input2_data[c], 
params); + } + } +}; + +template +inline void BroadcastSubRecursiveDimensions( + int dimension, const ArithmeticParams& params, const T* input1_data, + const T* input2_data, T* output_data, size_t* input1_offset_p, + size_t* input2_offset_p, size_t* output_offset, + size_t* compressed_input1_stride, size_t* compressed_input2_stride, + size_t* compressed_output_shape, F binary_func) { + if (dimension > 0) { + for (size_t c = 0; c < compressed_output_shape[dimension]; ++c) { + size_t input1_offset_c = *input1_offset_p; + size_t input2_offset_c = *input2_offset_p; + BroadcastSubRecursiveDimensions( + dimension - 1, params, input1_data, input2_data, output_data, + &input1_offset_c, &input2_offset_c, output_offset, + compressed_input1_stride, compressed_input2_stride, + compressed_output_shape, binary_func); + *input1_offset_p += compressed_input1_stride[dimension]; + *input2_offset_p += compressed_input2_stride[dimension]; + } + } else { + TFLITE_DCHECK(dimension == 0); + bool input1_is_broadcast = compressed_input1_stride[dimension] == 0; + bool input2_is_broadcast = compressed_input2_stride[dimension] == 0; + TFLITE_DCHECK(!(input1_is_broadcast && input2_is_broadcast)); + const T* input1_data_ptr = input1_data + *input1_offset_p; + const T* input2_data_ptr = input2_data + *input2_offset_p; + T* output_data_ptr = output_data + *output_offset; + if (input1_is_broadcast) { + // input1 is broadcast. + SubImpl::BroadcastInput1( + params, input1_data_ptr, input2_data_ptr, output_data_ptr, + compressed_output_shape[dimension], binary_func); + *input2_offset_p += compressed_output_shape[dimension]; + } else if (input2_is_broadcast) { + // input2 is broadcast. + SubImpl::BroadcastInput2( + params, input1_data_ptr, input2_data_ptr, output_data_ptr, + compressed_output_shape[dimension], binary_func); + *input1_offset_p += compressed_output_shape[dimension]; + } else { + // Add element-wise. 
+ SubImpl::ElementWise(params, input1_data_ptr, input2_data_ptr, + output_data_ptr, + compressed_output_shape[dimension], binary_func); + *input1_offset_p += compressed_output_shape[dimension]; + *input2_offset_p += compressed_output_shape[dimension]; + } + *output_offset += compressed_output_shape[dimension]; + } } -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int32_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); +// TODO: b/296510380 - we may be able to factor out this to common.h for all +// binary arithmetic ops (add, sub, mul). 
+template +inline void BroadcastSubCommon(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const T* input1_data, + const RuntimeShape& input2_shape, + const T* input2_data, + const RuntimeShape& output_shape, T* output_data, + F binary_func) { + constexpr int kMaxBroadcastDim = 6; + TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), kMaxBroadcastDim); + TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), kMaxBroadcastDim); + TFLITE_DCHECK_LE(output_shape.DimensionsCount(), kMaxBroadcastDim); // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -192,31 +215,6 @@ inline void BroadcastSubSlow(const ArithmeticParams& params, // We name our variables by their Tensorflow convention, but generate C code // nesting loops such that the innermost loop has the smallest stride for the // best cache behavior. - auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.quantized_activation_min, params.quantized_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8_t"); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -229,153 +227,115 @@ 
inline void BroadcastSubSlow(const ArithmeticParams& params, // We name our variables by their Tensorflow convention, but generate C code // nesting loops such that the innermost loop has the smallest stride for the // best cache behavior. - auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); -} -template -void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int64_t* input1_data, - const RuntimeShape& input2_shape, - const int64_t* input2_data, - const RuntimeShape& output_shape, int64_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int64_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - 
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); + size_t compressed_input1_stride[kMaxBroadcastDim]; + size_t compressed_input2_stride[kMaxBroadcastDim]; + size_t compressed_output_shape[kMaxBroadcastDim]; + bool broadcastable_shape = ReduceDimensionsForBroadcast( + input1_shape, input2_shape, compressed_input1_stride, + compressed_input2_stride, compressed_output_shape); + // Skip broadcasting for degenerate shapes. + if (!broadcastable_shape) { + return; + } - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.int64_activation_min, params.int64_activation_max); - }; - NDOpsHelper(output_desc, sub_func); + size_t input1_offset = 0; + size_t input2_offset = 0; + size_t output_offset = 0; + BroadcastSubRecursiveDimensions( + kMaxBroadcastDim - 1, params, input1_data, input2_data, output_data, + &input1_offset, &input2_offset, &output_offset, compressed_input1_stride, + compressed_input2_stride, compressed_output_shape, binary_func); } -template +// TODO(b/151345304): We can implement BroadcastSub on buffers of arbitrary +// dimensionality if the runtime code does a single loop over one dimension +// that handles broadcasting as the base case. The code generator would then +// generate max(D1, D2) nested for loops. +template void BroadcastSubSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, const T* input1_data, const RuntimeShape& input2_shape, const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/templated"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. 
The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.quantized_activation_min, params.quantized_activation_max); - }; - NDOpsHelper(output_desc, sub_func); + ruy::profiler::ScopeLabel label("BroadcastSubSlow/T"); + BroadcastSubCommon( + params, input1_shape, input1_data, input2_shape, input2_data, + output_shape, output_data, + [](T input1_val, T input2_val, const ArithmeticParams& params) { + T activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); + return ActivationFunctionWithMinMax(input1_val - input2_val, + activation_min, activation_max); + }); } -// Element-wise Sub that can often be used for inner loop of broadcast sub as -// well as the non-broadcast sub. 
-inline void SubElementwise(int size, const ArithmeticParams& params, - const uint8_t* input1_data, - const uint8_t* input2_data, uint8_t* output_data) { - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, -256); - TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); +inline void BroadcastSub16POTSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const int16_t* input1_data, + const RuntimeShape& input2_shape, + const int16_t* input2_data, + const RuntimeShape& output_shape, + int16_t* output_data) { + ruy::profiler::ScopeLabel label("BroadcastSub16POTSlow/int16_t"); + BroadcastSubCommon( + params, input1_shape, input1_data, input2_shape, input2_data, + output_shape, output_data, + [](int16_t input1_val, int16_t input2_val, + const ArithmeticParams& params) { + const int32_t scaled_input1_val = + gemmlowp::RoundingDivideByPOT(input1_val, -params.input1_shift); + const int32_t scaled_input2_val = + gemmlowp::RoundingDivideByPOT(input2_val, -params.input2_shift); + const int32_t raw_output = scaled_input1_val - scaled_input2_val; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + return static_cast(clamped_output); + }); +} - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = 
scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } +template +void BroadcastQuantSubSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const T* input1_data, + const RuntimeShape& input2_shape, + const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("BroadcastQuantSubSlow/T"); + BroadcastSubCommon( + params, input1_shape, input1_data, input2_shape, input2_data, + output_shape, output_data, + [](T input1_val, T input2_val, const ArithmeticParams& params) { + const int32_t shifted_input1_val = + (params.input1_offset + input1_val) * (1 << params.left_shift); + const int32_t shifted_input2_val = + (params.input2_offset + input2_val) * (1 << params.left_shift); + const int32_t scaled_input1_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input1_val, params.input1_multiplier, + params.input1_shift); + const int32_t scaled_input2_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input2_val, params.input2_multiplier, + params.input2_shift); + const int32_t raw_sub = scaled_input1_val - scaled_input2_val; + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + raw_sub, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + return static_cast(clamped_output); + }); } // Element-wise add that can often be used for inner loop of broadcast add as // well as the non-broadcast add. 
+template inline void SubElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); - + const T* input1_data, const T* input2_data, + T* output_data) { for (int i = 0; i < size; ++i) { const int32_t input1_val = params.input1_offset + input1_data[i]; const int32_t input2_val = params.input2_offset + input2_data[i]; @@ -395,7 +355,7 @@ inline void SubElementwise(int size, const ArithmeticParams& params, const int32_t clamped_output = std::min(params.quantized_activation_max, std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); + output_data[i] = static_cast(clamped_output); } } @@ -425,11 +385,27 @@ inline void Sub(const ArithmeticParams& params, const int flat_size = MatchingElementsSize(input1_shape, input2_shape, output_shape); - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); + TFLITE_DCHECK_GE(params.input1_offset, -128); + TFLITE_DCHECK_GE(params.input2_offset, -128); + // offset = -quantization_params.zero_point in PrepareGeneralSubOp(). + // So it's maximum can be 128 not 127. 
+ TFLITE_DCHECK_LE(params.input1_offset, 128); + TFLITE_DCHECK_LE(params.input2_offset, 128); + SubElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +inline void Sub(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const int16_t* input1_data, + const RuntimeShape& input2_shape, const int16_t* input2_data, + const RuntimeShape& output_shape, int16_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + TFLITE_DCHECK_EQ(params.input1_offset, 0); + TFLITE_DCHECK_EQ(params.input2_offset, 0); SubElementwise(flat_size, params, input1_data, input2_data, output_data); } @@ -438,35 +414,12 @@ void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape, const T* input1_data, const RuntimeShape& input2_shape, const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - input1_data[SubscriptToIndex(desc1, b, y, x, c)] - - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - } - } - } - } + BroadcastSubCommon( + params, input1_shape, input1_data, input2_shape, input2_data, + output_shape, output_data, + [](T input1_val, T input2_val, const ArithmeticParams& params) { + return input1_val - input2_val; + }); } inline void SetActivationMinMax(const ArithmeticParams& params, diff --git a/src/tensorflow/lite/kernels/internal/reference/tanh.h b/src/tensorflow/lite/kernels/internal/reference/tanh.h index 3a05c47..9d99691 100644 --- a/src/tensorflow/lite/kernels/internal/reference/tanh.h +++ b/src/tensorflow/lite/kernels/internal/reference/tanh.h @@ -17,7 +17,7 @@ limitations under the License. #include -#include "fixedpoint/fixedpoint.h" +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/types.h" diff --git a/src/tensorflow/lite/kernels/internal/reference/transpose.h b/src/tensorflow/lite/kernels/internal/reference/transpose.h new file mode 100644 index 0000000..7e2bf7b --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/transpose.h @@ -0,0 +1,203 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ + +#include + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +namespace transpose_internal { + +// Recursively explores all the dimensions of the output tensor and writes the +// corresponding input tensor data. +// +// - depth: the current depth of the recursion. +// - dims: tensor dimension count, also `perm` size. +// - perm: permutation array. +// - input_data: Running input data pointer. If depth == num_dims-1, this points +// to the first element of the last dimension to traverse. +// - input_stride: Reverse partial product of input shapes. +// - output_data: Running output data pointer. If depth == num_dims-1, this +// points to the first element of the last dimension to traverse. +// - output_stride: Reverse partial product of output shapes. +// - output_shape: Shape of the output tensor. +// +// ## Algorithm explanation +// +// Assume a 3D tensor T with a shape of [I, J, K] stored in row major order. +// T[i, j, k] is at position `i*J*K + j*K + k` in the tensor buffer. +// +// If we want to go through the whole tensor iteratively, we can use loops. +// +// ``` +// for(i = 0; i < I; ++i) { +// for(j = 0; j < J; ++j) { +// for(k = 0; k < K; ++k) { +// T.data[i*J*K + j*K + k] = ... +// } +// } +// } +// ``` +// +// We can also compute the offset as we go through the loops. 
+// +// ``` +// stride_i = K * J; +// stride_j = K; +// stride_k = 1; +// for(i = 0; i < I; ++i) { +// offset_i = i * stride_i; +// offset_j = 0; +// for(j = 0; j < J; ++j) { +// offset_j += stride_j; +// offset_k = 0; +// for(k = 0; k < K; ++k) { +// offset_k += stride_k; +// T.data[offset_i + offset_j + offset_k] = ... +// } +// } +// } +// ``` +// +// This nicely extends to a recursive version which is the base of this +// algorithm and supports any number of dimensions. +// +// ``` +// shape = [I, J, K] +// strides = [K*J, K, 1] +// void recurse(T* data, shape, strides, depth = 0) { +// if(depth == shape.size) { +// *data = ... +// } else { +// for(a = 0; a < shape[depth]; ++a) { +// recurse(data, shape, strides, depth+1); +// data += strides[depth]; +// } +// } +// } +// ``` +template +void TransposeImpl(const int depth, const int dims, const int32_t* perm, + const T* input_data, const int* input_stride, T* output_data, + const int* output_stride, const int32_t* output_shape) { + const int dimension_size = output_shape[depth]; + if (depth == dims - 1) { + const int loop_stride = input_stride[perm[depth]]; + for (int i = 0; i < dimension_size; ++i) { + output_data[i] = *input_data; + input_data += loop_stride; + } + } else { + for (int i = 0; i < dimension_size; ++i) { + TransposeImpl(depth + 1, dims, perm, input_data, input_stride, + output_data, output_stride, output_shape); + + input_data += input_stride[perm[depth]]; + output_data += output_stride[depth]; + } + } +} + +// Compile-time switch to get the storage type of the transposition. +template +struct TransposeStorageType; + +template <> +struct TransposeStorageType<1> { + using type = int8_t; +}; + +template <> +struct TransposeStorageType<2> { + using type = int16_t; +}; + +template <> +struct TransposeStorageType<4> { + using type = int32_t; +}; + +template <> +struct TransposeStorageType<8> { + using type = int64_t; +}; + +// Sets up the stride arrays for the recursive transpose algorithm. 
+// +// Implementation notes: +// +// This is a reverse partial product. We could use standard algorithms to +// implement this but the result is not a readable and is tricky to get right +// because the first element must be set to 1, which leads to offset +// shenanigans: +// +// ``` +// stride[dims - 1] = 1; +// std::partial_sum(std::make_reverse_iterator(shape + dims), +// std::make_reverse_iterator(shape + 1), +// stride.rend() - input_rank + 1, std::multiplies()); +// ``` +// +// Note that Abseil isn't used in kernels implementation. That would make the +// above solution more readable. +inline void SetupTransposeStrides( + std::array& stride, const int32_t* shape, + const int dims) { + stride[dims - 1] = 1; + for (int i = dims - 2; i >= 0; --i) { + stride[i] = stride[i + 1] * shape[i + 1]; + } +} + +} // namespace transpose_internal + +// Copies a tensor to an other buffer and permutes its dimensions. +// +// Note: template parameter N is not used anymore. It is kept for API +// compatibility with TFLite micro. +template +void Transpose(const TransposeParams& params, const RuntimeShape& input_shape, + const T* input_data, const RuntimeShape& output_shape, + T* output_data) { + using transpose_internal::SetupTransposeStrides; + using transpose_internal::TransposeImpl; + using transpose_internal::TransposeStorageType; + // Transpose kernel only does rearranging values not numeric evaluations on + // each cell. It's safe to implement per size of scalar type and this trick + // keeps the total code size in a reasonable range. 
+ using StorageType = typename TransposeStorageType::type; + const StorageType* const input_data_storage = + reinterpret_cast(input_data); + StorageType* const output_data_storage = + reinterpret_cast(output_data); + + const int dims = input_shape.DimensionsCount(); + std::array input_stride, output_stride; + SetupTransposeStrides(input_stride, input_shape.DimsData(), dims); + SetupTransposeStrides(output_stride, output_shape.DimsData(), dims); + TransposeImpl(0, dims, ¶ms.perm[0], input_data_storage, + input_stride.data(), output_data_storage, output_stride.data(), + output_shape.DimsData()); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ diff --git a/src/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/src/tensorflow/lite/kernels/internal/reference/transpose_conv.h new file mode 100644 index 0000000..744ed0f --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/reference/transpose_conv.h @@ -0,0 +1,322 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ + +#include + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +inline void TransposeConv( + const ConvParams& params, const RuntimeShape& input_shape, + const float* input_data, const RuntimeShape& filter_shape, + const float* filter_data, const RuntimeShape& bias_shape, + const float* bias_data, const RuntimeShape& output_shape, + float* output_data, const RuntimeShape& im2col_shape, float* im2col_data) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. 
+ + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const float output_activation_min = params.float_activation_min; + const float output_activation_max = params.float_activation_max; + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Although transpose convolution simplifies to convolution with transposed + // weights for strides of 1, non-unitary striding complicates matters. To + // keep this reference implementation as clear as possible, we use a + // "scatter" access pattern, where we loop through all the input elements, + // computing their influence on the output, rather than looping through the + // output elements in the typical "gather" access pattern of a conv. We + // therefore must initialize the output array to zero. + const int num_elements = output_shape.FlatSize(); + for (int i = 0; i < num_elements; i++) { + output_data[i] = 0.0f; + } + + // Loop through input elements one at a time. 
+ for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + float input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + float filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + input_value * filter_value; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + float acc = output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) acc += bias_data[out_channel]; + + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + ActivationFunctionWithMinMax(acc, output_activation_min, + output_activation_max); + } + } + } + } +} + +inline void TransposeConv( + const ConvParams& params, const RuntimeShape& input_shape, + const uint8_t* input_data, const RuntimeShape& filter_shape, + const uint8_t* filter_data, const RuntimeShape& bias_shape, + const int32_t* bias_data, const 
RuntimeShape& output_shape, + uint8_t* output_data, const RuntimeShape& im2col_shape, + uint8_t* im2col_data, int32_t* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t input_offset = params.input_offset; + const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_multiplier = params.output_multiplier; + const int output_shift = params.output_shift; + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float version. + memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); + + // Loop through input elements one at a time. 
+ for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + uint8_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + uint8_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + (input_value + input_offset) * + (filter_value + filter_offset); + } + } + } + } + } + } + } + } + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + int32_t scaled_acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier, output_shift); + scaled_acc += output_offset; + scaled_acc = std::max(scaled_acc, output_activation_min); + scaled_acc = std::min(scaled_acc, output_activation_max); + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + static_cast(scaled_acc); + } + } + } + } +} + 
+inline void HybridTransposeConv( + const ConvParams& params, float* scaling_factors_ptr, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data, + const float* per_channel_scale, int32_t* input_offset) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const float output_activation_min = params.float_activation_min; + const float output_activation_max = params.float_activation_max; + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Although transpose convolution simplifies to convolution with transposed + // weights for strides of 1, non-unitary striding complicates matters. To + // keep this reference implementation as clear as possible, we use a + // "scatter" access pattern, where we loop through all the input elements, + // computing their influence on the output, rather than looping through the + // output elements in the typical "gather" access pattern of a conv. We + // therefore must initialize the output array to zero. 
+ const int num_elements = output_shape.FlatSize(); + for (int i = 0; i < num_elements; i++) { + output_data[i] = 0.0f; + } + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + const float scaling_factor = scaling_factors_ptr[batch]; + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + int32_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + int32_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + int32_t acc = + (input_value - input_offset[batch]) * filter_value; + output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + acc * per_channel_scale[out_channel] * scaling_factor; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + float acc = output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) acc += bias_data[out_channel]; + + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + 
ActivationFunctionWithMinMax(acc, output_activation_min, + output_activation_max); + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ diff --git a/src/tensorflow/lite/kernels/internal/runtime_shape.cpp b/src/tensorflow/lite/kernels/internal/runtime_shape.cpp new file mode 100644 index 0000000..dd12278 --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/runtime_shape.cpp @@ -0,0 +1,23 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/runtime_shape.h" + +namespace tflite { + +// Defining a constexpr static class member is necessary in C++11 +constexpr int tflite::RuntimeShape::kMaxSmallSize; + +} // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/runtime_shape.h b/src/tensorflow/lite/kernels/internal/runtime_shape.h new file mode 100644 index 0000000..bc786bd --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/runtime_shape.h @@ -0,0 +1,168 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ + +#include <cstring> + +#include "tensorflow/lite/kernels/internal/compatibility.h" + +namespace tflite { + +template <int N> +struct Dims { + int sizes[N]; + int strides[N]; +}; + +class RuntimeShape { + public: + RuntimeShape& operator=(RuntimeShape const&) = delete; + + // RuntimeShape in TFLM supports up to 6 dimensions. + // The name kMaxSmallSize comes from the same file of the upstream + // tensorflow lite repo and needs to be kept the same for max reuse. 
+ static constexpr int kMaxSmallSize = 6; + + RuntimeShape() : size_(0) {} + + explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) { + TFLITE_DCHECK_LE(dimensions_count, kMaxSmallSize); + } + + RuntimeShape(int shape_size, int32_t value) : size_(shape_size) { + TFLITE_DCHECK_LE(shape_size, kMaxSmallSize); + for (int i = 0; i < shape_size; ++i) { + SetDim(i, value); + } + } + + RuntimeShape(int dimensions_count, const int32_t* dims_data) + : size_(dimensions_count) { + // check of dimensions_count handled by ReplaceWith() + ReplaceWith(dimensions_count, dims_data); + } + + bool operator==(const RuntimeShape& comp) const { + return this->size_ == comp.size_ && + std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) == + 0; + } + + ~RuntimeShape() {} + + int32_t DimensionsCount() const { return size_; } + int32_t Dims(int i) const { + TFLITE_DCHECK_GE(i, 0); + TFLITE_DCHECK_LT(i, size_); + return dims_[i]; + } + void SetDim(int i, int32_t val) { + TFLITE_DCHECK_GE(i, 0); + TFLITE_DCHECK_LT(i, size_); + dims_[i] = val; + } + + static RuntimeShape ExtendedShape(int new_shape_size, + const RuntimeShape& shape) { + TFLITE_DCHECK_LE(new_shape_size, kMaxSmallSize); + return RuntimeShape(new_shape_size, shape, 1); + } + int32_t* DimsData() { return dims_; } + const int32_t* DimsData() const { return dims_; } + const int32_t* DimsDataUpTo5D() const { return dims_; } + + void ReplaceWith(int dimensions_count, const int32_t* dims_data) { + TFLITE_DCHECK_LE(dimensions_count, kMaxSmallSize); + size_ = dimensions_count; + int32_t* dst_dims = DimsData(); + std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t)); + } + + // Returns the total count of elements, that is the size when flattened into a + // vector. 
+ int FlatSize() const { + int buffer_size = 1; + const int* dims_data = reinterpret_cast<const int*>(DimsData()); + for (int i = 0; i < size_; i++) { + buffer_size *= dims_data[i]; + } + return buffer_size; + } + + private: + // For use only by ExtendedShape(), written to guarantee (return-value) copy + // elision in C++17. + // This creates a shape padded to the desired size with the specified value. + RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value) + : size_(new_shape_size) { + // If the following check fails, it is likely because a 4D-only kernel is + // being used with an array of larger dimension count. + TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount()); + const int size_increase = new_shape_size - shape.DimensionsCount(); + for (int i = 0; i < size_increase; ++i) { + SetDim(i, pad_value); + } + std::memcpy(DimsData() + size_increase, shape.DimsData(), + sizeof(int32_t) * shape.DimensionsCount()); + } + + int32_t size_; + union { + int32_t dims_[kMaxSmallSize]; + }; +}; + +// Since tensors with '0' in their shape are valid in TF, these offset functions +// allow that as long as the corresponding index is also 0. It is up to the +// calling ops to ensure that they perform verification checks on tensor shapes +// if they don't support a particular behavior. 
+ +inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { + TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4); + const int* dims_data = reinterpret_cast<const int*>(shape.DimsData()); + TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) || + (i0 >= 0 && i0 < dims_data[0])); + TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) || + (i1 >= 0 && i1 < dims_data[1])); + TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) || + (i2 >= 0 && i2 < dims_data[2])); + TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) || + (i3 >= 0 && i3 < dims_data[3])); + return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; +} + +inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3, + int i4) { + TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5); + const int* dims_data = reinterpret_cast<const int*>(shape.DimsData()); + TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) || + (i0 >= 0 && i0 < dims_data[0])); + TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) || + (i1 >= 0 && i1 < dims_data[1])); + TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) || + (i2 >= 0 && i2 < dims_data[2])); + TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) || + (i3 >= 0 && i3 < dims_data[3])); + TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) || + (i4 >= 0 && i4 < dims_data[4])); + return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) * + dims_data[4] + + i4; +} + +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ diff --git a/src/tensorflow/lite/kernels/internal/strided_slice_logic.h b/src/tensorflow/lite/kernels/internal/strided_slice_logic.h index 3d91fbd..449cac0 100644 --- a/src/tensorflow/lite/kernels/internal/strided_slice_logic.h +++ b/src/tensorflow/lite/kernels/internal/strided_slice_logic.h @@ -69,6 +69,73 @@ inline void StridedSlicePadIndices(tflite::StridedSliceParams* p, p->strides_count = dim_count; } +// Return the index for the first element along that axis.
This index will be a +// positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0) +// that can be used to index directly into the data. +inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params, + const RuntimeShape& input_shape, + int32_t axis) { + const int32_t axis_size = input_shape.Dims(axis); + int32_t start = params.start_indices[axis]; + const int32_t stride = params.strides[axis]; + const int32_t begin_mask = (params.begin_mask & 1 << axis); + if (start < 0) { + start += axis_size; + } + if (stride > 0) { + start = Clamp(start, 0, axis_size); + } else { + start = Clamp(start, -1, axis_size - 1); + } + if (begin_mask) { + if (stride > 0) { + start = 0; + } else { + start = axis_size - 1; + } + } + return start; +} + +inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params, + const RuntimeShape& input_shape, int axis, + int start) { + const auto shrink_axis_mask = params.shrink_axis_mask; + const bool shrink_axis = shrink_axis_mask & (1 << axis); + const int axis_size = input_shape.Dims(axis); + const bool offset = params.offset; + if (shrink_axis) { + if (start >= axis_size) { + return start; + } else { + return start + 1; + } + } + const auto* indices = params.stop_indices; + int end = indices[axis]; + if (offset) { + end += start; + } + const int32_t stride = params.strides[axis]; + const int32_t end_mask = (params.end_mask & 1 << axis); + if (end < 0) { + end += axis_size; + } + if (stride > 0) { + end = Clamp(end, 0, axis_size); + } else { + end = Clamp(end, -1, axis_size - 1); + } + if (end_mask) { + if (stride > 0) { + end = axis_size; + } else { + end = -1; + } + } + return end; +} + // Return the index for the first element along that axis. This index will be a // positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0) // that can be used to index directly into the data. 
@@ -140,7 +207,7 @@ inline int StopForAxis(const tflite::StridedSliceParams& params, // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has // already been adjusted for negative indices. if (shrink_axis) { - stop = start_for_axis + 1; + return start_for_axis + 1; } // end_mask override @@ -183,7 +250,7 @@ inline tflite::StridedSliceParams BuildStridedSliceParams( int begin_mask, int end_mask, int shrink_axis_mask, const std::vector& start_indices, const std::vector& stop_indices, const std::vector& strides) { - tflite::StridedSliceParams op_params; + tflite::StridedSliceParams op_params{}; const int dims_count = start_indices.size(); op_params.start_indices_count = dims_count; diff --git a/src/tensorflow/lite/kernels/internal/tensor_ctypes.cpp b/src/tensorflow/lite/kernels/internal/tensor_ctypes.cpp new file mode 100644 index 0000000..6bd58fc --- /dev/null +++ b/src/tensorflow/lite/kernels/internal/tensor_ctypes.cpp @@ -0,0 +1,37 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" + +#include <vector> + +namespace tflite { + +RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { + if (tensor == nullptr) { + return RuntimeShape(); + } + + TfLiteIntArray* dims = tensor->dims; + const int dims_size = dims->size; + const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims->data); + return RuntimeShape(dims_size, dims_data); +} + +RuntimeShape GetTensorShape(std::vector<int32_t> data) { + return RuntimeShape(data.size(), data.data()); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/tensor_ctypes.h b/src/tensorflow/lite/kernels/internal/tensor_ctypes.h index f1d3e17..9a7205c 100644 --- a/src/tensorflow/lite/kernels/internal/tensor_ctypes.h +++ b/src/tensorflow/lite/kernels/internal/tensor_ctypes.h @@ -15,7 +15,10 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ -#include "tensorflow/lite/c/common.h" +#include <vector> + +#include "tensorflow/lite/core/c/common.h" +#include "tensorflow/lite/core/macros.h" #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { @@ -31,16 +34,8 @@ inline const T* GetTensorData(const TfLiteTensor* tensor) { : nullptr; } -inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { - if (tensor == nullptr) { - return RuntimeShape(); - } - - TfLiteIntArray* dims = tensor->dims; - const int dims_size = dims->size; - const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims->data); - return RuntimeShape(dims_size, dims_data); -} +TFLITE_NOINLINE RuntimeShape GetTensorShape(const TfLiteTensor* tensor); +RuntimeShape GetTensorShape(std::vector<int32_t> data); } // namespace tflite diff --git a/src/tensorflow/lite/kernels/internal/tensor_utils.cpp b/src/tensorflow/lite/kernels/internal/tensor_utils.cpp new file mode 100644 index 0000000..7e5d981 --- /dev/null +++ 
b/src/tensorflow/lite/kernels/internal/tensor_utils.cpp @@ -0,0 +1,25 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ + +// internal/reference/portable_tensor_utils.h has the implementation of the +// functions declared in internal/portable_tensor_utils.h. This somewhat +// confusing setup is derived from how the code is organized in TfLite where it +// is used to select between NEON, SSE and portable implementaitons. See +// https://github.com/tensorflow/tensorflow/blob/d76c23975c4a3a0d7987cfe3f45c76566df06180/tensorflow/lite/kernels/internal/tensor_utils.cc +// for how the code is written in TfLite. + +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h" diff --git a/src/tensorflow/lite/kernels/internal/types.h b/src/tensorflow/lite/kernels/internal/types.h index 37403a8..510ffa3 100644 --- a/src/tensorflow/lite/kernels/internal/types.h +++ b/src/tensorflow/lite/kernels/internal/types.h @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,10 @@ limitations under the License. 
#include #include #include +#include #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/runtime_shape.h" namespace tflite { @@ -43,6 +45,20 @@ struct PaddingValues { int16_t height_offset; }; +struct Padding3DValues { + int16_t width; + int16_t height; + int16_t depth; + // offset is used for calculating "remaining" padding, for example, `width` + // is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is + // 1 + 1 = 2. + int16_t width_offset; + // Same as width_offset except it's over the height dimension. + int16_t height_offset; + // Same as width_offset except it's over the depth dimension. + int16_t depth_offset; +}; + // This enumeration allows for non-default formats for the weights array // of a fully-connected operator, allowing the use of special optimized // runtime paths. @@ -125,209 +141,25 @@ inline bool operator==(const QuantizationParams& qp1, return qp1.zero_point == qp2.zero_point && qp1.scale == qp2.scale; } -template -struct Dims { - int sizes[N]; - int strides[N]; -}; - -class RuntimeShape { - public: - // Shapes with dimensions up to 5 are stored directly in the structure, while - // larger shapes are separately allocated. 
- static constexpr int kMaxSmallSize = 5; - - RuntimeShape& operator=(RuntimeShape const&) = delete; - - RuntimeShape() : size_(0) {} - - explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) { - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - RuntimeShape(int shape_size, int32_t value) : size_(0) { - Resize(shape_size); - for (int i = 0; i < shape_size; ++i) { - SetDim(i, value); - } - } - - RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) { - ReplaceWith(dimensions_count, dims_data); - } - - RuntimeShape(const std::initializer_list init_list) : size_(0) { - BuildFrom(init_list); - } - - // Avoid using this constructor. We should be able to delete it when C++17 - // rolls out. - RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) { - if (size_ > kMaxSmallSize) { - dims_pointer_ = new int32_t[size_]; - } - std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_); - } - - bool operator==(const RuntimeShape& comp) const { - return this->size_ == comp.size_ && - std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) == - 0; - } - - ~RuntimeShape() { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline int32_t DimensionsCount() const { return size_; } - inline int32_t Dims(int i) const { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - return size_ > kMaxSmallSize ? 
dims_pointer_[i] : dims_[i]; - } - inline void SetDim(int i, int32_t val) { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - if (size_ > kMaxSmallSize) { - dims_pointer_[i] = val; - } else { - dims_[i] = val; - } - } - - inline int32_t* DimsData() { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - inline const int32_t* DimsData() const { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - // The caller must ensure that the shape is no bigger than 5-D. - inline const int32_t* DimsDataUpTo5D() const { return dims_; } - - inline void Resize(int dimensions_count) { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - size_ = dimensions_count; - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline void ReplaceWith(int dimensions_count, const int32_t* dims_data) { - Resize(dimensions_count); - int32_t* dst_dims = DimsData(); - std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t)); - } - - template - inline void BuildFrom(const T& src_iterable) { - const int dimensions_count = - std::distance(src_iterable.begin(), src_iterable.end()); - Resize(dimensions_count); - int32_t* data = DimsData(); - for (auto it : src_iterable) { - *data = it; - ++data; - } - } - - // This will probably be factored out. Old code made substantial use of 4-D - // shapes, and so this function is used to extend smaller shapes. 
Note that - // (a) as Dims<4>-dependent code is eliminated, the reliance on this should be - // reduced, and (b) some kernels are stricly 4-D, but then the shapes of their - // inputs should already be 4-D, so this function should not be needed. - inline static RuntimeShape ExtendedShape(int new_shape_size, - const RuntimeShape& shape) { - return RuntimeShape(new_shape_size, shape, 1); - } - - inline void BuildFrom(const std::initializer_list init_list) { - BuildFrom>(init_list); - } - - // Returns the total count of elements, that is the size when flattened into a - // vector. - inline int FlatSize() const { - int buffer_size = 1; - const int* dims_data = reinterpret_cast(DimsData()); - for (int i = 0; i < size_; i++) { - buffer_size *= dims_data[i]; - } - return buffer_size; - } - - bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); } - - private: - // For use only by ExtendedShape(), written to guarantee (return-value) copy - // elision in C++17. - // This creates a shape padded to the desired size with the specified value. - RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value) - : size_(0) { - // If the following check fails, it is likely because a 4D-only kernel is - // being used with an array of larger dimension count. - TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount()); - Resize(new_shape_size); - const int size_increase = new_shape_size - shape.DimensionsCount(); - for (int i = 0; i < size_increase; ++i) { - SetDim(i, pad_value); - } - std::memcpy(DimsData() + size_increase, shape.DimsData(), - sizeof(int32_t) * shape.DimensionsCount()); - } - - int32_t size_; - union { - int32_t dims_[kMaxSmallSize]; - int32_t* dims_pointer_; - }; +// Quantization parameters for each channel, determining the mapping of +// quantized values to real values. See QuantizationParams for a single set of +// parameters per tensor. This has one parameters set per each channel. 
+// +// The correspondence is as follows: +// +// real_value = scale[channel] * (quantized_value - zero_point[channel]); +// +struct PerChannelQuantizationParams { + // The following members typically point to the corresponding members of a + // TfLiteAffineQuantization struct. + const float* scale; + const int32_t* zero_point; + int32_t quantized_dimension; }; -// Converts inference-style shape to legacy tflite::Dims<4>. -inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) { - tflite::Dims<4> result; - const int dimensions_count = array_shape.DimensionsCount(); - TFLITE_CHECK_LE(dimensions_count, 4); - int cum_prod = 1; - for (int i = 0; i < 4; i++) { - const int new_dim = - (i < dimensions_count) ? array_shape.Dims(dimensions_count - 1 - i) : 1; - result.sizes[i] = new_dim; - result.strides[i] = cum_prod; - cum_prod *= new_dim; - } - return result; -} - -// TODO(b/80418076): Move to legacy ops file, update invocations. -inline RuntimeShape DimsToShape(const tflite::Dims<4>& dims) { - return RuntimeShape( - {dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]}); -} - // Gets next index to iterate through a multidimensional array. 
-inline bool NextIndex(const int num_dims, const int* dims, int* current) { +template +inline bool NextIndex(const int num_dims, const int* dims, IndexType* current) { if (num_dims == 0) { return false; } @@ -335,7 +167,7 @@ inline bool NextIndex(const int num_dims, const int* dims, int* current) { TFLITE_DCHECK(current != nullptr); int carry = 1; for (int idx = num_dims - 1; idx >= 0; --idx) { - int current_val = current[idx] + carry; + IndexType current_val = current[idx] + carry; TFLITE_DCHECK_GE(dims[idx], current_val); if (dims[idx] == current_val) { current[idx] = 0; @@ -382,21 +214,20 @@ inline size_t ReducedOutputOffset(const int num_dims, const int* dims, return offset; } -inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4); - const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); - TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); - return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; -} +// Since tensors with '0' in their shape are valid in TF, these offset functions +// allow that as long as the corresponding index is also 0. It is upto the +// calling ops to ensure that they perform verification checks on tensor shapes +// if they don't support a particular behavior. 
inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims.sizes[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims.sizes[3]); + TFLITE_DCHECK((i0 == 0 && dims.sizes[0] == 0) || + (i0 >= 0 && i0 < dims.sizes[0])); + TFLITE_DCHECK((i1 == 0 && dims.sizes[1] == 0) || + (i1 >= 0 && i1 < dims.sizes[1])); + TFLITE_DCHECK((i2 == 0 && dims.sizes[2] == 0) || + (i2 >= 0 && i2 < dims.sizes[2])); + TFLITE_DCHECK((i3 == 0 && dims.sizes[3] == 0) || + (i3 >= 0 && i3 < dims.sizes[3])); return i0 * dims.strides[0] + i1 * dims.strides[1] + i2 * dims.strides[2] + i3 * dims.strides[3]; } @@ -405,10 +236,6 @@ inline int Offset(const Dims<4>& dims, int* index) { return Offset(dims, index[0], index[1], index[2], index[3]); } -inline int Offset(const RuntimeShape& shape, int* index) { - return Offset(shape, index[0], index[1], index[2], index[3]); -} - // Get array size, DCHECKing that the dim index is in range. // // Note that this will be phased out with Dims<4>, since RuntimeShape::Dims() @@ -570,6 +397,58 @@ inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0, return MatchingFlatSize(dims, check_dims_1, check_dims_2, check_dims_3); } +// Flat size calculation, checking if their extended shapes match. 
+inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0) { + const int shape_dims = shape.DimensionsCount(); + const int check_shape_0_dims = check_shape_0.DimensionsCount(); + const int min_dims = std::min(shape_dims, check_shape_0_dims); + + for (int i = 0; i < min_dims; ++i) { + TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), + check_shape_0.Dims(check_shape_0_dims - 1 - i)); + } + for (int i = min_dims; i < shape_dims; ++i) { + TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), 1); + } + for (int i = min_dims; i < check_shape_0_dims; ++i) { + TFLITE_DCHECK_EQ(check_shape_0.Dims(check_shape_0_dims - 1 - i), 1); + } + return shape.FlatSize(); +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1), + flat_size); + return flat_size; +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1, + const RuntimeShape& check_shape_2) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ( + MatchingExtendedShapeFlatSize(shape, check_shape_1, check_shape_2), + flat_size); + return flat_size; +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1, + const RuntimeShape& check_shape_2, + const RuntimeShape& check_shape_3) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1, + check_shape_2, check_shape_3), + flat_size); + return flat_size; +} + // Data is required to be contiguous, and so many operators can use either the // full array flat size or the flat size with one 
dimension skipped (commonly // the depth). @@ -782,6 +661,9 @@ struct ArithmeticParams { // int64_t activation params. int64_t int64_activation_min; int64_t int64_activation_max; + // int16_t activation params. + int16_t int16_activation_min; + int16_t int16_activation_max; // Processed output dimensions. // Let input "a" be the one that broadcasts in the faster-changing dimension. @@ -840,6 +722,21 @@ struct ConvParams { float float_activation_max; }; +struct Conv3DParams { + Padding3DValues padding_values; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width; + int dilation_height; + int dilation_depth; + // float activation params. + float float_activation_min; + float float_activation_max; +}; + +typedef Conv3DParams Conv3DTransposeParams; + struct DepthToSpaceParams { int32_t block_size; }; @@ -907,6 +804,7 @@ struct FullyConnectedParams { struct GatherParams { int16_t axis; + int16_t batch_dims; }; struct L2NormalizationParams { @@ -973,9 +871,9 @@ struct PackParams { struct PadParams { int8_t left_padding_count; - int32_t left_padding[4]; + int32_t left_padding[5]; int8_t right_padding_count; - int32_t right_padding[4]; + int32_t right_padding[5]; ResizingCategory resizing_category; }; @@ -1025,9 +923,9 @@ struct ResizeNearestNeighborParams { struct SliceParams { int8_t begin_count; - int32_t begin[4]; + int32_t begin[5]; int8_t size_count; - int32_t size[4]; + int32_t size[5]; }; struct SoftmaxParams { @@ -1081,11 +979,12 @@ struct StridedSliceParams { int8_t strides_count; int32_t strides[5]; - int16_t begin_mask; - int16_t ellipsis_mask; - int16_t end_mask; - int16_t new_axis_mask; - int16_t shrink_axis_mask; + uint16_t begin_mask; + uint16_t ellipsis_mask; + uint16_t end_mask; + uint16_t new_axis_mask; + uint16_t shrink_axis_mask; + bool offset; }; struct TanhParams { @@ -1095,9 +994,11 @@ struct TanhParams { int input_left_shift; }; +constexpr int kTransposeMaxDimensions = 6; + struct TransposeParams { int8_t perm_count; - 
int32_t perm[5]; + int32_t perm[kTransposeMaxDimensions]; }; struct UnpackParams { @@ -1127,6 +1028,18 @@ inline void SetActivationParams(int32_t min, int32_t max, P* params) { params->quantized_activation_max = max; } +template +inline void SetActivationParams(uint32_t min, uint32_t max, P* params) { + params->quantized_activation_min = min; + params->quantized_activation_max = max; +} + +template +inline void SetActivationParams(int16_t min, int16_t max, P* params) { + params->int16_activation_min = min; + params->int16_activation_max = max; +} + template inline void SetActivationParams(int64_t min, int64_t max, P* params) { params->int64_activation_min = min; @@ -1139,6 +1052,18 @@ inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) { *max = params.quantized_activation_max; } +template +inline void GetActivationParams(const P& params, uint32_t* min, uint32_t* max) { + *min = params.quantized_activation_min; + *max = params.quantized_activation_max; +} + +template +inline void GetActivationParams(const P& params, int16_t* min, int16_t* max) { + *min = params.int16_activation_min; + *max = params.int16_activation_max; +} + template inline void GetActivationParams(const P& params, float* min, float* max) { *min = params.float_activation_min; @@ -1150,6 +1075,23 @@ inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) { *min = params.int64_activation_min; *max = params.int64_activation_max; } + +// Type trait to check of given type has size smaller than 4 bytes. +template +struct is_small_integer + : public std::integral_constant::value || + std::is_same::value || + std::is_same::value || + std::is_same::value> {}; + +// Type trait to check of given type is int32 or int64. 
+template +struct is_int32_or_int64 + : public std::integral_constant::value || + std::is_same::value> { +}; + } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_ diff --git a/src/tensorflow/lite/kernels/kernel_util.cpp b/src/tensorflow/lite/kernels/kernel_util.cpp index 88d91a8..d6734e9 100644 --- a/src/tensorflow/lite/kernels/kernel_util.cpp +++ b/src/tensorflow/lite/kernels/kernel_util.cpp @@ -22,11 +22,22 @@ limitations under the License. #include #include -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" +#ifndef ARDUINO +#include + +#include "tensorflow/lite/array.h" +#endif // ARDUINO + +#include "tensorflow/lite/context_util.h" +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" +#if defined(__APPLE__) +#include "TargetConditionals.h" +#endif + namespace tflite { namespace { @@ -112,6 +123,7 @@ TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node, TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { TfLiteTensor* tensor = GetMutableInput(context, node, index); + if (tensor == nullptr) return nullptr; return tensor->is_variable ? 
tensor : nullptr; } @@ -140,7 +152,7 @@ const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, return GetInput(context, node, index); } -#ifndef TF_LITE_STATIC_MEMORY +#ifndef ARDUINO TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node, int index) { const int tensor_index = ValidateTensorIndexing( @@ -182,7 +194,7 @@ TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context, *tensor = GetTensorAtIndex(context, tensor_index); return kTfLiteOk; } -#endif // TF_LITE_STATIC_MEMORY +#endif // ARDUINO // Per-axis TfLiteStatus PopulateConvolutionQuantizationParams( @@ -190,7 +202,7 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift) { + int32_t* per_channel_multiplier, int32_t* per_channel_shift) { const auto* affine_quantization = reinterpret_cast(filter->quantization.params); return PopulateConvolutionQuantizationParams( @@ -205,7 +217,8 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) { + int32_t* per_channel_multiplier, int32_t* per_channel_shift, + int num_channels) { TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, filter->quantization.type, @@ -226,7 +239,8 @@ TfLiteStatus PopulateConvolutionQuantizationParams( // Currently only Int8/Int16 is supported for per channel quantization. 
TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8); + TF_LITE_ENSURE(context, + filter->type == kTfLiteInt8 || filter->type == kTfLiteInt4); TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels); TF_LITE_ENSURE_EQ( context, num_channels, @@ -326,30 +340,49 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, } namespace { -void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, - int32_t qmin, int32_t qmax, - TfLiteTensor* output, - int32_t* act_min, int32_t* act_max) { + +inline TfLiteStatus Quantize(TfLiteContext* context, float scale, + int32_t zero_point, float f, int32_t& q) { + const float tmp = TfLiteRound(f / scale); + const bool no_integer_overflow_from_quantization = + (tmp >= static_cast(std::numeric_limits::min()) && + tmp <= static_cast(std::numeric_limits::max())); + TF_LITE_ENSURE(context, no_integer_overflow_from_quantization); + q = zero_point + static_cast(tmp); + return kTfLiteOk; +} + +TfLiteStatus CalculateActivationRangeQuantizedImpl( + TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin, + int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { const auto scale = output->params.scale; const auto zero_point = output->params.zero_point; - auto quantize = [scale, zero_point](float f) { - return zero_point + static_cast(TfLiteRound(f / scale)); - }; - + int32_t tmp_q; if (activation == kTfLiteActRelu) { - *act_min = std::max(qmin, quantize(0.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 0.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); *act_max = qmax; } else if (activation == kTfLiteActRelu6) { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = std::min(qmax, quantize(6.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 0.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); + TF_LITE_ENSURE_OK(context, + 
Quantize(context, scale, zero_point, 6.0, tmp_q)); + *act_max = std::min(qmax, tmp_q); } else if (activation == kTfLiteActReluN1To1) { - *act_min = std::max(qmin, quantize(-1.0)); - *act_max = std::min(qmax, quantize(1.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, -1.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 1.0, tmp_q)); + *act_max = std::min(qmax, tmp_q); } else { *act_min = qmin; *act_max = qmax; } + return kTfLiteOk; } } // namespace @@ -373,36 +406,87 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context, TF_LITE_ENSURE(context, false); } - CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, - act_max); - return kTfLiteOk; + return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax, + output, act_min, act_max); } bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { return TfLiteIntArrayEqual(input1->dims, input2->dims); } -// TODO(petewarden): Having macros around this is ugly, look at other strategies -// before replicating this approach elsewhere. -#ifndef TF_LITE_STATIC_MEMORY +#ifndef ARDUINO +TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteIntArray** output_shape) { + if (NumDimensions(input) != 1) { + TF_LITE_KERNEL_LOG(const_cast(context), + "Invalid %dD input tensor (must be a 1D tensor).", + NumDimensions(input)); + return kTfLiteError; + } + const int output_dims = SizeOfDimension(input, 0); + IntArrayUniquePtr shape(TfLiteIntArrayCreate(output_dims)); + for (int i = 0; i < output_dims; i++) { + shape->data[i] = input->data.i32[i]; + } + *output_shape = shape.release(); + return kTfLiteOk; +} + +// TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY +// build results in a 6KB size increase, even though the function is unsused for +// that build. 
What appears to be happening is that while the linker drops the +// unsused function, the string library that gets pulled in is not dropped, +// resulting in the increased binary size. +std::string GetShapeDebugString(const TfLiteIntArray* shape) { + std::string str; + for (int d = 0; d < shape->size; ++d) { + if (str.empty()) + str = "[" + std::to_string(shape->data[d]); + else + // Don't add space after "," to make the output consistent with + // tensorflow::shape_inference::InferenceContext::DebugString() + str += "," + std::to_string(shape->data[d]); + } + if (str.empty()) { + str = "[]"; + } else { + str += "]"; + } + return str; +} + +std::string GetTensorDebugString(const TfLiteTensor* tensor) { + return std::string("{\n type: ") + TfLiteTypeGetName(tensor->type) + + "\n data: {...}\n dims: " + GetShapeDebugString(tensor->dims) + + "\n}"; +} + TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int out_dims = std::max(dims1, dims2); - if (NumElements(input1) == 0) { - *output_shape = TfLiteIntArrayCopy(input1->dims); - return kTfLiteOk; - } - std::unique_ptr shape( - TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); + const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int out_dims = std::max(dims1, dims2); + + IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims)); for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1); - shape->data[out_dims - i - 1] = std::max(d1, d2); + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 
1 : SizeOfDimension(input2, dims2 - i - 1); + if (!(d1 == d2 || d1 == 1 || d2 == 1)) { + TF_LITE_KERNEL_LOG(context, + "Given shapes, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str()); + return kTfLiteError; + } + + if (d1 == 0 || d2 == 0) { + shape->data[out_dims - i - 1] = 0; + } else { + shape->data[out_dims - i - 1] = std::max(d1, d2); + } } *output_shape = shape.release(); return kTfLiteOk; @@ -413,68 +497,103 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input2, const TfLiteTensor* input3, TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int dims3 = NumDimensions(input3); - int out_dims = std::max(std::max(dims1, dims2), dims3); - std::unique_ptr shape( - TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); + const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int dims3 = NumDimensions(input3); + const int out_dims = std::max(std::max(dims1, dims2), dims3); + IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims)); for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); + const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int min_value = std::min(std::min(d1, d2), d3); int max_value = std::max(std::max(d1, d2), d3); - TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value); - TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value); - TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value); + // If one dimention is 0, others must be 0 or 1. 
+ if (min_value == 0) max_value = 0; + if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || + !(d3 == 1 || d3 == max_value)) { + TF_LITE_KERNEL_LOG(context, + "Given shapes, %s, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str(), + GetShapeDebugString(input3->dims).c_str()); + return kTfLiteError; + } shape->data[out_dims - i - 1] = max_value; } *output_shape = shape.release(); return kTfLiteOk; } -#endif // TF_LITE_STATIC_MEMORY +#endif // ARDUINO // Size of string is not constant, return 0 in such case. int TfLiteTypeGetSize(TfLiteType type) { switch (type) { case kTfLiteUInt8: - TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1); + static_assert(sizeof(uint8_t) == 1, ""); return 1; case kTfLiteInt8: - TF_LITE_ASSERT_EQ(sizeof(int8_t), 1); + static_assert(sizeof(int8_t) == 1, ""); return 1; case kTfLiteBool: return sizeof(bool); + case kTfLiteUInt16: + static_assert(sizeof(uint16_t) == 2, ""); + return 2; case kTfLiteInt16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); + static_assert(sizeof(int16_t) == 2, ""); return 2; case kTfLiteFloat16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); + static_assert(sizeof(int16_t) == 2, ""); return 2; case kTfLiteFloat32: - TF_LITE_ASSERT_EQ(sizeof(float), 4); + static_assert(sizeof(float) == 4, ""); return 4; case kTfLiteInt32: - TF_LITE_ASSERT_EQ(sizeof(int32_t), 4); + static_assert(sizeof(int32_t) == 4, ""); + return 4; + case kTfLiteUInt32: + static_assert(sizeof(uint32_t) == 4, ""); return 4; case kTfLiteInt64: - TF_LITE_ASSERT_EQ(sizeof(int64_t), 8); + static_assert(sizeof(int64_t) == 8, ""); return 8; case kTfLiteUInt64: - TF_LITE_ASSERT_EQ(sizeof(uint64_t), 8); + static_assert(sizeof(uint64_t) == 8, ""); return 8; case kTfLiteFloat64: - TF_LITE_ASSERT_EQ(sizeof(double), 8); + static_assert(sizeof(double) == 8, ""); return 8; case kTfLiteComplex64: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 8); + static_assert(sizeof(std::complex) == 8, ""); return 8; 
case kTfLiteComplex128: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 16); + static_assert(sizeof(std::complex) == 16, ""); return 16; default: return 0; } } +bool IsMobilePlatform() { +#if defined(ANDROID) || defined(__ANDROID__) + return true; +#elif defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE) + return true; +#else + return false; +#endif +} + +bool HasUnspecifiedDimension(const TfLiteTensor* tensor) { +#ifndef ARDUINO + if (tensor->dims_signature) { + for (int i : TfLiteIntArrayView(tensor->dims_signature)) { + if (i == -1) return true; + } + } +#endif // ARDUINO + return false; +} + } // namespace tflite diff --git a/src/tensorflow/lite/kernels/kernel_util.h b/src/tensorflow/lite/kernels/kernel_util.h index 7a1aa16..cdd7737 100644 --- a/src/tensorflow/lite/kernels/kernel_util.h +++ b/src/tensorflow/lite/kernels/kernel_util.h @@ -18,9 +18,15 @@ limitations under the License. #include #include +#ifndef ARDUINO +#include +#endif // ARDUINO -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/common.h" +#ifndef NDEBUG +#include "tensorflow/lite/kernels/op_macros.h" +#endif namespace tflite { @@ -94,7 +100,7 @@ TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node, const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, const TfLiteNode* node, int index); -#ifndef TF_LITE_STATIC_MEMORY +#ifndef ARDUINO // Note: You must check if result is not null: // // TfLiteTensor* my_tensor = GetTemporary(context, node, kMyTensorIdx); @@ -142,30 +148,49 @@ const TfLiteTensor* GetIntermediates(TfLiteContext* context, TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context, const TfLiteNode* node, int index, TfLiteTensor** tensor); -#endif // TF_LITE_STATIC_MEMORY +#endif // ARDUINO inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; } inline int SizeOfDimension(const 
TfLiteTensor* t, int dim) { return t->dims->data[dim]; } -inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } -inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } +inline int NumInputs(const TfLiteNode* node) { + return node->inputs == nullptr ? 0 : node->inputs->size; +} +inline int NumOutputs(const TfLiteNode* node) { + return node->outputs == nullptr ? 0 : node->outputs->size; +} -#ifndef TF_LITE_STATIC_MEMORY +#ifndef ARDUINO inline int NumIntermediates(const TfLiteNode* node) { return node->intermediates->size; } -#endif // TF_LITE_STATIC_MEMORY +#endif // ARDUINO -inline int64_t NumElements(const TfLiteIntArray* dims) { +inline int64_t NumElements(const int* dims, int num_dims) { int64_t count = 1; - for (int i = 0; i < dims->size; ++i) { - count *= dims->data[i]; + for (int i = 0; i < num_dims; ++i) { +#ifndef NDEBUG + if (count <= 0) { + break; + } + // Check that number of elements can fit in 32 bit int. Most of tflite + // assumes the result of `NumElements` is < MAX_INT and static or implicit + // casts to `int32_t` without any checks. It is more meaningful to check + // that the result fits into 32 bits than for standard overflow on 64 bit + // type. + TF_LITE_ASSERT(dims[i] < std::numeric_limits::max() / count); +#endif + count *= dims[i]; } return count; } +inline int64_t NumElements(const TfLiteIntArray* dims) { + return NumElements(dims->data, dims->size); +} + inline int64_t NumElements(const TfLiteTensor* t) { return NumElements(t->dims); } @@ -179,27 +204,33 @@ inline bool IsConstantTensor(const TfLiteTensor* tensor) { return tensor->allocation_type == kTfLiteMmapRo; } +inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) { + return IsConstantTensor(tensor) || + (tensor->allocation_type == kTfLitePersistentRo); +} + // Determines whether tensor is dynamic. Note that a tensor can be non-const and // not dynamic. This function specifically checks for a dynamic tensor. 
inline bool IsDynamicTensor(const TfLiteTensor* tensor) { return tensor->allocation_type == kTfLiteDynamic; } - +#ifndef ARDUINO // Sets tensor to dynamic. inline void SetTensorToDynamic(TfLiteTensor* tensor) { if (tensor->allocation_type != kTfLiteDynamic) { + TfLiteTensorDataFree(tensor); tensor->allocation_type = kTfLiteDynamic; - tensor->data.raw = nullptr; } } // Sets tensor to persistent and read-only. inline void SetTensorToPersistentRo(TfLiteTensor* tensor) { if (tensor->allocation_type != kTfLitePersistentRo) { + TfLiteTensorDataFree(tensor); tensor->allocation_type = kTfLitePersistentRo; - tensor->data.raw = nullptr; } } +#endif // ARDUINO // Determines whether it is a hybrid op - one that has float inputs and // quantized weights. @@ -214,14 +245,15 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift); + int32_t* per_channel_multiplier, int32_t* per_channel_shift); TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels); + int32_t* per_channel_multiplier, int32_t* per_channel_shift, + int num_channels); // Calculates the multiplication factor for a quantized convolution (or // quantized depthwise convolution) involving the given tensors. Returns an @@ -270,6 +302,18 @@ void CalculateActivationRange(TfLiteFusedActivation activation, // Return true if the given tensors have the same shape. 
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2); +#if !defined(ARDUINO) +// Gets the output shape from the input tensor. +TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteIntArray** output_shape); + +std::string GetShapeDebugString(const TfLiteIntArray* shape); + +std::string GetTensorDebugString(const TfLiteTensor* tensor); + +#endif // !defined(ARDUINO) + // Calculates the output_shape that is necessary for element-wise operations // with broadcasting involving the two input tensors. TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, @@ -285,9 +329,15 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input3, TfLiteIntArray** output_shape); -// Return the size of given type in bytes. Return 0 in in case of string. +// Return the size of given type in bytes. Return 0 in case of string. int TfLiteTypeGetSize(TfLiteType type); +// Whether the current platform is mobile (Android or iOS). +bool IsMobilePlatform(); + +// Returns whether there is unspecified dimension in the tensor's dim signature. +bool HasUnspecifiedDimension(const TfLiteTensor* tensor); + } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_ diff --git a/src/tensorflow/lite/kernels/op_macros.h b/src/tensorflow/lite/kernels/op_macros.h index 293dc76..4b241f0 100644 --- a/src/tensorflow/lite/kernels/op_macros.h +++ b/src/tensorflow/lite/kernels/op_macros.h @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,59 +15,30 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ -// If we're on a platform without standard IO functions, fall back to a -// non-portable function. 
-#ifdef TF_LITE_MCU_DEBUG_LOG +#include "tensorflow/lite/micro/micro_log.h" -#include "tensorflow/lite/micro/debug_log.h" - -#define DEBUG_LOG(x) \ - do { \ - DebugLog(x); \ - } while (0) - -inline void InfiniteLoop() { - DEBUG_LOG("HALTED\n"); +#if !defined(TF_LITE_MCU_DEBUG_LOG) +#include +#define TFLITE_ABORT abort() +#else +inline void AbortImpl() { + MicroPrintf("HALTED"); while (1) { } } +#define TFLITE_ABORT AbortImpl(); +#endif -#define TFLITE_ABORT InfiniteLoop(); - -#else // TF_LITE_MCU_DEBUG_LOG - -#include -#include - -#define DEBUG_LOG(x) \ - do { \ - fprintf(stderr, "%s", (x)); \ - } while (0) - -// Report Error for unsupported type by op 'op_name' and returns kTfLiteError. -#define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name) \ - do { \ - TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \ - __FILE__, __LINE__, TfLiteTypeGetName(type), \ - (op_name)); \ - return kTfLiteError; \ - } while (0) - -#define TFLITE_ABORT abort() - -#endif // TF_LITE_MCU_DEBUG_LOG - -#if defined(NDEBUG) || defined(ARDUINO) +#if defined(ARDUINO) #define TFLITE_ASSERT_FALSE (static_cast(0)) #else #define TFLITE_ASSERT_FALSE TFLITE_ABORT #endif -#define TF_LITE_FATAL(msg) \ - do { \ - DEBUG_LOG(msg); \ - DEBUG_LOG("\nFATAL\n"); \ - TFLITE_ABORT; \ +#define TF_LITE_FATAL(msg) \ + do { \ + MicroPrintf("%s", (msg)); \ + TFLITE_ABORT; \ } while (0) #define TF_LITE_ASSERT(x) \ @@ -75,9 +46,4 @@ inline void InfiniteLoop() { if (!(x)) TF_LITE_FATAL(#x); \ } while (0) -#define TF_LITE_ASSERT_EQ(x, y) \ - do { \ - if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ - } while (0) - #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ diff --git a/src/tensorflow/lite/kernels/padding.h b/src/tensorflow/lite/kernels/padding.h index 1116b1d..cc9d596 100644 --- a/src/tensorflow/lite/kernels/padding.h +++ b/src/tensorflow/lite/kernels/padding.h @@ -15,11 +15,11 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_ #define TENSORFLOW_LITE_KERNELS_PADDING_H_ -#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/core/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" namespace tflite { -// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover. inline int ComputePadding(int stride, int dilation_rate, int in_size, int filter_size, int out_size) { int effective_filter_size = (filter_size - 1) * dilation_rate + 1; @@ -44,6 +44,11 @@ inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, inline int ComputeOutSize(TfLitePadding padding, int image_size, int filter_size, int stride, int dilation_rate = 1) { int effective_filter_size = (filter_size - 1) * dilation_rate + 1; + + // TODO(b/186448822): This uses 0 since the function has no other way to + // report error case + if (stride == 0) return 0; + switch (padding) { case kTfLitePaddingSame: return (image_size + stride - 1) / stride; @@ -75,6 +80,36 @@ inline TfLitePaddingValues ComputePaddingHeightWidth( padding_values.width_offset = offset; return padding_values; } + +inline Padding3DValues ComputePadding3DValues( + int stride_height, int stride_width, int stride_depth, + int dilation_rate_height, int dilation_rate_width, int dilation_rate_depth, + int in_height, int in_width, int in_depth, int filter_height, + int filter_width, int filter_depth, TfLitePadding padding, int* out_height, + int* out_width, int* out_depth) { + *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, + dilation_rate_width); + *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, + dilation_rate_height); + *out_depth = ComputeOutSize(padding, in_depth, filter_depth, stride_depth, + dilation_rate_depth); + + Padding3DValues padding_values; + int offset = 0; + padding_values.depth = + ComputePaddingWithOffset(stride_depth, dilation_rate_depth, in_depth, + filter_depth, *out_depth, &offset); 
+ padding_values.depth_offset = offset; + padding_values.height = + ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, + filter_height, *out_height, &offset); + padding_values.height_offset = offset; + padding_values.width = + ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, + filter_width, *out_width, &offset); + padding_values.width_offset = offset; + return padding_values; +} } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ diff --git a/src/tensorflow/lite/micro/all_ops_resolver.cpp b/src/tensorflow/lite/micro/all_ops_resolver.cpp deleted file mode 100644 index f7bbcb9..0000000 --- a/src/tensorflow/lite/micro/all_ops_resolver.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/all_ops_resolver.h" - -#include "tensorflow/lite/micro/kernels/micro_ops.h" - -namespace tflite { - -AllOpsResolver::AllOpsResolver() { - // Please keep this list of Builtin Operators in alphabetical order. 
- AddAbs(); - AddAdd(); - AddArgMax(); - AddArgMin(); - AddAveragePool2D(); - AddCeil(); - AddConcatenation(); - AddConv2D(); - AddCos(); - AddDepthwiseConv2D(); - AddDequantize(); - AddDetectionPostprocess(); - AddEqual(); - AddEthosU(); - AddFloor(); - AddFullyConnected(); - AddGreater(); - AddGreaterEqual(); - AddHardSwish(); - AddL2Normalization(); - AddLess(); - AddLessEqual(); - AddLog(); - AddLogicalAnd(); - AddLogicalNot(); - AddLogicalOr(); - AddLogistic(); - AddMaximum(); - AddMaxPool2D(); - AddMean(); - AddMinimum(); - AddMul(); - AddNeg(); - AddNotEqual(); - AddPack(); - AddPad(); - AddPadV2(); - AddPrelu(); - AddQuantize(); - AddReduceMax(); - AddRelu(); - AddRelu6(); - AddReshape(); - AddResizeNearestNeighbor(); - AddRound(); - AddRsqrt(); - AddShape(); - AddSin(); - AddSoftmax(); - AddSplit(); - AddSplitV(); - AddSqrt(); - AddSquare(); - AddStridedSlice(); - AddSub(); - AddSvdf(); - AddTanh(); - AddUnpack(); -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/all_ops_resolver.h b/src/tensorflow/lite/micro/all_ops_resolver.h deleted file mode 100644 index e8105b9..0000000 --- a/src/tensorflow/lite/micro/all_ops_resolver.h +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ -#define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" - -namespace tflite { - -// The magic number in the template parameter is the maximum number of ops that -// can be added to AllOpsResolver. It can be increased if needed. And most -// applications that care about the memory footprint will want to directly use -// MicroMutableOpResolver and have an application specific template parameter. -// The examples directory has sample code for this. -class AllOpsResolver : public MicroMutableOpResolver<128> { - public: - AllOpsResolver(); - - private: - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ diff --git a/src/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h b/src/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h new file mode 100644 index 0000000..b92d6b2 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h @@ -0,0 +1,100 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_ + +#include +#include + +#include "tensorflow/lite/c/c_api_types.h" + +namespace tflite { +// Interface classes that the TFLM framework relies on to get buffers it needs. +// There are two types of buffers that the TFLM framework requires: persistent +// and non-persistent. Persistent buffers, once allocated, are never freed by +// the TFLM framework. Non-persist buffers can be allocated and deallocated by +// the TFLM framework. This file defines two interfaces classes that TFLM +// framework will rely on to manage these buffers. + +// Interface class for managing persistent buffers. +class IPersistentBufferAllocator { + public: + IPersistentBufferAllocator() {} + virtual ~IPersistentBufferAllocator() {} + + // Allocates persistent memory. The persistent buffer is never freed. + virtual uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) = 0; + + // Returns the size of all persistent allocations in bytes. + virtual size_t GetPersistentUsedBytes() const = 0; +}; + +// Interface class for managing non-persistent buffers. +// The default non-persistent buffers are temp buffers that are not resizable. +// Support of at least one resizable buffer is required. +class INonPersistentBufferAllocator { + public: + INonPersistentBufferAllocator() {} + virtual ~INonPersistentBufferAllocator() {} + + // Allocates a temporary buffer. This buffer is not resizable. + virtual uint8_t* AllocateTemp(size_t size, size_t alignment) = 0; + + // Signals that a temporary buffer is no longer needed. + virtual void DeallocateTemp(uint8_t* buf) = 0; + + // Returns true if all temporary buffers are already deallocated. + virtual bool IsAllTempDeallocated() = 0; + + // Signals that all temporary allocations can be reclaimed. 
TFLM calls this + // API when it knows that all temporary buffers that it requested has been + // deallocated. The goal of API is to facilitate implementations of + // INonPersistentBufferAllocator can reuse buffer with some reasonable + // complexity. + virtual TfLiteStatus ResetTempAllocations() = 0; + + // Returns a buffer that is resizable viable ResizeBuffer(). + virtual uint8_t* AllocateResizableBuffer(size_t size, size_t alignment) = 0; + + // Resizes a buffer that is previously returned by the + // AllocateResizableBuffer. + virtual TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) = 0; + + // Frees up the memory occupied by the resizable buffer. + virtual TfLiteStatus DeallocateResizableBuffer(uint8_t* resizable_buf) = 0; + + // Returns a pointer pointing to the start of the overlay memory, which is + // used for activation tensors and scratch buffers by kernels at Invoke stage. + virtual uint8_t* GetOverlayMemoryAddress() const = 0; + + // Reserves the size of the overlay memory. This overlay is reserved for the + // kernels at Invoke stage. This is referred to as the overlay because before + // Invoket state, the same memory can be used for temp buffers. The layout of + // the memory is planned by the memory planner separately at Invoke stage. + virtual TfLiteStatus ReserveNonPersistentOverlayMemory(size_t size, + size_t alignment) = 0; + + // Returns the size of non-persistent buffer in use. + virtual size_t GetNonPersistentUsedBytes() const = 0; + + // Returns the number of bytes available with a given alignment. This number + // takes in account any temporary allocations. 
+ virtual size_t GetAvailableMemory(size_t alignment) const = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cpp b/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cpp new file mode 100644 index 0000000..a8f00ea --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cpp @@ -0,0 +1,170 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h" + +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +NonPersistentArenaBufferAllocator::NonPersistentArenaBufferAllocator( + uint8_t* buffer, size_t buffer_size) + : buffer_head_(buffer), + buffer_tail_(buffer + buffer_size), + head_temp_(buffer), + next_temp_(buffer) {} + +NonPersistentArenaBufferAllocator::~NonPersistentArenaBufferAllocator() {} + +// Allocates a temporary buffer. This buffer is not resizable. 
+uint8_t* NonPersistentArenaBufferAllocator::AllocateTemp(size_t size, + size_t alignment) { + uint8_t* const aligned_result = AlignPointerUp(next_temp_, alignment); + const size_t available_memory = buffer_tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to allocate temp memory. Requested: %u, " + "available %u, missing: %u", + size, available_memory, size - available_memory); + return nullptr; + } + next_temp_ = aligned_result + size; + temp_buffer_ptr_check_sum_ ^= reinterpret_cast(aligned_result); + temp_buffer_count_++; + return aligned_result; +} + +// Signals that a temporary buffer is no longer needed. +void NonPersistentArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) { + temp_buffer_ptr_check_sum_ ^= reinterpret_cast(temp_buf); + temp_buffer_count_--; +} + +// Returns true if all temporary buffers are already deallocated. +bool NonPersistentArenaBufferAllocator::IsAllTempDeallocated() { + if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) { + MicroPrintf( + "Number of allocated temp buffers: %d. Checksum passing status: %d", + temp_buffer_count_, !temp_buffer_ptr_check_sum_); + return false; + } + return true; +} + +// Signals that all temporary allocations can be reclaimed. TFLM calls this +// API when it knows that all temporary buffers that it requested has been +// deallocated. The goal of API is to facilitate implementations of +// INonPersistentBufferAllocator can reuse buffer with some reasonable +// complexity. +TfLiteStatus NonPersistentArenaBufferAllocator::ResetTempAllocations() { + if (!IsAllTempDeallocated()) { + MicroPrintf( + "All temp buffers must be freed before calling ResetTempAllocations()"); + return kTfLiteError; + } + next_temp_ = head_temp_; + return kTfLiteOk; +} + +// Returns a buffer that is resizable viable ResizeBuffer(). 
+uint8_t* NonPersistentArenaBufferAllocator::AllocateResizableBuffer( + size_t size, size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expected_resizable_buf = AlignPointerUp(buffer_head_, alignment); + + if (resizable_buffer_allocated_) { + MicroPrintf( + "Cannot allocate a new resizable buffer when one is already allocated"); + return nullptr; + } + + if (ResizeBuffer(expected_resizable_buf, size, alignment) == kTfLiteOk) { + resizable_buffer_allocated_ = true; + return expected_resizable_buf; + } + return nullptr; +} + +// Resizes a buffer that is previously returned by the AllocateResizableBuffer. +// Note that ResizeBuffer(old_resizable_buf, 0, 1) effectively deallocates +// a previous allocated resizable buffer. +TfLiteStatus NonPersistentArenaBufferAllocator::ResizeBuffer( + uint8_t* resizable_buf, size_t size, size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (resizable_buf != expect_resizable_buf) { + MicroPrintf("Internal error: buffer is not resizable"); + return kTfLiteError; + } + if (head_temp_ != next_temp_) { + MicroPrintf("ResetTempAllocations() is not called before ResizeBuffer()."); + return kTfLiteError; + } + + const size_t available_memory = buffer_tail_ - expect_resizable_buf; + if (available_memory < size) { + MicroPrintf( + "Failed to resize buffer. Requested: %u, available %u, missing: %u", + size, available_memory, size - available_memory); + return kTfLiteError; + } + head_temp_ = expect_resizable_buf + size; + next_temp_ = head_temp_; + + return kTfLiteOk; +} + +// Frees up the memory occupied by the resizable buffer. 
+TfLiteStatus NonPersistentArenaBufferAllocator::DeallocateResizableBuffer( + uint8_t* resizable_buf) { + TfLiteStatus status = ResizeBuffer(resizable_buf, 0, 1); + if (status == kTfLiteOk) { + resizable_buffer_allocated_ = false; + } + return status; +} + +// Returns a pointer pointing to the start of the overlay memory, which is +// used for activation tensors and scratch buffers by kernels at Invoke stage. +uint8_t* NonPersistentArenaBufferAllocator::GetOverlayMemoryAddress() const { + return buffer_head_; +} + +// Reserves the size of the overlay memory. This overlay is reserved for the +// kernels at Invoke stage. This is referred to as the overlay because before +// Invoket state, the same memory can be used for temp buffers. The layout of +// the memory is planned by the memory planner separately at Invoke stage. +TfLiteStatus +NonPersistentArenaBufferAllocator::ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) { + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + return ResizeBuffer(expect_resizable_buf, size, alignment); +} + +// Returns the size of non-persistent buffer in use. +size_t NonPersistentArenaBufferAllocator::GetNonPersistentUsedBytes() const { + return (next_temp_ - buffer_head_); +} + +// Returns the number of bytes available with a given alignment. This number +// takes in account any temporary allocations. 
+size_t NonPersistentArenaBufferAllocator::GetAvailableMemory( + size_t alignment) const { + uint8_t* const aligned_temp = AlignPointerUp(next_temp_, alignment); + uint8_t* const aligned_tail = AlignPointerDown(buffer_tail_, alignment); + return aligned_tail - aligned_temp; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h b/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h new file mode 100644 index 0000000..ebd3764 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h @@ -0,0 +1,104 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h" +#include "tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// Implement INonPersistentBufferAllocator on an arena that is dedicated for +// non-persistent buffers. 
+class NonPersistentArenaBufferAllocator : public INonPersistentBufferAllocator { + public: + NonPersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~NonPersistentArenaBufferAllocator(); + + // Allocates a temporary buffer. This buffer is not resizable. + uint8_t* AllocateTemp(size_t size, size_t alignment) override; + + // Signals that a temporary buffer is no longer needed. + void DeallocateTemp(uint8_t* buf) override; + + // Returns true if all temporary buffers are already deallocated. + bool IsAllTempDeallocated() override; + + // Signals that all temporary allocations can be reclaimed. TFLM calls this + // API when it knows that all temporary buffers that it requested has been + // deallocated. + TfLiteStatus ResetTempAllocations() override; + + // Returns a buffer that is resizable viable ResizeBuffer(). + uint8_t* AllocateResizableBuffer(size_t size, size_t alignment) override; + + // Resizes a buffer that is previously returned by the + // AllocateResizableBuffer. + TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + + // Frees up the memory occupied by the resizable buffer. + TfLiteStatus DeallocateResizableBuffer(uint8_t* resizable_buf) override; + + // Returns a pointer pointing to the start of the overlay memory, which is + // used for activation tensors and scratch buffers by kernels at Invoke stage. + uint8_t* GetOverlayMemoryAddress() const override; + + // Reserves the size of the overlay memory. This overlay is reserved for the + // kernels at Invoke stage. This is referred to as the overlay because before + // Invoket state, the same memory can be used for temp buffers. The layout of + // the memory is planned by the memory planner separately at Invoke stage. + TfLiteStatus ReserveNonPersistentOverlayMemory(size_t size, + size_t alignment) override; + + // Returns the size of non-persistent buffer in use. 
+ size_t GetNonPersistentUsedBytes() const override; + + // Returns the number of bytes available with a given alignment. This number + // takes in account any temporary allocations. + size_t GetAvailableMemory(size_t alignment) const override; + + TF_LITE_REMOVE_VIRTUAL_DELETE + + private: + // The memory arena that this allocator manages. + uint8_t* const buffer_head_; + uint8_t* const buffer_tail_; + + // The whole region is split into two parts: + // buffer_head_ to head_temp_ - 1 belongs to the only resizable buffer. + // head_temp_ to buffer_tail_ can be used for (non-resizable) temp buffers. + uint8_t* head_temp_; + + // next_temp_ points to the next available temp buffer allocation address and + // its range is between head_temp_ and buffer_tail_ + uint8_t* next_temp_; + + // XOR Check sum for outstanding temp buffers. + // If all temp buffers are deallocated OR no temp buffers are allocated, + // temp_buffer_ptr_check_sum_ == nullptr. + intptr_t temp_buffer_ptr_check_sum_ = 0; + // Count of outstanding temp buffers. + int temp_buffer_count_ = 0; + bool resizable_buffer_allocated_ = false; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cpp b/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cpp new file mode 100644 index 0000000..a770bc9 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cpp @@ -0,0 +1,52 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h" + +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +PersistentArenaBufferAllocator::PersistentArenaBufferAllocator( + uint8_t* buffer, size_t buffer_size) + : buffer_head_(buffer), + buffer_tail_(buffer + buffer_size), + tail_temp_(buffer_tail_) {} + +PersistentArenaBufferAllocator::~PersistentArenaBufferAllocator() {} + +uint8_t* PersistentArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { + uint8_t* const aligned_result = + AlignPointerDown(tail_temp_ - size, alignment); + if (aligned_result < buffer_head_) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + const size_t missing_memory = buffer_head_ - aligned_result; + MicroPrintf( + "Failed to allocate tail memory. 
Requested: %u, " + "available %u, missing: %u", + size, size - missing_memory, missing_memory); +#endif + return nullptr; + } + tail_temp_ = aligned_result; + return aligned_result; +} + +size_t PersistentArenaBufferAllocator::GetPersistentUsedBytes() const { + return buffer_tail_ - tail_temp_; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h b/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h new file mode 100644 index 0000000..2c8e3dc --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h @@ -0,0 +1,58 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h" +#include "tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// PersistentArenaBufferAllocator is an implementatation of +// IPersistentBufferAllocator interface on an arena that is dedicated for +// persistent buffers. 
+class PersistentArenaBufferAllocator : public IPersistentBufferAllocator { + public: + PersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~PersistentArenaBufferAllocator(); + + // Allocates persistent memory. The persistent buffer is never freed. + // Returns nullptr if errors occured. + uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) override; + + // Returns the size of all persistent allocations in bytes. + size_t GetPersistentUsedBytes() const override; + + TF_LITE_REMOVE_VIRTUAL_DELETE + private: + // The memory arena that this allocator manages. + uint8_t* const buffer_head_; + uint8_t* const buffer_tail_; + + // The whole region is split into two parts: + // tail_temp_ to buffer_tail_ contains allocated buffers; + // buffer_head_ to tail_temp_ - 1 belongs to still available spaces. + // So in essence, the allocated region grows from the bottom and emulates + // SingleArenaBufferAllocator's persistent part. + uint8_t* tail_temp_; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cpp b/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cpp new file mode 100644 index 0000000..e21e364 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cpp @@ -0,0 +1,85 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h" + +#include + +#include "tensorflow/lite/kernels/internal/compatibility.h" + +namespace tflite { + +RecordingSingleArenaBufferAllocator::RecordingSingleArenaBufferAllocator( + uint8_t* buffer_head, size_t buffer_size) + : SingleArenaBufferAllocator(buffer_head, buffer_size), + requested_head_bytes_(0), + requested_tail_bytes_(0), + used_bytes_(0), + alloc_count_(0) {} + +RecordingSingleArenaBufferAllocator::~RecordingSingleArenaBufferAllocator() {} + +RecordingSingleArenaBufferAllocator* +RecordingSingleArenaBufferAllocator::Create(uint8_t* buffer_head, + size_t buffer_size) { + TFLITE_DCHECK(buffer_head != nullptr); + RecordingSingleArenaBufferAllocator tmp = + RecordingSingleArenaBufferAllocator(buffer_head, buffer_size); + + uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer( + sizeof(RecordingSingleArenaBufferAllocator), + alignof(RecordingSingleArenaBufferAllocator)); + // Use the default copy constructor to populate internal states. 
+ return new (allocator_buffer) RecordingSingleArenaBufferAllocator(tmp); +} + +size_t RecordingSingleArenaBufferAllocator::GetRequestedBytes() const { + return requested_head_bytes_ + requested_tail_bytes_; +} + +size_t RecordingSingleArenaBufferAllocator::GetUsedBytes() const { + return used_bytes_; +} + +size_t RecordingSingleArenaBufferAllocator::GetAllocatedCount() const { + return alloc_count_; +} + +TfLiteStatus RecordingSingleArenaBufferAllocator::ResizeBuffer( + uint8_t* resizable_buf, size_t size, size_t alignment) { + const uint8_t* previous_head = head(); + TfLiteStatus status = + SingleArenaBufferAllocator::ResizeBuffer(resizable_buf, size, alignment); + if (status == kTfLiteOk) { + used_bytes_ += head() - previous_head; + requested_head_bytes_ = size; + } + return status; +} + +uint8_t* RecordingSingleArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { + const uint8_t* previous_tail = tail(); + uint8_t* result = + SingleArenaBufferAllocator::AllocatePersistentBuffer(size, alignment); + if (result != nullptr) { + used_bytes_ += previous_tail - tail(); + requested_tail_bytes_ += size; + alloc_count_++; + } + return result; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h b/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h new file mode 100644 index 0000000..94e55a3 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h @@ -0,0 +1,63 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ + +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// Utility class used to log allocations of a SingleArenaBufferAllocator. Should +// only be used in debug/evaluation settings or unit tests to evaluate +// allocation usage. +class RecordingSingleArenaBufferAllocator : public SingleArenaBufferAllocator { + public: + RecordingSingleArenaBufferAllocator(uint8_t* buffer_head, size_t buffer_size); + // TODO(b/157615197): Cleanup constructors/destructor and use factory + // functions. + ~RecordingSingleArenaBufferAllocator() override; + + static RecordingSingleArenaBufferAllocator* Create(uint8_t* buffer_head, + size_t buffer_size); + + // Returns the number of bytes requested from the head or tail. + size_t GetRequestedBytes() const; + + // Returns the number of bytes actually allocated from the head or tail. This + // value will be >= to the number of requested bytes due to padding and + // alignment. + size_t GetUsedBytes() const; + + // Returns the number of alloc calls from the head or tail. 
+ size_t GetAllocatedCount() const; + + TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) override; + + private: + size_t requested_head_bytes_; + size_t requested_tail_bytes_; + size_t used_bytes_; + size_t alloc_count_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cpp b/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cpp new file mode 100644 index 0000000..8655cfd --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cpp @@ -0,0 +1,199 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" + +#include +#include +#include + +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +SingleArenaBufferAllocator::SingleArenaBufferAllocator(uint8_t* buffer_head, + uint8_t* buffer_tail) + : buffer_head_(buffer_head), + buffer_tail_(buffer_tail), + head_(buffer_head), + tail_(buffer_tail), + temp_(buffer_head_) {} + +SingleArenaBufferAllocator::SingleArenaBufferAllocator(uint8_t* buffer, + size_t buffer_size) + : SingleArenaBufferAllocator(buffer, buffer + buffer_size) {} + +/* static */ +SingleArenaBufferAllocator* SingleArenaBufferAllocator::Create( + uint8_t* buffer_head, size_t buffer_size) { + TFLITE_DCHECK(buffer_head != nullptr); + SingleArenaBufferAllocator tmp = + SingleArenaBufferAllocator(buffer_head, buffer_size); + + // Allocate enough bytes from the buffer to create a + // SingleArenaBufferAllocator. The new instance will use the current adjusted + // tail buffer from the tmp allocator instance. + uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer( + sizeof(SingleArenaBufferAllocator), alignof(SingleArenaBufferAllocator)); + // Use the default copy constructor to populate internal states. + return new (allocator_buffer) SingleArenaBufferAllocator(tmp); +} + +SingleArenaBufferAllocator::~SingleArenaBufferAllocator() {} + +uint8_t* SingleArenaBufferAllocator::AllocateResizableBuffer(size_t size, + size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. 
+ uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (ResizeBuffer(expect_resizable_buf, size, alignment) == kTfLiteOk) { + return expect_resizable_buf; + } + return nullptr; +} + +TfLiteStatus SingleArenaBufferAllocator::DeallocateResizableBuffer( + uint8_t* resizable_buf) { + return ResizeBuffer(resizable_buf, 0, 1); +} + +TfLiteStatus SingleArenaBufferAllocator::ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) { + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + return ResizeBuffer(expect_resizable_buf, size, alignment); +} + +TfLiteStatus SingleArenaBufferAllocator::ResizeBuffer(uint8_t* resizable_buf, + size_t size, + size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (head_ != temp_ || resizable_buf != expect_resizable_buf) { + MicroPrintf( + "Internal error: either buffer is not resizable or " + "ResetTempAllocations() is not called before ResizeBuffer()."); + return kTfLiteError; + } + + uint8_t* const aligned_result = AlignPointerUp(buffer_head_, alignment); + const size_t available_memory = tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to resize buffer. Requested: %u, available %u, missing: %u", + size, available_memory, size - available_memory); + return kTfLiteError; + } + head_ = aligned_result + size; + temp_ = head_; + + return kTfLiteOk; +} + +uint8_t* SingleArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { + uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); + if (aligned_result < head_) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + const size_t missing_memory = head_ - aligned_result; + MicroPrintf( + "Failed to allocate tail memory. 
Requested: %u, " + "available %u, missing: %u", + size, size - missing_memory, missing_memory); +#endif + return nullptr; + } + tail_ = aligned_result; + return aligned_result; +} + +uint8_t* SingleArenaBufferAllocator::AllocateTemp(size_t size, + size_t alignment) { + uint8_t* const aligned_result = AlignPointerUp(temp_, alignment); + const size_t available_memory = tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to allocate temp memory. Requested: %u, " + "available %u, missing: %u", + size, available_memory, size - available_memory); + return nullptr; + } + temp_ = aligned_result + size; + temp_buffer_ptr_check_sum_ ^= (reinterpret_cast(aligned_result)); + temp_buffer_count_++; + return aligned_result; +} + +void SingleArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) { + temp_buffer_ptr_check_sum_ ^= (reinterpret_cast(temp_buf)); + temp_buffer_count_--; +} + +bool SingleArenaBufferAllocator::IsAllTempDeallocated() { + if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) { + MicroPrintf( + "Number of allocated temp buffers: %d. 
Checksum passing status: %d", + temp_buffer_count_, !temp_buffer_ptr_check_sum_); + return false; + } + return true; +} + +TfLiteStatus SingleArenaBufferAllocator::ResetTempAllocations() { + // TODO(b/209453859): enable error check based on IsAllTempDeallocated after + // all AllocateTemp have been paird with DeallocateTemp + if (!IsAllTempDeallocated()) { + MicroPrintf( + "All temp buffers must be freed before calling ResetTempAllocations()"); + return kTfLiteError; + } + temp_ = head_; + return kTfLiteOk; +} + +uint8_t* SingleArenaBufferAllocator::GetOverlayMemoryAddress() const { + return buffer_head_; +} + +size_t SingleArenaBufferAllocator::GetNonPersistentUsedBytes() const { + return std::max(head_ - buffer_head_, temp_ - buffer_head_); +} + +size_t SingleArenaBufferAllocator::GetPersistentUsedBytes() const { + return buffer_tail_ - tail_; +} + +size_t SingleArenaBufferAllocator::GetAvailableMemory(size_t alignment) const { + uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment); + uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment); + return aligned_tail - aligned_temp; +} + +size_t SingleArenaBufferAllocator::GetUsedBytes() const { + return GetPersistentUsedBytes() + GetNonPersistentUsedBytes(); +} + +size_t SingleArenaBufferAllocator::GetBufferSize() const { + return buffer_tail_ - buffer_head_; +} + +uint8_t* SingleArenaBufferAllocator::head() const { return head_; } + +uint8_t* SingleArenaBufferAllocator::tail() const { return tail_; } + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h b/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h new file mode 100644 index 0000000..a2e3958 --- /dev/null +++ b/src/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h @@ -0,0 +1,144 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h" +#include "tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// TODO(petewarden): This allocator never frees up or reuses any memory, even +// though we have enough information about lifetimes of the tensors to do so. +// This makes it pretty wasteful, so we should use a more intelligent method. +class SingleArenaBufferAllocator : public INonPersistentBufferAllocator, + public IPersistentBufferAllocator { + public: + // TODO(b/157615197): Cleanup constructors/destructor and use factory + // functions. + SingleArenaBufferAllocator(uint8_t* buffer_head, uint8_t* buffer_tail); + SingleArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~SingleArenaBufferAllocator(); + + // Creates a new SingleArenaBufferAllocator from a given buffer head and size. + static SingleArenaBufferAllocator* Create(uint8_t* buffer_head, + size_t buffer_size); + + // Resizes a buffer that is previously returned by the + // AllocateResizableBuffer. In current implementation, it Adjusts the head + // (lowest address and moving upwards) memory allocation to a given size. 
+ // Calls to this method will also invalidate all temporary allocation values + // (it sets the location of temp space at the end of the head section). This + // call will fail if a chain of allocations through AllocateTemp() have not + // been cleaned up with a call to ResetTempAllocations(). + virtual TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + + // Returns a buffer that is resizable viable ResizeBuffer(). Only one + // resizable buffer is currently supported. + virtual uint8_t* AllocateResizableBuffer(size_t size, + size_t alignment) override; + + // Frees up the memory occupied by the resizable buffer + virtual TfLiteStatus DeallocateResizableBuffer( + uint8_t* resizable_buf) override; + + // Reserves the non-persistent memory that is planned by the memory planner. + virtual TfLiteStatus ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) override; + + // Allocates persistent memory starting at the tail of the arena (highest + // address and moving downwards). + virtual uint8_t* AllocatePersistentBuffer(size_t size, + size_t alignment) override; + + // Allocates a temporary buffer from the head of the arena (lowest address and + // moving upwards) but does not update the actual head allocation size or + // position. The returned buffer is guaranteed until either + // ResetTempAllocations() is called or another call to AllocateFromHead(). + // Repeat calls to this function will create a chain of temp allocations. All + // calls to AllocateTemp() must end with a call to ResetTempAllocations(). If + // AllocateFromHead() is called before a call to ResetTempAllocations(), it + // will fail with an error message. + virtual uint8_t* AllocateTemp(size_t size, size_t alignment) override; + + // Signals that a temporary buffer is no longer needed. This is currently for + // book-keeping purpose and the memory region are not immediately available + // for re-use. 
The deallocated memory region are only reclaimed after + // ResetTempAllocations is called as it is right now. + virtual void DeallocateTemp(uint8_t* buf) override; + + // Returns true if all temporary buffers are already deallocated. + virtual bool IsAllTempDeallocated() override; + + // Resets a chain of temporary allocations back to the current head of the + // arena (lowest address). + virtual TfLiteStatus ResetTempAllocations() override; + + // Returns a pointer to the buffer currently assigned to the head section. + // This buffer is set by calling SetHeadSize(). + uint8_t* GetOverlayMemoryAddress() const override; + + // Returns the size of the head section in bytes. + size_t GetNonPersistentUsedBytes() const override; + + // Returns the size of all allocations in the tail section in bytes. + size_t GetPersistentUsedBytes() const override; + + // Returns the number of bytes available with a given alignment. This number + // takes in account any temporary allocations. + size_t GetAvailableMemory(size_t alignment) const override; + + // Returns the number of used bytes in the allocator. This number takes in + // account any temporary allocations. + size_t GetUsedBytes() const; + + TF_LITE_REMOVE_VIRTUAL_DELETE + + protected: + // Returns a pointer to the current end of the head buffer. + uint8_t* head() const; + + // Returns a pointer to the current end of the tail buffer. + uint8_t* tail() const; + + private: + size_t GetBufferSize() const; + uint8_t* buffer_head_; + uint8_t* buffer_tail_; + uint8_t* head_; + uint8_t* tail_; + uint8_t* temp_; + + // The combination of the checksum of outstanding temporary buffer pointers + // AND the count of outstanding temporary buffer provide a low cost mechanism + // to audit temporary buffers' allocation and deallocation. + // + // XOR Check sum for outstanding temp buffers. + // If all temp buffers are deallocated OR no temp buffers are allocated, + // temp_buffer_ptr_check_sum_ == nullptr. 
+ intptr_t temp_buffer_ptr_check_sum_ = 0; + // Count of outstanding temp buffers. + int temp_buffer_count_ = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp b/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp deleted file mode 100644 index 254e194..0000000 --- a/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cpp +++ /dev/null @@ -1,2845 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. 
-alignas(8) const unsigned char g_keyword_scrambled_model_data[] = { - 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xd0, 0x6e, 0x00, 0x00, 0x60, 0x83, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xbc, 0x6e, 0x00, 0x00, 0xac, 0x56, 0x00, 0x00, - 0x9c, 0x52, 0x00, 0x00, 0x8c, 0x51, 0x00, 0x00, 0x7c, 0x4d, 0x00, 0x00, - 0x2c, 0x4d, 0x00, 0x00, 0x1c, 0x49, 0x00, 0x00, 0x0c, 0x45, 0x00, 0x00, - 0xfc, 0x43, 0x00, 0x00, 0xec, 0x3f, 0x00, 0x00, 0x9c, 0x3f, 0x00, 0x00, - 0x8c, 0x3b, 0x00, 0x00, 0x7c, 0x37, 0x00, 0x00, 0x6c, 0x36, 0x00, 0x00, - 0x5c, 0x32, 0x00, 0x00, 0x0c, 0x32, 0x00, 0x00, 0xfc, 0x2d, 0x00, 0x00, - 0xec, 0x29, 0x00, 0x00, 0xdc, 0x28, 0x00, 0x00, 0xcc, 0x24, 0x00, 0x00, - 0x7c, 0x24, 0x00, 0x00, 0x6c, 0x22, 0x00, 0x00, 0x5c, 0x1a, 0x00, 0x00, - 0xcc, 0x19, 0x00, 0x00, 0xbc, 0x15, 0x00, 0x00, 0xac, 0x0d, 0x00, 0x00, - 0x1c, 0x0d, 0x00, 0x00, 0x0c, 0x09, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, - 0x6c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x2a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x34, 0xe1, 0x4f, 0xa1, 0x63, 0xa4, 0x62, 0xbf, 0x3e, 0x91, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xa3, 0xb2, 0x8f, 0xee, - 0x35, 0xe6, 0xf2, 0xcc, 0x68, 0xa0, 0x33, 0xc4, 0x7d, 0x4e, 0xbb, 0xa9, - 0x10, 0x32, 0x8e, 0x3d, 0x76, 0x14, 0x1c, 0x33, 0x0e, 0x77, 0xf7, 0xc8, - 0x7b, 0x45, 0xc7, 0xdb, 0xcf, 0x87, 0xc7, 0x70, 0xa9, 0x29, 0xfd, 0x70, - 0x32, 0x96, 0x35, 0x7d, 0xe9, 0xac, 0x6d, 0x9b, 0xfd, 0xe4, 0xbc, 0x4a, - 0x57, 0xcd, 0x43, 0xcc, 0x73, 0x72, 0xdf, 0x07, 0x68, 0xc5, 0x67, 0xbd, - 0x8a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - 0xb0, 0xfb, 0x5f, 0xdf, 0x0e, 0xb9, 0xa2, 0xfd, 0x66, 0x86, 0x13, 0x1b, - 0x6d, 0x1d, 0x53, 0xdb, 0x83, 0xbf, 0x44, 0x29, 0x3f, 0x93, 0xee, 0x42, - 
0x9a, 0xf4, 0x31, 0x6e, 0xc3, 0x15, 0x7e, 0x48, 0x72, 0x50, 0xc3, 0x53, - 0xef, 0x35, 0x1f, 0xc2, 0x29, 0x42, 0xb4, 0xd7, 0x4b, 0xd7, 0x98, 0x60, - 0xb9, 0x3e, 0xbb, 0x31, 0x35, 0xc3, 0xf6, 0x15, 0x7a, 0x9a, 0x2c, 0xfd, - 0xff, 0x04, 0xd9, 0x04, 0x57, 0x52, 0xae, 0x99, 0xa3, 0x95, 0xae, 0x6a, - 0x66, 0x52, 0x5f, 0x91, 0x17, 0x83, 0x0d, 0x27, 0x16, 0x02, 0x06, 0x64, - 0x80, 0x05, 0x99, 0x1c, 0x6c, 0xab, 0xb1, 0xa1, 0x0e, 0x44, 0x1f, 0x63, - 0xe9, 0xc1, 0xab, 0x8d, 0x08, 0x79, 0x56, 0xe0, 0x90, 0xa5, 0xb8, 0x3b, - 0xc4, 0x1e, 0xa5, 0x1f, 0x64, 0xe4, 0x0b, 0x72, 0x62, 0x19, 0x5f, 0x66, - 0xc0, 0x9b, 0x7b, 0xc4, 0xe5, 0x9f, 0x82, 0xa7, 0x16, 0x92, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x3e, 0x3d, 0xf4, 0x61, - 0x45, 0x2a, 0x48, 0x53, 0x1f, 0x22, 0x74, 0x65, 0xea, 0x5a, 0x00, 0x83, - 0x68, 0xf9, 0xbb, 0xa3, 0xc2, 0x1a, 0x8f, 0xe1, 0xfb, 0x76, 0x6a, 0xe9, - 0x1a, 0x0e, 0x4d, 0x32, 0xc6, 0xf3, 0x8d, 0x85, 0x54, 0xa1, 0xe9, 0xb8, - 0x35, 0xee, 0xba, 0x53, 0x40, 0xa2, 0xea, 0x7f, 0xc3, 0x99, 0x71, 0x17, - 0xdd, 0xd5, 0xfe, 0xdf, 0x5e, 0x15, 0xa0, 0x73, 0xf8, 0x78, 0x49, 0x73, - 0xcc, 0xf0, 0x18, 0x12, 0x06, 0x81, 0xd6, 0x19, 0x2c, 0xa8, 0xd7, 0x80, - 0x19, 0x19, 0xbf, 0x1e, 0x50, 0xb1, 0xfb, 0xb3, 0xa6, 0x56, 0x6f, 0x52, - 0xa6, 0xc0, 0xdd, 0x3f, 0xbb, 0x13, 0x6e, 0x04, 0xdf, 0x79, 0xca, 0x8b, - 0xa5, 0x9c, 0xa1, 0x78, 0x49, 0xca, 0xe5, 0x29, 0xbb, 0x29, 0x7c, 0x96, - 0xc6, 0x29, 0x06, 0x99, 0xec, 0x50, 0xd1, 0xe8, 0x9b, 0xb7, 0x53, 0xd2, - 0x36, 0x89, 0xb1, 0x5c, 0x38, 0xf4, 0x2f, 0xa1, 0xda, 0x6f, 0xd8, 0xd1, - 0x62, 0xd2, 0xd4, 0x97, 0xce, 0xf1, 0xbd, 0x73, 0x2d, 0x92, 0xdb, 0x62, - 0x0c, 0xb0, 0x77, 0xed, 0x32, 0x3a, 0xfc, 0x59, 0x94, 0xef, 0x2b, 0x48, - 0x60, 0xb2, 0x82, 0xa2, 0xb6, 0x51, 0xdb, 0x51, 0x47, 0x99, 0x4c, 0x50, - 0x93, 0x53, 0x9d, 0xa9, 0x3c, 0x94, 0x34, 0x9f, 0xa6, 0x3e, 0x4f, 0x87, - 0xd4, 0xa0, 0x40, 0xeb, 0x7b, 0xfa, 0x1b, 0x7d, 0x03, 0xa8, 0xf8, 0x8b, - 0xa5, 0x32, 0x3a, 0xaf, 0x7e, 0x6b, 0x25, 0x08, 0x97, 0x71, 0x8d, 0x0c, - 
0x30, 0xc9, 0xa7, 0x23, 0xe3, 0x51, 0xb3, 0xf2, 0x86, 0xad, 0x12, 0xe2, - 0x79, 0x94, 0x7f, 0xf3, 0xf7, 0x88, 0x67, 0x3e, 0x8e, 0x8e, 0x04, 0x5e, - 0x4f, 0x01, 0x6f, 0x1d, 0x78, 0x42, 0x9e, 0x47, 0x81, 0xdf, 0x03, 0x39, - 0x3d, 0x9b, 0xbd, 0xb6, 0x06, 0x21, 0x82, 0xfe, 0xf2, 0x50, 0xe1, 0x14, - 0xbc, 0xe3, 0x5e, 0xe1, 0xbd, 0x8f, 0xfa, 0x35, 0x31, 0x4e, 0x66, 0xeb, - 0x67, 0x49, 0x1c, 0x07, 0x88, 0xb6, 0x22, 0x0c, 0xeb, 0xd9, 0x9f, 0x9b, - 0x8b, 0xe0, 0x9c, 0x3c, 0xf7, 0x91, 0xab, 0x98, 0x5b, 0x0e, 0x09, 0xdd, - 0xe3, 0x0b, 0x14, 0x55, 0xe9, 0xe4, 0x42, 0xd8, 0xce, 0xd7, 0xfd, 0x4c, - 0x20, 0x9f, 0x44, 0x93, 0xa6, 0x17, 0x8a, 0x68, 0x8f, 0xec, 0x62, 0xd1, - 0x97, 0x9c, 0xcc, 0xc4, 0xd9, 0x42, 0xda, 0xf1, 0x34, 0x04, 0xc6, 0xb6, - 0x0f, 0xc7, 0xe6, 0x2d, 0x26, 0x6e, 0x6f, 0x92, 0x7e, 0xd9, 0xd4, 0x40, - 0xc6, 0x70, 0xfa, 0x12, 0x2a, 0x1b, 0xbc, 0x50, 0xeb, 0x3b, 0x24, 0x96, - 0x8d, 0x7c, 0xae, 0xbe, 0xc3, 0x27, 0xce, 0x97, 0xcf, 0xcd, 0x10, 0x13, - 0x01, 0xc6, 0x48, 0x6a, 0x99, 0x38, 0x79, 0xb9, 0x1c, 0xc9, 0x09, 0xac, - 0x96, 0x8c, 0xf7, 0x82, 0x8f, 0xb8, 0x17, 0x94, 0x2c, 0x5f, 0x40, 0xcc, - 0x80, 0xf4, 0x9f, 0xaa, 0xcb, 0x83, 0x13, 0x7b, 0x3a, 0x78, 0x0a, 0x9f, - 0x79, 0x9e, 0xfc, 0x0e, 0x8f, 0x98, 0x60, 0x39, 0x86, 0x44, 0x8e, 0x4b, - 0xc4, 0xad, 0xe6, 0x98, 0x92, 0x08, 0x84, 0x48, 0x8f, 0x1d, 0x78, 0x10, - 0x9e, 0xf7, 0xb8, 0x61, 0x65, 0x46, 0xdb, 0x4a, 0xcf, 0xc5, 0x37, 0xe3, - 0x77, 0x76, 0xcf, 0x0a, 0x7e, 0x72, 0x3f, 0xe4, 0x51, 0x30, 0x28, 0x57, - 0x13, 0xfd, 0xdb, 0x7e, 0xd6, 0xa3, 0xdd, 0x64, 0xdd, 0x00, 0xd0, 0x7f, - 0xbc, 0x48, 0x1d, 0xaf, 0xde, 0x0e, 0x45, 0xc4, 0xc9, 0xfa, 0xf6, 0xb2, - 0xb7, 0x9a, 0x42, 0x8b, 0x18, 0x08, 0xed, 0xdb, 0xa9, 0xc3, 0x32, 0xf1, - 0x9c, 0xcf, 0x16, 0x74, 0x57, 0xce, 0xe9, 0x44, 0x21, 0xdb, 0x8a, 0x45, - 0x89, 0x70, 0x41, 0x5c, 0xbf, 0x10, 0xdf, 0x83, 0x4a, 0xe4, 0x4c, 0xd8, - 0xc9, 0x2e, 0x5b, 0xa3, 0x05, 0xed, 0x73, 0xb1, 0xb0, 0xb7, 0xc4, 0xd7, - 0x0d, 0xea, 0xf6, 0xb4, 0xc1, 0x5e, 0x12, 0x54, 0x30, 0x73, 0x5c, 0x93, - 
0xd9, 0xf7, 0xc9, 0x24, 0x43, 0x8f, 0x4f, 0x8e, 0x94, 0x95, 0xb6, 0xfd, - 0xa3, 0x14, 0x42, 0x50, 0xb8, 0x66, 0xfb, 0xc4, 0xed, 0x72, 0xcf, 0x7b, - 0xa9, 0x73, 0xeb, 0xc4, 0x4a, 0x05, 0xea, 0xb4, 0x47, 0xca, 0x21, 0x56, - 0x28, 0xa8, 0x87, 0xb8, 0x87, 0x0b, 0xe3, 0x8d, 0xfd, 0x70, 0xf7, 0x33, - 0x76, 0xf0, 0x3d, 0xa4, 0x3b, 0x83, 0xab, 0x14, 0x01, 0xe1, 0xb0, 0xa9, - 0x44, 0xe8, 0xd7, 0x50, 0x26, 0x0b, 0xbb, 0x2d, 0x57, 0x39, 0x82, 0x7c, - 0x71, 0xd8, 0x12, 0xaf, 0xf3, 0x9f, 0x46, 0xbd, 0x62, 0xd6, 0x61, 0xf5, - 0xb7, 0x04, 0x94, 0xbf, 0x87, 0xea, 0xc4, 0xc4, 0x33, 0xcf, 0x36, 0x3b, - 0x4f, 0xc7, 0x71, 0xf1, 0x98, 0xe6, 0xb0, 0x96, 0x25, 0xd7, 0xac, 0x75, - 0xfc, 0x92, 0xe0, 0x69, 0x72, 0x37, 0x8d, 0x40, 0x31, 0xaa, 0x2c, 0x86, - 0xfb, 0x95, 0x3f, 0x9c, 0x23, 0xd4, 0x39, 0x99, 0xff, 0xea, 0x95, 0x79, - 0xb9, 0x2e, 0xb0, 0x33, 0xf1, 0xe8, 0xd0, 0x42, 0xb5, 0x70, 0x5c, 0xca, - 0x69, 0x48, 0x28, 0x23, 0x58, 0xb4, 0x07, 0xfc, 0x3e, 0x15, 0x29, 0x00, - 0xa9, 0x22, 0x44, 0x70, 0xd0, 0xc7, 0x01, 0x0d, 0x3e, 0xfc, 0x57, 0xb7, - 0x54, 0x3a, 0xc3, 0x43, 0xd6, 0x2f, 0x55, 0x09, 0x52, 0x4a, 0x6b, 0x8e, - 0x4c, 0x82, 0xbb, 0x4e, 0x3e, 0x38, 0xe1, 0x9e, 0x72, 0x83, 0xec, 0x40, - 0xf5, 0xf7, 0x0e, 0x3c, 0x24, 0xed, 0xda, 0xf2, 0x39, 0x6c, 0xad, 0xeb, - 0xff, 0xfb, 0x4a, 0x38, 0x50, 0x49, 0x28, 0x3d, 0x05, 0xb2, 0x98, 0x44, - 0x2b, 0x61, 0xa2, 0x9b, 0x3a, 0x3c, 0xad, 0xd9, 0x8c, 0xef, 0x3c, 0x72, - 0x50, 0x74, 0x13, 0x80, 0xc4, 0x7e, 0x6e, 0xf3, 0xc9, 0xdf, 0x63, 0xf6, - 0x41, 0xb2, 0x08, 0x78, 0x9b, 0x7c, 0xa9, 0x13, 0xd1, 0x21, 0xe7, 0x5e, - 0x6a, 0x0d, 0x64, 0xf7, 0x52, 0x75, 0xf2, 0x80, 0x69, 0xbe, 0x43, 0xf8, - 0xd4, 0xad, 0x49, 0xfc, 0x97, 0x76, 0x1c, 0xb6, 0x43, 0x9e, 0xcb, 0x45, - 0x4d, 0x75, 0x07, 0xae, 0xdb, 0xbf, 0xf5, 0x8a, 0xeb, 0xb9, 0x6b, 0x12, - 0x06, 0xbf, 0x94, 0xad, 0x77, 0x29, 0xb1, 0xae, 0x24, 0x9b, 0x4d, 0xdc, - 0xe1, 0x5e, 0xd7, 0x57, 0xec, 0xd1, 0xd8, 0xad, 0xf0, 0x06, 0x08, 0x43, - 0x33, 0x99, 0xd2, 0x04, 0xfc, 0xc8, 0xf6, 0x53, 0x3d, 0x73, 0xd4, 0x36, - 
0xd3, 0x8e, 0x4a, 0xcd, 0xb1, 0xe9, 0xcb, 0x3a, 0x5f, 0x54, 0xbc, 0xde, - 0x16, 0xa2, 0x85, 0xde, 0x35, 0x27, 0x99, 0x32, 0x4f, 0xb9, 0x2c, 0x16, - 0xa2, 0x6e, 0xae, 0x75, 0x60, 0x77, 0xe9, 0x08, 0x0f, 0x08, 0xc4, 0xd0, - 0x62, 0xc7, 0xd2, 0x1f, 0x3b, 0x29, 0xdd, 0xb7, 0xea, 0xa3, 0x58, 0xaf, - 0x4c, 0x05, 0xd2, 0x82, 0x6a, 0xe0, 0xc4, 0xe9, 0x70, 0x7e, 0xf2, 0xca, - 0x82, 0x6a, 0xae, 0xc1, 0x9a, 0x42, 0x5d, 0x46, 0x4a, 0xb7, 0x8f, 0x4d, - 0x33, 0xfe, 0x6f, 0x47, 0xb5, 0x49, 0xb3, 0x89, 0x51, 0x31, 0x74, 0x68, - 0x14, 0xda, 0x0a, 0x41, 0x3d, 0x1f, 0x8e, 0x30, 0x8c, 0x77, 0xd1, 0xa9, - 0x36, 0x41, 0x78, 0x34, 0xb7, 0x7e, 0x4e, 0x7a, 0x77, 0x12, 0x43, 0x97, - 0x43, 0xba, 0xd6, 0x28, 0x14, 0x2a, 0x9f, 0x98, 0xb4, 0x39, 0x08, 0x5c, - 0xb7, 0xb8, 0x03, 0x63, 0x62, 0x68, 0xc6, 0x9a, 0x4d, 0xf5, 0xdc, 0x7c, - 0x0f, 0x7e, 0x77, 0xdc, 0x85, 0x53, 0x31, 0x8c, 0x53, 0x8b, 0x27, 0xc4, - 0xb7, 0x3d, 0xd0, 0x94, 0x9b, 0x7e, 0x59, 0x59, 0x03, 0x09, 0x8c, 0x30, - 0x70, 0x7d, 0x9c, 0x73, 0x89, 0x6c, 0x5f, 0xbf, 0xf9, 0xc7, 0x72, 0x76, - 0x12, 0x98, 0xe3, 0xbe, 0xc3, 0x67, 0xdf, 0xa1, 0x76, 0xa3, 0xec, 0x44, - 0x30, 0x70, 0x2f, 0x6a, 0x86, 0x28, 0xb9, 0x9d, 0x7f, 0x93, 0xf2, 0x4a, - 0x34, 0x48, 0x1f, 0x2e, 0x2e, 0x95, 0x88, 0xdb, 0x1f, 0x2c, 0x19, 0x46, - 0x2e, 0x91, 0x5f, 0x81, 0x0d, 0x08, 0x9d, 0x03, 0x0b, 0xaf, 0x59, 0x0a, - 0x41, 0xad, 0x4d, 0x6c, 0x09, 0x0e, 0x9f, 0xd1, 0xc4, 0xdb, 0xac, 0x59, - 0x27, 0x04, 0x1c, 0x73, 0xe9, 0xf3, 0xe8, 0x54, 0xd9, 0x11, 0x31, 0xb2, - 0xed, 0x2d, 0x8c, 0xeb, 0x99, 0x26, 0x48, 0x9e, 0xac, 0x88, 0x96, 0xcb, - 0x19, 0x49, 0xfa, 0x4a, 0x82, 0xd5, 0x5d, 0xb8, 0x0f, 0x22, 0x3f, 0xb6, - 0x5c, 0x02, 0x2a, 0xb9, 0xd9, 0xfe, 0x4d, 0x9d, 0xdb, 0x85, 0x90, 0x19, - 0x7f, 0x1a, 0x44, 0xa3, 0x74, 0x68, 0xbf, 0xa2, 0x3b, 0xb4, 0x3b, 0xeb, - 0xab, 0x99, 0xc2, 0x46, 0x50, 0x7e, 0xec, 0xa9, 0xb4, 0x86, 0xfa, 0x50, - 0xcb, 0x71, 0x7e, 0x75, 0xa5, 0xca, 0xa6, 0x2f, 0x40, 0x1d, 0xa1, 0x4a, - 0x5c, 0x91, 0xd7, 0x2a, 0xa6, 0x17, 0x11, 0x4d, 0x19, 0x2b, 0xb3, 0x0f, - 
0xf0, 0xb3, 0x06, 0x70, 0x51, 0x5c, 0x52, 0x8c, 0xdf, 0xe3, 0x19, 0x92, - 0x08, 0x40, 0xa2, 0xb4, 0xc0, 0xf2, 0xe8, 0x44, 0xcc, 0x36, 0xaa, 0xf9, - 0xf8, 0xfc, 0x2d, 0x83, 0x79, 0xc6, 0x58, 0xc1, 0xdf, 0x32, 0xb7, 0xde, - 0x0f, 0x3e, 0xc0, 0xa8, 0x7e, 0xeb, 0xf2, 0x30, 0x16, 0xdf, 0x38, 0xcb, - 0x69, 0xd9, 0x44, 0x0d, 0x44, 0xf4, 0x45, 0x9c, 0x81, 0xc8, 0xe7, 0x06, - 0xae, 0x95, 0xaf, 0xff, 0x17, 0x3b, 0x1c, 0x3f, 0xda, 0xa5, 0xf8, 0xfd, - 0x9c, 0xf1, 0x0a, 0xca, 0xda, 0xc0, 0xfa, 0x02, 0xc4, 0xce, 0x78, 0xfb, - 0x35, 0x8c, 0xfe, 0x55, 0xad, 0x0d, 0x9b, 0xeb, 0x10, 0xf1, 0x7b, 0xb1, - 0x09, 0xf8, 0xef, 0xfc, 0xde, 0x7a, 0x69, 0x74, 0x76, 0xef, 0x91, 0x64, - 0x33, 0xc4, 0x08, 0x15, 0x73, 0x85, 0x56, 0xae, 0x9c, 0xf6, 0xdd, 0x55, - 0x19, 0x96, 0xe6, 0x41, 0x12, 0xc9, 0x87, 0x91, 0x9e, 0xc6, 0x18, 0xe8, - 0xbf, 0xa0, 0x59, 0xfd, 0x20, 0xab, 0xb5, 0xcf, 0x0f, 0x6e, 0x30, 0xd3, - 0xc5, 0x70, 0xf2, 0x50, 0xa4, 0x2a, 0xdf, 0xb0, 0x45, 0xfc, 0x82, 0x1a, - 0x3b, 0xfe, 0x0c, 0xad, 0x41, 0x95, 0xf1, 0xd6, 0x85, 0xa2, 0xc9, 0xff, - 0xbe, 0x3a, 0x64, 0x70, 0x43, 0xc0, 0xc5, 0xc8, 0x80, 0x11, 0x0d, 0x20, - 0xcd, 0xf2, 0xa2, 0xbb, 0x43, 0x68, 0x0e, 0xf4, 0x01, 0xb3, 0x73, 0x79, - 0x9f, 0x68, 0x41, 0x63, 0x3e, 0xda, 0xf9, 0xf4, 0x23, 0x57, 0x97, 0x84, - 0x99, 0xe8, 0x5e, 0xdb, 0xaa, 0x24, 0xab, 0x9c, 0x40, 0x83, 0xf9, 0x3f, - 0x4f, 0x5a, 0x53, 0xa6, 0xf1, 0xe8, 0x95, 0xcf, 0xcb, 0x50, 0x13, 0x51, - 0xa7, 0x8c, 0x71, 0x1d, 0xff, 0xcc, 0x66, 0xab, 0xff, 0xca, 0xc5, 0xc3, - 0x73, 0x45, 0xb7, 0x21, 0x1d, 0x65, 0x7a, 0xe5, 0x1f, 0x3f, 0x1a, 0x58, - 0x23, 0x28, 0xc8, 0xf3, 0xbf, 0x98, 0x25, 0xc0, 0x83, 0x68, 0xf0, 0x62, - 0x63, 0x90, 0xcf, 0x1f, 0x20, 0xb8, 0x04, 0x5c, 0xc4, 0x80, 0x5b, 0xf4, - 0x6d, 0xdc, 0xe9, 0xac, 0xd8, 0x13, 0x3b, 0x42, 0xf8, 0x4e, 0xa2, 0x1c, - 0xce, 0x3f, 0x8d, 0x15, 0xd3, 0x87, 0x1b, 0x44, 0x79, 0x52, 0x34, 0x4b, - 0x63, 0x4d, 0xbf, 0x95, 0xec, 0xae, 0xf9, 0xc6, 0x7b, 0x7b, 0x85, 0x8c, - 0x4f, 0x20, 0x58, 0x9d, 0x48, 0x03, 0x2f, 0x77, 0x2e, 0x8b, 0x6f, 0x66, - 
0x76, 0xb9, 0xb8, 0xb7, 0x34, 0x5a, 0x63, 0x06, 0x85, 0x82, 0x5f, 0x23, - 0x8f, 0x8d, 0x0c, 0x92, 0x3b, 0xd2, 0x8a, 0x1b, 0x39, 0xee, 0x6a, 0xbc, - 0xf6, 0x94, 0x2a, 0xc6, 0x73, 0xa6, 0x99, 0x98, 0xdc, 0x96, 0xd7, 0xc1, - 0xfe, 0x9b, 0xc8, 0xfb, 0x86, 0x5a, 0xad, 0xce, 0xf8, 0xd5, 0x32, 0x62, - 0x96, 0x63, 0xaf, 0x4c, 0x4a, 0xae, 0xec, 0x26, 0x3d, 0x84, 0x69, 0x50, - 0x5f, 0x37, 0x9b, 0x29, 0xac, 0x15, 0x76, 0x3d, 0x33, 0x96, 0x06, 0xde, - 0xc1, 0x6d, 0xa2, 0xc7, 0xc3, 0x8a, 0x20, 0x2e, 0xf7, 0x08, 0x55, 0x83, - 0x23, 0x9c, 0x23, 0x2d, 0x3a, 0xa1, 0x32, 0xbc, 0x47, 0x48, 0xd5, 0x6a, - 0x71, 0xb9, 0xcc, 0x2d, 0x99, 0xa0, 0x37, 0x07, 0x46, 0x45, 0xbe, 0xf0, - 0x27, 0x5a, 0x25, 0x72, 0x58, 0x47, 0x6d, 0xbf, 0x23, 0xdc, 0x48, 0x44, - 0x45, 0x95, 0xb1, 0x62, 0xf1, 0x7e, 0x4c, 0x95, 0x1c, 0xb4, 0x17, 0x8b, - 0x59, 0x2e, 0xf3, 0x4f, 0x45, 0x3b, 0x5d, 0x67, 0x92, 0x52, 0xd8, 0xc1, - 0x91, 0xfa, 0x53, 0xaa, 0x87, 0xc0, 0xa7, 0xb0, 0x9f, 0x10, 0xe8, 0xac, - 0x45, 0x52, 0xbb, 0x17, 0xee, 0xf6, 0x18, 0xbe, 0x02, 0x70, 0xce, 0x79, - 0x66, 0x72, 0xf9, 0xf6, 0xca, 0x66, 0xff, 0xa4, 0x9a, 0xd9, 0xb7, 0x07, - 0xa9, 0xc1, 0x23, 0x7e, 0x7b, 0x9c, 0xe3, 0x02, 0x7a, 0xcc, 0xa3, 0x67, - 0xb7, 0xb0, 0x37, 0xba, 0xae, 0x12, 0xda, 0x48, 0x6e, 0x7f, 0xde, 0x5f, - 0x75, 0x15, 0xca, 0xd2, 0x46, 0xdd, 0xb0, 0x82, 0xbf, 0x6d, 0xe9, 0x51, - 0x66, 0xa5, 0x9e, 0x0c, 0xd5, 0x03, 0xbd, 0x97, 0x0e, 0x1b, 0x88, 0xf6, - 0x61, 0x5a, 0x8b, 0xe0, 0xdd, 0x3e, 0x59, 0x4c, 0x35, 0xfd, 0xb0, 0x3b, - 0x79, 0x8c, 0x1c, 0x96, 0x97, 0x35, 0x62, 0x36, 0x62, 0x4c, 0x4b, 0x46, - 0xb1, 0x21, 0xf7, 0xf0, 0x34, 0xdc, 0xd9, 0x9f, 0xf8, 0x53, 0x7d, 0xca, - 0xbc, 0x4d, 0xaf, 0xf4, 0xb7, 0x2f, 0xa7, 0x5d, 0x18, 0xf9, 0x3b, 0xa9, - 0xb0, 0xbb, 0xdf, 0xfa, 0x28, 0x2b, 0x58, 0xce, 0x46, 0x01, 0x3f, 0x76, - 0xf2, 0x39, 0x45, 0x8b, 0x3c, 0xda, 0x62, 0x2b, 0x6b, 0xe1, 0x5f, 0x14, - 0xfc, 0x79, 0x17, 0x2d, 0xe2, 0xe5, 0x8c, 0xc5, 0xde, 0x91, 0xfd, 0xf5, - 0x6d, 0x9b, 0x6b, 0xbb, 0xb0, 0x13, 0xae, 0xbe, 0x1e, 0xa8, 0x8f, 0x3c, - 
0xfd, 0x24, 0xbe, 0xb8, 0x39, 0x80, 0x03, 0x06, 0x8b, 0xff, 0xca, 0x90, - 0x88, 0x0f, 0x45, 0xc4, 0xeb, 0x50, 0x52, 0xf5, 0x00, 0x8c, 0x16, 0x9d, - 0x26, 0xaa, 0xec, 0xb1, 0x44, 0xd6, 0xfe, 0x67, 0xa3, 0xc1, 0xec, 0x4a, - 0x12, 0xa6, 0x7c, 0x7c, 0xc3, 0x46, 0x1c, 0x64, 0x61, 0x67, 0xec, 0xce, - 0x1e, 0xa2, 0xb4, 0xdd, 0x6e, 0x7f, 0x02, 0x14, 0xf4, 0x1c, 0x17, 0xa7, - 0x31, 0x9f, 0xc2, 0xc6, 0xc0, 0x21, 0x41, 0x88, 0x61, 0xd8, 0xca, 0x06, - 0xa5, 0xe4, 0xef, 0xa4, 0xaa, 0x4d, 0xa3, 0xad, 0x5f, 0xd4, 0x0c, 0x6b, - 0x14, 0x38, 0x2e, 0xe8, 0x87, 0x5a, 0x68, 0x10, 0x51, 0xd8, 0xbb, 0xa6, - 0xd9, 0xdc, 0xd3, 0x7f, 0x1f, 0xea, 0xa8, 0xcc, 0x3f, 0x43, 0xa4, 0x04, - 0x95, 0xb4, 0xde, 0x2f, 0x07, 0x5d, 0x91, 0x1c, 0x8e, 0xc3, 0xbc, 0xaa, - 0x46, 0x8a, 0xa8, 0x42, 0xa7, 0x2c, 0x0f, 0x1f, 0xb3, 0xe2, 0x8a, 0x0b, - 0xa0, 0x3f, 0xfb, 0x87, 0x9e, 0x42, 0xa5, 0x60, 0xce, 0x5a, 0x54, 0x91, - 0x26, 0x51, 0xea, 0x81, 0x6f, 0xf1, 0x54, 0x93, 0xe7, 0xa0, 0xf8, 0x64, - 0xab, 0x1d, 0x0d, 0x9d, 0x64, 0x6a, 0xd5, 0x19, 0x03, 0xbb, 0x94, 0x7f, - 0x0a, 0xb8, 0x6b, 0x87, 0xc3, 0x1a, 0x38, 0xe5, 0xe8, 0xba, 0x13, 0x17, - 0xeb, 0x13, 0xcc, 0xac, 0xcb, 0x1f, 0x96, 0x4c, 0x3b, 0x18, 0xfb, 0xe8, - 0x5c, 0x54, 0xce, 0x1a, 0x91, 0x44, 0xf5, 0x49, 0x6c, 0x38, 0x2a, 0x92, - 0x8a, 0x0d, 0x3d, 0x08, 0xc2, 0x5f, 0x6c, 0xac, 0x48, 0xb3, 0xdc, 0x2e, - 0xa6, 0x5a, 0xa8, 0xee, 0x22, 0x9a, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x96, 0xc5, 0x3a, 0x4e, 0x42, 0x7d, 0x27, 0xce, - 0x44, 0x84, 0xf1, 0x67, 0x8c, 0xc5, 0xdd, 0x75, 0x3b, 0x8a, 0xed, 0x2e, - 0x29, 0x62, 0x7b, 0xb0, 0xe6, 0xa3, 0xb4, 0x61, 0x73, 0x10, 0xff, 0x0e, - 0x0c, 0x98, 0x74, 0xef, 0xbb, 0xc4, 0xca, 0x03, 0x88, 0xa4, 0x96, 0x61, - 0xef, 0x36, 0x6d, 0xa2, 0xb1, 0xc8, 0xf0, 0xac, 0xf1, 0xb2, 0x08, 0x56, - 0xc7, 0x99, 0xcf, 0xae, 0x0a, 0x37, 0x85, 0x60, 0x78, 0x2d, 0x14, 0xda, - 0xb1, 0xa7, 0x00, 0xb6, 0x00, 0x04, 0x76, 0x80, 0x0e, 0x9f, 0x2a, 0x30, - 0x8b, 0x85, 0xd9, 0xc1, 0xaf, 0xee, 0x27, 0x80, 0x20, 0xed, 0xef, 0x25, - 
0x5c, 0x98, 0x6b, 0xcc, 0xf8, 0x72, 0xfb, 0x3f, 0x13, 0xe6, 0x9b, 0x47, - 0xee, 0xa1, 0x18, 0x55, 0xa0, 0x68, 0xbe, 0xd4, 0x21, 0x59, 0x72, 0xa8, - 0xa4, 0xd2, 0x33, 0x57, 0x50, 0xfc, 0x6b, 0xa8, 0x49, 0x1b, 0x74, 0xdb, - 0x5a, 0x16, 0xb8, 0x52, 0x0c, 0xda, 0xa0, 0xa3, 0xff, 0x33, 0x56, 0x82, - 0x0f, 0x0a, 0x90, 0x82, 0xee, 0xf1, 0x1b, 0xb3, 0x05, 0x44, 0x39, 0x01, - 0xf7, 0x1e, 0xff, 0xcb, 0xea, 0xd0, 0xb6, 0x20, 0xbc, 0x84, 0xb1, 0xf9, - 0xa2, 0xc1, 0x56, 0xe6, 0xfa, 0x47, 0xc9, 0xfd, 0x45, 0x77, 0x51, 0x8e, - 0x01, 0xe4, 0x17, 0x20, 0x6f, 0x99, 0xe3, 0x90, 0x2f, 0xcc, 0xaf, 0xd9, - 0x61, 0x32, 0x91, 0x62, 0x58, 0xf4, 0x98, 0xf5, 0xf4, 0xeb, 0x13, 0xeb, - 0xdc, 0x8a, 0xac, 0xb2, 0x9e, 0xcf, 0xe7, 0xa7, 0xd4, 0x97, 0x22, 0x12, - 0x08, 0x10, 0x6d, 0x40, 0xea, 0x26, 0xea, 0x42, 0x29, 0x6e, 0x75, 0x62, - 0x47, 0x08, 0x17, 0xa8, 0x69, 0x0f, 0xf7, 0x35, 0x59, 0x23, 0x86, 0x83, - 0xfd, 0xb5, 0x61, 0x98, 0x9c, 0x4d, 0x37, 0xda, 0x9f, 0xfc, 0xfb, 0x16, - 0xb7, 0x6c, 0x52, 0xee, 0xa8, 0x9c, 0x3e, 0x93, 0x43, 0xc5, 0x2b, 0xd4, - 0xd0, 0x9f, 0x69, 0x2c, 0xc9, 0x1f, 0x2e, 0xdf, 0x5b, 0xe6, 0xc6, 0x5f, - 0x71, 0xd1, 0xd7, 0xb2, 0x8f, 0x3a, 0xba, 0x60, 0x75, 0x3d, 0x34, 0x41, - 0x43, 0x9b, 0x13, 0xc0, 0x3b, 0x30, 0xc5, 0xe9, 0x84, 0x81, 0xde, 0x85, - 0x4e, 0x65, 0x7b, 0x21, 0x37, 0xb8, 0xef, 0x24, 0x19, 0xaa, 0x26, 0x0c, - 0x27, 0xa7, 0xd9, 0x29, 0x47, 0x1a, 0x15, 0x42, 0x1e, 0x30, 0x79, 0x79, - 0x96, 0x09, 0x62, 0x26, 0xad, 0x98, 0x8b, 0xcb, 0x3d, 0xeb, 0x66, 0x83, - 0x77, 0xd9, 0x79, 0x4d, 0x05, 0x81, 0x72, 0xe9, 0xe0, 0x6f, 0x13, 0x00, - 0x7e, 0xa3, 0x92, 0x82, 0x1c, 0x90, 0x83, 0x4b, 0x15, 0x97, 0x0f, 0x92, - 0xe2, 0xd3, 0x3d, 0xd7, 0x6c, 0xb9, 0x60, 0x9a, 0x23, 0x52, 0xbe, 0x59, - 0xc9, 0x36, 0x9e, 0xf7, 0x77, 0x09, 0x79, 0x01, 0xcc, 0xec, 0x17, 0xd1, - 0x74, 0xbc, 0x58, 0x65, 0x45, 0x3c, 0x86, 0xf1, 0xbc, 0xbd, 0x95, 0x54, - 0x46, 0x45, 0x7b, 0x4c, 0xa2, 0xea, 0x2a, 0x6e, 0xa8, 0xd1, 0x66, 0x03, - 0xb2, 0x6a, 0xe0, 0xd3, 0x07, 0x8d, 0xe0, 0x09, 0x81, 0x42, 0xe3, 0x97, - 
0xc4, 0xe7, 0x37, 0xc5, 0x82, 0xcf, 0xb1, 0xec, 0xba, 0xbd, 0xf4, 0xb6, - 0x41, 0xb2, 0xb8, 0xa6, 0x3a, 0x85, 0x4b, 0x4f, 0x46, 0x48, 0xe9, 0x9b, - 0x72, 0xf5, 0xb0, 0x64, 0x66, 0x75, 0x42, 0xb4, 0x00, 0xbe, 0x11, 0x6d, - 0x86, 0x93, 0x07, 0x50, 0xa7, 0xef, 0x55, 0x42, 0xcf, 0xe8, 0x61, 0xd0, - 0x9b, 0x11, 0x84, 0x8c, 0x74, 0xe4, 0xb8, 0x3f, 0x48, 0xb3, 0x61, 0xe3, - 0xea, 0x66, 0x86, 0x94, 0x95, 0x12, 0x77, 0x26, 0x75, 0x30, 0xb5, 0xd3, - 0x7a, 0xad, 0x2d, 0x58, 0x46, 0x1b, 0x4b, 0xd9, 0x2d, 0x1e, 0x0b, 0xff, - 0xd7, 0x03, 0x56, 0x3b, 0xbd, 0x65, 0xb0, 0xf9, 0xfe, 0x43, 0x1c, 0x9c, - 0x18, 0x82, 0x78, 0x5e, 0x06, 0x02, 0x21, 0x70, 0xb2, 0x7f, 0xb5, 0x63, - 0x71, 0x85, 0x95, 0x79, 0xae, 0x1e, 0xc6, 0x62, 0x7a, 0x7c, 0x63, 0x46, - 0x70, 0x1c, 0x58, 0x72, 0x1d, 0xde, 0xca, 0xb4, 0xfc, 0xc8, 0x56, 0x38, - 0x32, 0xf4, 0x0b, 0x56, 0x87, 0x6b, 0x5b, 0x53, 0xd2, 0x2c, 0x35, 0xef, - 0x5b, 0x33, 0x59, 0x13, 0x76, 0x82, 0x30, 0x80, 0x23, 0x10, 0x07, 0x4c, - 0x3f, 0xac, 0x9c, 0x58, 0x2d, 0x04, 0xe6, 0x6a, 0xd3, 0x5c, 0xf9, 0xb6, - 0x59, 0x4e, 0x85, 0xfe, 0x01, 0x71, 0xf0, 0xf7, 0xf2, 0x1f, 0x46, 0xd5, - 0x20, 0x3c, 0x9b, 0xc2, 0x1e, 0x73, 0x1c, 0x56, 0x9c, 0x76, 0x8c, 0x12, - 0x95, 0x51, 0xd4, 0x6f, 0x5b, 0x3a, 0xa7, 0x5f, 0xa7, 0xe4, 0xfa, 0xb7, - 0x1a, 0xdd, 0xb6, 0x4c, 0x01, 0x02, 0xae, 0x9c, 0x02, 0x0d, 0x66, 0x2f, - 0x40, 0x87, 0xa1, 0xbc, 0xf3, 0xde, 0xf4, 0xdb, 0x65, 0xee, 0xcc, 0xca, - 0xe1, 0x7a, 0xa2, 0xf4, 0xf7, 0xf5, 0x7c, 0x2a, 0x3f, 0xa4, 0x67, 0xbb, - 0x07, 0x50, 0x7a, 0x29, 0x8a, 0xcf, 0x2c, 0x7a, 0x0e, 0x0d, 0xc7, 0x95, - 0x8b, 0xf4, 0xe2, 0x50, 0xe1, 0xc1, 0x40, 0x16, 0x99, 0x5c, 0x72, 0xe7, - 0xe4, 0x01, 0xeb, 0x29, 0x6a, 0x99, 0xf2, 0x67, 0x23, 0x46, 0x1f, 0xaa, - 0xea, 0xc1, 0x51, 0x30, 0xeb, 0x7d, 0x34, 0x52, 0x91, 0x37, 0x2d, 0xc6, - 0x5c, 0x3a, 0x7c, 0x54, 0xc0, 0x79, 0xdc, 0xf9, 0xbf, 0x08, 0x2a, 0xf6, - 0xe1, 0x1e, 0xee, 0xc6, 0xd2, 0xe9, 0x30, 0x27, 0x60, 0x0c, 0xa2, 0x63, - 0x16, 0x06, 0x3d, 0xe2, 0xf5, 0x6f, 0xea, 0xe4, 0x4d, 0x9f, 0x2d, 0x36, - 
0x62, 0x95, 0x47, 0x5d, 0x00, 0x22, 0x9f, 0x0c, 0xbb, 0x71, 0xad, 0xea, - 0xe7, 0x62, 0x59, 0x21, 0xd1, 0xaf, 0x04, 0x5a, 0xfc, 0x1f, 0x28, 0x6b, - 0x6f, 0x71, 0xec, 0xd4, 0xbd, 0x9c, 0x88, 0xfb, 0x3f, 0x04, 0xea, 0xd6, - 0xb2, 0x24, 0xe5, 0x28, 0xfe, 0xc5, 0x3e, 0x15, 0x00, 0x8c, 0xa2, 0xdf, - 0x18, 0x3d, 0x10, 0x9a, 0xb1, 0xcd, 0x64, 0xda, 0x87, 0x41, 0xc8, 0xa1, - 0x1c, 0x97, 0xd5, 0x44, 0xd9, 0x51, 0xd2, 0x96, 0xed, 0xad, 0x28, 0x1f, - 0x03, 0x89, 0x21, 0xbd, 0x79, 0x91, 0x48, 0x9c, 0x8e, 0x17, 0xfd, 0x36, - 0x72, 0xf6, 0x69, 0x4f, 0x3f, 0x02, 0x57, 0xcc, 0x3f, 0x1c, 0x49, 0x82, - 0x00, 0x45, 0x9e, 0x29, 0x83, 0x14, 0x12, 0xbb, 0xd2, 0xd0, 0x1a, 0x66, - 0x0f, 0x57, 0x24, 0xd4, 0x9f, 0x46, 0x0c, 0xf4, 0xb8, 0x28, 0x85, 0x52, - 0xe2, 0xa1, 0xc2, 0x3a, 0x8c, 0x34, 0x4a, 0x81, 0xe3, 0xbc, 0xa2, 0x67, - 0x67, 0x12, 0x13, 0xc4, 0xe7, 0xd7, 0x2c, 0x4e, 0xa9, 0xf5, 0xed, 0x63, - 0xf2, 0x18, 0x9c, 0x0c, 0xe2, 0x4d, 0x25, 0x23, 0x30, 0x3e, 0x49, 0x29, - 0xa6, 0x37, 0xdf, 0xc2, 0xdc, 0xf6, 0x5e, 0xae, 0x45, 0xd7, 0x8d, 0x56, - 0xba, 0x29, 0x4f, 0xee, 0xc9, 0x26, 0xd7, 0xbf, 0x10, 0x4d, 0x0a, 0x3b, - 0x3d, 0x1f, 0xd5, 0x72, 0xe1, 0xe6, 0xf5, 0x23, 0x4a, 0x17, 0x2d, 0xe4, - 0x40, 0x55, 0x9b, 0x39, 0x66, 0x36, 0xe4, 0x6d, 0x6d, 0xb6, 0x8d, 0x2a, - 0x7e, 0x76, 0x73, 0xa5, 0x86, 0x20, 0x3d, 0x18, 0xa0, 0x6c, 0x35, 0x59, - 0xc8, 0x1c, 0xef, 0x0f, 0x36, 0x1d, 0x6f, 0xba, 0x89, 0xb9, 0x9e, 0x7a, - 0x58, 0x1d, 0x43, 0xad, 0x85, 0x8b, 0x6b, 0xcc, 0x25, 0xb8, 0xe4, 0xdd, - 0xa1, 0x35, 0xd9, 0xef, 0xc4, 0xb1, 0xf6, 0x99, 0x27, 0x17, 0xb7, 0xbe, - 0xd1, 0x4f, 0xa1, 0x81, 0x4e, 0xb6, 0x19, 0xcd, 0xa0, 0x92, 0xeb, 0x56, - 0x41, 0x4f, 0x37, 0xca, 0x3b, 0x43, 0x85, 0x86, 0xdf, 0x5d, 0x5a, 0x8c, - 0xd4, 0x5b, 0xc4, 0x28, 0xdb, 0x16, 0xea, 0x3a, 0x2e, 0x9e, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xea, 0x59, 0x40, 0xc4, - 0x40, 0x8b, 0x6a, 0x8a, 0xb8, 0x7f, 0x1e, 0x0b, 0xfe, 0xab, 0xa4, 0xac, - 0x42, 0x91, 0xc5, 0xfa, 0x2c, 0x7e, 0xb4, 0xf9, 0x5c, 0xd5, 0x4c, 0x6a, - 
0x74, 0x82, 0x90, 0x81, 0x96, 0xb0, 0xf4, 0xd4, 0xba, 0xc9, 0xa3, 0x2e, - 0x26, 0x0a, 0xc9, 0x55, 0x65, 0xac, 0xde, 0x83, 0x37, 0xec, 0x0e, 0xf6, - 0xdc, 0x8c, 0x34, 0xe6, 0x57, 0xde, 0x32, 0x0a, 0x02, 0x62, 0x4f, 0x6a, - 0x92, 0xa5, 0xb4, 0x40, 0xde, 0x57, 0xf4, 0xd1, 0xa3, 0x1c, 0xd3, 0xf7, - 0x4a, 0x15, 0xcc, 0x27, 0x26, 0x00, 0xba, 0xf3, 0xfa, 0x4e, 0xc6, 0xe9, - 0xc3, 0x05, 0x3d, 0x3a, 0x89, 0x96, 0x7d, 0x41, 0xac, 0xca, 0x28, 0x7f, - 0x69, 0x02, 0x40, 0x03, 0x93, 0x86, 0x85, 0x85, 0x73, 0x00, 0x09, 0x5a, - 0xcf, 0x5f, 0x1d, 0xaa, 0x46, 0x41, 0x9d, 0x08, 0xbf, 0xea, 0x45, 0x9b, - 0x93, 0xda, 0x9e, 0x81, 0xba, 0x9e, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x08, 0x00, 0x00, 0x6a, 0x1f, 0x9b, 0x03, 0xdd, 0xe4, 0x16, 0x07, - 0x7f, 0x5b, 0xb0, 0xee, 0xac, 0x55, 0xc4, 0x50, 0xe6, 0x2b, 0x17, 0xed, - 0x7f, 0x50, 0x4d, 0x71, 0x73, 0xae, 0xe0, 0x4d, 0xce, 0x08, 0xd9, 0x8b, - 0x83, 0x2c, 0x01, 0x48, 0x02, 0xd3, 0xbb, 0xca, 0x86, 0xd7, 0xca, 0x5f, - 0xc7, 0xce, 0x59, 0xdf, 0xc1, 0xcc, 0xf7, 0x7b, 0x54, 0xf8, 0x0d, 0x4f, - 0x81, 0x9e, 0x50, 0x6a, 0x65, 0x66, 0x4a, 0xec, 0x7a, 0x1b, 0x92, 0xb2, - 0x39, 0x8f, 0x5d, 0x41, 0x33, 0xcf, 0xe6, 0x1b, 0x34, 0x5d, 0xe1, 0xf6, - 0xef, 0xcb, 0xa0, 0x55, 0x7e, 0x1f, 0x45, 0x38, 0xb9, 0x56, 0x15, 0x3b, - 0x70, 0xab, 0xc8, 0x2f, 0x1c, 0xb9, 0x7d, 0x37, 0xe1, 0xb4, 0x03, 0x44, - 0x5a, 0xf6, 0x57, 0x97, 0x03, 0x54, 0x4c, 0x22, 0x88, 0xc3, 0x82, 0xfd, - 0x91, 0xc1, 0xf1, 0x63, 0xb4, 0x50, 0x46, 0x11, 0x64, 0x07, 0xfd, 0x85, - 0xe5, 0x78, 0x57, 0xdd, 0x19, 0x2a, 0x6b, 0x64, 0x3e, 0xec, 0xb8, 0xf3, - 0xb5, 0x95, 0x29, 0x72, 0xf1, 0x9d, 0xdd, 0xb9, 0xad, 0xd0, 0x78, 0x26, - 0x86, 0x10, 0x10, 0x19, 0xe4, 0x79, 0xae, 0xdc, 0x56, 0xb7, 0x54, 0x4f, - 0x94, 0xc6, 0x26, 0x9a, 0x93, 0xa8, 0x2e, 0x1b, 0x1c, 0xda, 0x87, 0x3a, - 0xa2, 0x44, 0xb9, 0x0b, 0x0f, 0xab, 0x70, 0x3b, 0xb7, 0x6c, 0xbf, 0x58, - 0x67, 0x32, 0x7d, 0xa3, 0x2a, 0xcb, 0x4e, 0x02, 0x92, 0xa1, 0x26, 0x0e, - 0x20, 0x5e, 0xb3, 0xec, 0xc4, 0x04, 0x5b, 0x7f, 0xe5, 0xbd, 0x30, 0xeb, - 
0xc8, 0xdd, 0xf1, 0x72, 0x5a, 0x7e, 0xcb, 0x93, 0x22, 0xa0, 0x01, 0x9f, - 0xbb, 0x24, 0x9f, 0x50, 0x01, 0x1f, 0x24, 0x02, 0x85, 0x6d, 0xe6, 0x4d, - 0x55, 0xc4, 0x07, 0xe9, 0x87, 0x38, 0xbf, 0x1a, 0x3b, 0x05, 0x82, 0xc4, - 0x73, 0x4b, 0x87, 0x3c, 0xb4, 0x0a, 0x48, 0x8c, 0x06, 0x67, 0xe7, 0xbf, - 0xcc, 0xe7, 0xe5, 0xc3, 0xb2, 0x81, 0x60, 0xe2, 0xd1, 0xb1, 0x8f, 0x98, - 0xbd, 0x7d, 0xbd, 0x4e, 0x9a, 0xca, 0xbe, 0xcb, 0x81, 0x47, 0x25, 0xaa, - 0xfa, 0x91, 0xcf, 0x78, 0xce, 0xcb, 0x1a, 0x11, 0x79, 0xcf, 0x97, 0xa3, - 0x95, 0x95, 0x6f, 0xd7, 0xae, 0x80, 0xc9, 0xd5, 0x95, 0xb7, 0xcf, 0xe2, - 0x9d, 0x98, 0x65, 0x80, 0xfd, 0x2e, 0xee, 0x46, 0x5e, 0x46, 0x8c, 0xde, - 0x52, 0xb4, 0xdc, 0xce, 0xa8, 0xab, 0x4e, 0x0c, 0x12, 0x9f, 0x89, 0x9c, - 0x84, 0x80, 0xfe, 0x08, 0x64, 0x12, 0x12, 0x95, 0x62, 0xea, 0x65, 0xcc, - 0x34, 0x80, 0xcf, 0x92, 0x5f, 0xc2, 0xae, 0x76, 0xe7, 0x2f, 0xbb, 0xa8, - 0xdb, 0x6a, 0x66, 0x60, 0xaf, 0x88, 0xba, 0x65, 0x32, 0xcf, 0xf7, 0x6e, - 0xd8, 0xd0, 0x69, 0xb0, 0x12, 0x23, 0xd6, 0xc2, 0x32, 0xe5, 0x8e, 0x51, - 0xc5, 0x61, 0x28, 0x45, 0xf7, 0xf9, 0xea, 0x73, 0xce, 0x04, 0x2d, 0x56, - 0x43, 0x10, 0x8b, 0x4f, 0x6b, 0xfa, 0x32, 0xa8, 0x92, 0x8f, 0xd9, 0xb4, - 0xfd, 0xa4, 0x74, 0xa8, 0xea, 0xca, 0xd3, 0x84, 0xbb, 0x5a, 0x34, 0x57, - 0xf9, 0xda, 0x25, 0x40, 0x1f, 0x5e, 0xc2, 0x66, 0x43, 0x05, 0xdd, 0x13, - 0x88, 0x91, 0x60, 0xa1, 0x75, 0xd3, 0xc4, 0x27, 0xff, 0xda, 0x24, 0x3d, - 0xd9, 0xd7, 0x47, 0x46, 0x30, 0xd0, 0x76, 0xc4, 0x9e, 0x97, 0xe3, 0x43, - 0xd7, 0x45, 0xaf, 0x49, 0x36, 0xf2, 0x18, 0xdd, 0x3f, 0x86, 0x9a, 0xec, - 0x9a, 0x70, 0xeb, 0x5a, 0xe2, 0xa0, 0x4b, 0x45, 0x21, 0xb3, 0x32, 0x3d, - 0x0c, 0x8c, 0x03, 0x13, 0xae, 0x46, 0xb5, 0x1a, 0x0a, 0x03, 0x36, 0xfe, - 0xfe, 0xfa, 0xc9, 0x4d, 0x46, 0xf8, 0xfe, 0x6f, 0x99, 0x8c, 0xe4, 0x77, - 0x0c, 0x27, 0x59, 0xf7, 0xc3, 0xfc, 0x32, 0xb3, 0xa5, 0xae, 0xdc, 0x49, - 0xac, 0x31, 0x27, 0xa6, 0x14, 0x92, 0xfb, 0xe3, 0x69, 0x35, 0x8d, 0xa0, - 0x50, 0x55, 0x09, 0x90, 0xdf, 0x67, 0x08, 0x4c, 0x0e, 0xaf, 0x71, 0xc2, - 
0xe8, 0xb8, 0xdc, 0x45, 0xe3, 0x6d, 0x58, 0x3f, 0x19, 0x8d, 0xcd, 0xeb, - 0xe3, 0x02, 0x49, 0xd8, 0xc8, 0x8b, 0x29, 0xb3, 0xef, 0x2b, 0xf0, 0x39, - 0x5c, 0x11, 0xaa, 0x52, 0x44, 0x0d, 0x1a, 0x3a, 0x7a, 0x62, 0xda, 0x6d, - 0xe3, 0xdd, 0x03, 0x30, 0x6d, 0x3e, 0x18, 0x30, 0x1d, 0xc0, 0xd0, 0x05, - 0x67, 0x98, 0xf5, 0x2a, 0xc7, 0xa1, 0x58, 0xd7, 0xf8, 0x6f, 0x7d, 0x07, - 0x59, 0x27, 0x95, 0xb9, 0x8d, 0x4d, 0xd7, 0xc8, 0x5e, 0x8b, 0x89, 0x14, - 0xb7, 0x1b, 0x35, 0xaa, 0x72, 0x02, 0x39, 0x3c, 0x41, 0x7c, 0x91, 0x93, - 0x81, 0xe1, 0xad, 0xbe, 0x77, 0x28, 0x80, 0xa2, 0x9c, 0xa8, 0x00, 0x18, - 0xa5, 0x70, 0xec, 0xec, 0x96, 0x95, 0x37, 0xa3, 0xee, 0x15, 0xa0, 0x69, - 0x0e, 0x05, 0xb5, 0xb4, 0xb6, 0xa7, 0x8b, 0xb9, 0x41, 0x88, 0x4f, 0x56, - 0x39, 0xa7, 0xbe, 0x24, 0xce, 0x4c, 0xe0, 0x9c, 0x24, 0x5a, 0xa1, 0xab, - 0xcd, 0x82, 0xf1, 0x16, 0x3f, 0xc0, 0xaf, 0xe1, 0x42, 0xe0, 0x7d, 0x1b, - 0xd9, 0x8f, 0xb8, 0x04, 0xa1, 0x88, 0xd9, 0xc3, 0xaf, 0x4f, 0xda, 0xfd, - 0x0b, 0x5c, 0xc3, 0x04, 0xf3, 0xdb, 0xe6, 0x76, 0x6e, 0xe9, 0xdc, 0xea, - 0x6f, 0xa2, 0xa5, 0x75, 0x2c, 0xc7, 0x91, 0x7d, 0x4b, 0xd5, 0x68, 0x55, - 0xbb, 0x2d, 0x14, 0xdb, 0x06, 0x76, 0xf7, 0xcc, 0x0a, 0x88, 0x6c, 0x2b, - 0xa1, 0x57, 0xd6, 0x15, 0x9c, 0x46, 0xcf, 0x5b, 0x6f, 0x9e, 0x7e, 0xc5, - 0x39, 0xda, 0x97, 0x26, 0x5e, 0xf5, 0x25, 0x06, 0xed, 0x8e, 0x9b, 0x1d, - 0x1b, 0x91, 0x07, 0x89, 0x08, 0xce, 0xd7, 0x38, 0x43, 0x64, 0x8e, 0xf5, - 0x3a, 0x52, 0x4a, 0xfb, 0x3e, 0xff, 0x2c, 0xb3, 0x78, 0x40, 0xb5, 0xdd, - 0xb2, 0x8a, 0xd3, 0x6a, 0xc5, 0xb0, 0xa3, 0x4a, 0xb8, 0xe7, 0x27, 0xa0, - 0x5a, 0x8f, 0x0f, 0xda, 0x53, 0x49, 0xc9, 0x77, 0x2a, 0xef, 0x78, 0xc6, - 0xec, 0xaf, 0x10, 0xe5, 0x71, 0xc5, 0x7a, 0x85, 0xdf, 0xb2, 0x85, 0x02, - 0xe3, 0x55, 0x7a, 0x91, 0x3a, 0x68, 0xb2, 0x9d, 0x3d, 0xd9, 0x01, 0xc5, - 0x5f, 0x3c, 0xa8, 0x1d, 0x99, 0xc6, 0xe7, 0xad, 0x09, 0xd1, 0x39, 0x3a, - 0x92, 0xc5, 0x77, 0x9c, 0xdf, 0x99, 0x56, 0x9f, 0xfe, 0xf8, 0xfd, 0xc8, - 0x4f, 0x19, 0xa3, 0xa0, 0xdf, 0xff, 0x17, 0xac, 0xa9, 0x03, 0x32, 0x85, - 
0x4c, 0x29, 0xca, 0x89, 0x58, 0xdc, 0x88, 0xdd, 0xeb, 0x79, 0x68, 0x5e, - 0x0f, 0x37, 0x1a, 0xf7, 0x05, 0xfd, 0x39, 0x91, 0x25, 0x61, 0xf3, 0x04, - 0xda, 0x97, 0xfc, 0x7b, 0xcc, 0x40, 0x63, 0xfd, 0x5b, 0x3b, 0x27, 0x8e, - 0x92, 0x6d, 0x98, 0x0f, 0xcc, 0x9c, 0x9b, 0xda, 0xb2, 0xc6, 0xca, 0x56, - 0xff, 0x7e, 0xcc, 0xa2, 0xc0, 0x45, 0x3e, 0xf6, 0xdf, 0xa7, 0xe8, 0x2a, - 0xef, 0x0c, 0xde, 0xec, 0xa4, 0x1d, 0x2c, 0x3e, 0x03, 0xfd, 0xa4, 0x44, - 0x60, 0x4a, 0xf5, 0x83, 0x8f, 0x09, 0x2d, 0xe8, 0xd5, 0x46, 0xf6, 0x1c, - 0x2d, 0x39, 0x28, 0x0c, 0xdf, 0xa1, 0x2b, 0x05, 0x6e, 0x3c, 0x36, 0xdd, - 0x91, 0x81, 0x52, 0xf1, 0x56, 0xdc, 0xbb, 0x79, 0x62, 0xd8, 0x2e, 0x27, - 0x5d, 0x9f, 0x3c, 0xce, 0x81, 0x5c, 0x70, 0xe5, 0x4d, 0x33, 0x06, 0xd5, - 0x14, 0x04, 0xb7, 0xbc, 0x7b, 0x7a, 0xb4, 0xf7, 0x4a, 0x48, 0x8f, 0x97, - 0x85, 0x96, 0x69, 0xc9, 0x40, 0x52, 0xb1, 0x1c, 0x28, 0x82, 0xb3, 0x63, - 0xee, 0x94, 0x2f, 0xcb, 0x40, 0xad, 0xd7, 0x78, 0xb1, 0xc4, 0x21, 0x05, - 0x36, 0xd9, 0x46, 0xf0, 0x83, 0xcd, 0xee, 0x52, 0x7a, 0xa6, 0xa4, 0x40, - 0xb0, 0x2f, 0xf0, 0x1c, 0xfa, 0x42, 0x98, 0x54, 0x5b, 0xfe, 0x5e, 0xd6, - 0x84, 0x73, 0xca, 0x39, 0xbe, 0x87, 0xf2, 0x92, 0xee, 0x3d, 0x21, 0xcc, - 0x69, 0x81, 0xe5, 0xe8, 0x8a, 0xc3, 0x23, 0x64, 0x98, 0xd5, 0x1d, 0xcd, - 0x5c, 0x6c, 0x37, 0xc8, 0x8b, 0x08, 0x22, 0x12, 0x9f, 0x85, 0xc9, 0xed, - 0xb4, 0xa6, 0x07, 0xe1, 0x62, 0x79, 0x35, 0x5d, 0x26, 0x11, 0x4a, 0x6b, - 0x33, 0x37, 0x91, 0x78, 0xe8, 0xe2, 0xba, 0x8b, 0x8a, 0xb7, 0xbb, 0x0f, - 0xd2, 0xb3, 0xa2, 0x02, 0x0c, 0x57, 0x35, 0x99, 0x88, 0x6b, 0x9b, 0x64, - 0x79, 0x1f, 0x4a, 0x48, 0xd4, 0x3b, 0x5c, 0xeb, 0xb4, 0x83, 0xc3, 0xad, - 0x9c, 0x6a, 0xb0, 0xcf, 0x7f, 0x70, 0xe8, 0x22, 0x46, 0x25, 0xfe, 0x7e, - 0x02, 0x44, 0x83, 0x02, 0xb3, 0x08, 0x2e, 0x34, 0x08, 0x4b, 0xff, 0xa2, - 0xc1, 0x60, 0xbb, 0xd8, 0x89, 0x16, 0xf8, 0xaa, 0xab, 0xea, 0xf7, 0xa0, - 0x10, 0x9a, 0xc9, 0xe9, 0xa4, 0x81, 0xa7, 0x87, 0x32, 0x5b, 0xc1, 0xd0, - 0xd9, 0x70, 0x6f, 0xb6, 0x7c, 0x65, 0xd5, 0x0e, 0x65, 0x93, 0xfe, 0x6d, - 
0x66, 0xaa, 0xab, 0xd0, 0x03, 0x07, 0xf2, 0xbe, 0x39, 0xd6, 0xc8, 0xac, - 0xf2, 0x06, 0x58, 0x58, 0x46, 0xc0, 0x1a, 0xbd, 0xa4, 0x96, 0x38, 0x31, - 0x32, 0x89, 0x04, 0xdf, 0xcd, 0x3c, 0x2e, 0x98, 0xb8, 0x39, 0xba, 0xe2, - 0xca, 0x6b, 0xd0, 0x53, 0xce, 0x4a, 0xc8, 0x95, 0x81, 0x84, 0x17, 0xce, - 0x7f, 0x1d, 0xc1, 0x5a, 0xc4, 0xc2, 0x73, 0x30, 0x6d, 0x0b, 0x8c, 0xf8, - 0x66, 0x38, 0x4e, 0xa3, 0x14, 0x84, 0x15, 0x36, 0x9e, 0x0d, 0x56, 0x6b, - 0xa6, 0x77, 0x65, 0xa4, 0x2c, 0x77, 0x00, 0x8b, 0x43, 0x57, 0xc6, 0x25, - 0xc5, 0xd0, 0x17, 0x79, 0x6b, 0x5d, 0xbc, 0xcd, 0xc8, 0x25, 0x8f, 0x20, - 0x09, 0xcc, 0xbd, 0x80, 0x10, 0xdf, 0x35, 0xf6, 0x9c, 0x04, 0x80, 0x23, - 0xdc, 0x97, 0xe0, 0xba, 0x29, 0x48, 0x2e, 0x95, 0x0f, 0xb1, 0x9b, 0xc7, - 0xe6, 0x0b, 0x89, 0x16, 0xe2, 0x81, 0x3b, 0x32, 0x69, 0xc4, 0xde, 0xc6, - 0x12, 0x09, 0x47, 0xff, 0x50, 0xe4, 0x45, 0xb7, 0x35, 0xd2, 0x61, 0x9b, - 0x52, 0x6e, 0xbe, 0xaf, 0xd2, 0xeb, 0x0c, 0x50, 0xf1, 0x57, 0x9f, 0x59, - 0xe1, 0xc1, 0x4f, 0x8c, 0x79, 0x07, 0x05, 0xce, 0x8d, 0x64, 0xb2, 0xf0, - 0xd3, 0x4f, 0xe1, 0x7b, 0xfa, 0x30, 0x0a, 0xc2, 0x5d, 0x0c, 0x47, 0x6c, - 0x17, 0x77, 0x1f, 0xe5, 0xd8, 0x14, 0xfd, 0xc1, 0x01, 0x70, 0x51, 0x60, - 0xb2, 0x20, 0xfd, 0x86, 0xbc, 0x19, 0x5e, 0x01, 0xa6, 0x19, 0x3a, 0x21, - 0xa5, 0x0a, 0x1c, 0xd9, 0xa9, 0x78, 0xbb, 0xc9, 0x01, 0x65, 0xe4, 0xb3, - 0x48, 0xb8, 0xe1, 0xe7, 0xb5, 0xf4, 0x4e, 0xa9, 0xb6, 0xe2, 0x5b, 0xeb, - 0xf5, 0x76, 0x06, 0x1a, 0xd9, 0x08, 0x40, 0xff, 0x72, 0xb2, 0xe3, 0x01, - 0x50, 0xb1, 0xad, 0xb3, 0xa3, 0xf6, 0xef, 0x72, 0x05, 0x0c, 0xf4, 0xce, - 0x24, 0x2c, 0x63, 0x89, 0x63, 0x9e, 0x21, 0xb8, 0xb0, 0xbe, 0xc7, 0x45, - 0xae, 0x47, 0x2b, 0x9e, 0x61, 0x81, 0x4c, 0x76, 0x96, 0x7b, 0x18, 0x37, - 0x74, 0xcb, 0x00, 0xef, 0x38, 0x72, 0x24, 0x0a, 0x63, 0xc1, 0x64, 0xd6, - 0x41, 0xc8, 0x6a, 0xf1, 0xe7, 0x11, 0x20, 0x4b, 0xc2, 0x95, 0x70, 0xb8, - 0xf8, 0x8f, 0xd9, 0xae, 0x8c, 0x12, 0xd8, 0x6f, 0x63, 0x30, 0xca, 0x56, - 0x46, 0x11, 0xda, 0x49, 0x1f, 0x84, 0x3d, 0xae, 0xab, 0x78, 0x29, 0x02, - 
0x6c, 0x43, 0xa3, 0xef, 0x9d, 0x97, 0x59, 0x15, 0x53, 0xcd, 0xc7, 0x47, - 0x65, 0x30, 0xc7, 0xae, 0x31, 0x4a, 0x41, 0xb4, 0x66, 0x9c, 0xbb, 0x51, - 0x0b, 0xbd, 0xe2, 0x7d, 0x41, 0x2c, 0xd0, 0x75, 0x57, 0x93, 0xce, 0x2e, - 0xeb, 0x31, 0x7f, 0x56, 0xb2, 0xa4, 0x2b, 0x9f, 0xcc, 0xef, 0x6f, 0xf0, - 0x77, 0x19, 0xad, 0x4d, 0x2e, 0x37, 0x00, 0x75, 0x53, 0xae, 0x22, 0x44, - 0x69, 0x1c, 0x8a, 0x90, 0xf2, 0xcd, 0x0f, 0x6b, 0x37, 0xdb, 0xfd, 0x71, - 0x64, 0x80, 0xd8, 0x57, 0x1b, 0x8f, 0xff, 0x14, 0xd4, 0x5f, 0xe1, 0xd1, - 0x0f, 0x06, 0x13, 0x61, 0x29, 0xa9, 0x80, 0x9d, 0xc7, 0x8a, 0xa0, 0xb5, - 0xaa, 0xfc, 0xe0, 0xb4, 0xb4, 0xf0, 0x31, 0xf0, 0xec, 0x78, 0x03, 0x28, - 0xb9, 0xf7, 0xd9, 0xa7, 0xc8, 0xad, 0x2e, 0x16, 0xb8, 0x18, 0x82, 0x43, - 0x66, 0x8b, 0xae, 0xb2, 0x45, 0x2b, 0x0c, 0x9d, 0x69, 0xbd, 0x1b, 0xc5, - 0x20, 0xc6, 0x41, 0xe7, 0x4f, 0x4b, 0x7b, 0x46, 0x3d, 0x7a, 0x6d, 0x9f, - 0x13, 0x2e, 0x0f, 0xf3, 0x85, 0x3e, 0x5b, 0x12, 0xe5, 0xbf, 0x1b, 0x20, - 0xc3, 0x5f, 0x6b, 0xf7, 0xf7, 0xa3, 0xd7, 0x33, 0xd2, 0xcb, 0x18, 0xa5, - 0xa4, 0xa2, 0xd3, 0x59, 0x91, 0x9a, 0x04, 0xfa, 0x9d, 0xa5, 0x55, 0xad, - 0x09, 0x5a, 0x1e, 0x0b, 0x10, 0xd0, 0x46, 0x18, 0xe4, 0x09, 0xe8, 0x1b, - 0x44, 0xd3, 0x78, 0x45, 0xc0, 0xdf, 0xa2, 0xef, 0xfc, 0x59, 0x8a, 0x1b, - 0x22, 0x60, 0xc9, 0x58, 0x7d, 0x65, 0x45, 0xa9, 0xac, 0xd5, 0xd4, 0xc4, - 0x44, 0xd3, 0x08, 0x44, 0x40, 0x4d, 0x3d, 0x7e, 0x39, 0x81, 0x72, 0x15, - 0x49, 0xd7, 0x2c, 0xda, 0x33, 0xaf, 0xc5, 0xb5, 0x8a, 0x3c, 0xbf, 0x81, - 0x88, 0x4f, 0x12, 0xe4, 0xe8, 0xe6, 0x00, 0xb6, 0xd9, 0xcd, 0xb2, 0x70, - 0x08, 0x15, 0x72, 0xf6, 0x46, 0xc7, 0x98, 0x7c, 0x1d, 0x54, 0xd0, 0x66, - 0x2d, 0xa1, 0xd8, 0xda, 0xb0, 0xe5, 0x9f, 0xa3, 0x2f, 0x2c, 0xfb, 0x34, - 0xb3, 0x21, 0x8b, 0x61, 0xf4, 0xce, 0x60, 0x2b, 0xb5, 0x5e, 0x3d, 0x14, - 0x2c, 0xbe, 0x19, 0x9d, 0x5f, 0x01, 0xe1, 0x21, 0x34, 0x11, 0x6b, 0x10, - 0xd4, 0x17, 0x58, 0xb3, 0x0a, 0x30, 0xe4, 0x17, 0x51, 0x0b, 0xf2, 0xbb, - 0xa6, 0xb7, 0x00, 0xa2, 0xe8, 0xa5, 0xa3, 0x41, 0x1d, 0x65, 0x2d, 0x26, - 
0x93, 0x26, 0x7d, 0xdc, 0xad, 0x6f, 0x83, 0xeb, 0x66, 0x55, 0xde, 0x60, - 0x21, 0x56, 0x19, 0x4f, 0x9b, 0x7b, 0x26, 0x4a, 0x80, 0xf5, 0xab, 0x8b, - 0xbf, 0xe4, 0xb1, 0xa1, 0xd6, 0x33, 0x32, 0xbf, 0x86, 0x8c, 0x3c, 0xd0, - 0x12, 0x03, 0xd4, 0xb9, 0x23, 0x54, 0x1b, 0x94, 0x2f, 0xa5, 0x34, 0x4d, - 0x59, 0x18, 0x33, 0x8e, 0x8c, 0xf7, 0x1f, 0xc9, 0x6d, 0x75, 0xfb, 0x2a, - 0x22, 0x6c, 0x64, 0xb7, 0x79, 0xd8, 0x3b, 0xf6, 0x4e, 0x98, 0xd8, 0xa8, - 0x2c, 0x06, 0xd1, 0x92, 0x32, 0x44, 0xec, 0x38, 0x40, 0x3b, 0x53, 0x16, - 0x40, 0x8f, 0x92, 0x72, 0x87, 0xa8, 0xb8, 0xc0, 0x8f, 0x25, 0x4c, 0x4f, - 0x24, 0xfc, 0x8d, 0xc6, 0xa6, 0xeb, 0x2f, 0xdf, 0x2f, 0x0d, 0x2f, 0xd3, - 0x6e, 0x70, 0x71, 0xfe, 0xf0, 0x2e, 0xe9, 0x84, 0xd3, 0xc1, 0xd1, 0x70, - 0x4b, 0x8f, 0x7b, 0x60, 0xb0, 0xb7, 0xe3, 0x79, 0x52, 0x6a, 0x6b, 0x26, - 0x03, 0x8f, 0x6a, 0x0f, 0x8d, 0x85, 0xd7, 0x5f, 0xf7, 0x39, 0x31, 0x0e, - 0x26, 0x73, 0x84, 0x3f, 0x9b, 0x10, 0x6f, 0x29, 0x63, 0x14, 0x36, 0xa2, - 0xec, 0x44, 0x7d, 0x84, 0xc6, 0x4a, 0xec, 0xfe, 0xac, 0xcb, 0xe4, 0xfa, - 0xf6, 0x68, 0x83, 0x68, 0xe0, 0x8f, 0xd3, 0x8a, 0x60, 0x73, 0xf1, 0x5c, - 0x71, 0x02, 0x0c, 0xa2, 0x88, 0x2c, 0xa2, 0x35, 0x35, 0x5c, 0x3f, 0xb1, - 0xbe, 0xb3, 0x6b, 0x5c, 0xe1, 0x78, 0x75, 0x40, 0x20, 0x87, 0x67, 0xca, - 0x07, 0x1c, 0x9c, 0x02, 0xc7, 0xf2, 0x9d, 0x1c, 0xda, 0x1b, 0x86, 0x1b, - 0xc6, 0xa6, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x93, 0xca, 0x30, 0xae, 0xea, 0x26, 0x6a, 0x1b, 0x15, 0x46, 0x0a, 0xe3, - 0x57, 0x23, 0x4c, 0x0c, 0x98, 0x8e, 0x3e, 0xbb, 0x43, 0x14, 0x73, 0xdf, - 0x17, 0x91, 0xe2, 0xee, 0x39, 0xf9, 0xc2, 0x2f, 0xdc, 0xad, 0x0e, 0x00, - 0xf5, 0xdd, 0xe3, 0x97, 0xba, 0x8c, 0xee, 0x53, 0xc4, 0x70, 0x37, 0x46, - 0xcf, 0x04, 0xc3, 0xc8, 0x56, 0x38, 0x2e, 0x39, 0x75, 0x32, 0x6d, 0x98, - 0xc4, 0x14, 0xae, 0xa4, 0x29, 0xa3, 0xc6, 0xb6, 0x66, 0x45, 0x48, 0xdf, - 0xc0, 0xa9, 0x4b, 0x4f, 0xef, 0xb9, 0xb4, 0x89, 0x0d, 0x64, 0x00, 0x5c, - 0xd1, 0xc8, 0x2b, 0xf7, 0xc5, 0x1a, 0x1b, 0x06, 0xb7, 0x49, 0xb1, 0xe3, - 
0x4d, 0x87, 0xf9, 0x3f, 0xba, 0x39, 0xa3, 0x56, 0x7f, 0x43, 0xcc, 0x15, - 0x9c, 0x3d, 0xba, 0x71, 0x7b, 0xeb, 0x45, 0x0f, 0x15, 0x1b, 0x6c, 0x84, - 0x75, 0x6d, 0x43, 0x0b, 0x27, 0x12, 0x6b, 0xbc, 0x0a, 0x6d, 0xe4, 0xf6, - 0x4f, 0xc7, 0xbb, 0x9e, 0x91, 0xb5, 0x09, 0x5f, 0x79, 0x2a, 0xbf, 0xda, - 0x34, 0x91, 0x44, 0x47, 0x52, 0x64, 0x00, 0x89, 0x27, 0x17, 0x5c, 0xe9, - 0x90, 0x8b, 0xcb, 0xbe, 0x21, 0x47, 0x65, 0x1c, 0x54, 0x61, 0x48, 0x17, - 0x66, 0xb7, 0xa1, 0x60, 0x27, 0x31, 0x04, 0x42, 0x3b, 0x33, 0x3d, 0xda, - 0xf7, 0x61, 0x3d, 0x4b, 0x91, 0xa5, 0x74, 0x4b, 0xde, 0x16, 0xf2, 0x79, - 0x3e, 0xf7, 0x89, 0x87, 0xb3, 0xdd, 0xa2, 0x49, 0xd7, 0x54, 0x1b, 0x39, - 0xff, 0xb5, 0xec, 0x9d, 0x1d, 0x09, 0x7e, 0x5a, 0x3c, 0xd1, 0xdc, 0x0e, - 0x2a, 0x0e, 0x2c, 0x40, 0x4e, 0xa5, 0x8c, 0x9d, 0xc8, 0x9b, 0xa5, 0xb2, - 0x40, 0xa4, 0xaa, 0x3b, 0xac, 0x93, 0x19, 0xf7, 0xa1, 0x8b, 0xf8, 0x4a, - 0x40, 0x08, 0x5d, 0x1d, 0xb0, 0xae, 0x0f, 0x67, 0xa7, 0x21, 0xaf, 0xe3, - 0xb1, 0xfc, 0xff, 0xa0, 0x95, 0x66, 0x2b, 0xf7, 0x82, 0x2d, 0x8a, 0x26, - 0x0f, 0xc3, 0xed, 0x62, 0xb6, 0xcb, 0x4c, 0x86, 0xe9, 0x20, 0x78, 0x3f, - 0x08, 0x53, 0x8f, 0x41, 0xf1, 0xa1, 0x04, 0x77, 0xd9, 0xe6, 0xea, 0x26, - 0x6d, 0x33, 0x48, 0xb3, 0xbb, 0xed, 0xfc, 0xd7, 0xa3, 0x2b, 0xe2, 0x39, - 0xcf, 0x78, 0x4e, 0x11, 0x26, 0xad, 0x39, 0x83, 0x6e, 0x72, 0xbf, 0xc6, - 0x34, 0x23, 0x97, 0x5d, 0x7b, 0x64, 0x1e, 0x78, 0x00, 0x34, 0x92, 0x5d, - 0x3f, 0x23, 0x28, 0x60, 0x7f, 0x88, 0xf0, 0xca, 0x96, 0x4a, 0x15, 0xbf, - 0x8a, 0xb7, 0xd0, 0xd9, 0x99, 0x8b, 0xdb, 0x26, 0xdc, 0x7e, 0x8d, 0x35, - 0x53, 0x60, 0x07, 0x85, 0x80, 0xc4, 0x9c, 0x0d, 0x81, 0xe2, 0x93, 0x85, - 0x76, 0x2d, 0x85, 0x21, 0x6e, 0xda, 0x29, 0xe5, 0xb1, 0x08, 0x46, 0x09, - 0x1b, 0x8a, 0xd9, 0xd2, 0xd7, 0x16, 0x74, 0xee, 0x26, 0x3e, 0xc4, 0x8c, - 0x2e, 0x6b, 0x0c, 0xbc, 0x95, 0xea, 0x4a, 0xb2, 0xd6, 0x6f, 0x43, 0xd1, - 0x3a, 0x8f, 0xbd, 0x77, 0xb4, 0x67, 0x63, 0x6b, 0xd2, 0xe0, 0xf0, 0x81, - 0x74, 0xb7, 0xc5, 0x11, 0x60, 0x10, 0x6b, 0xc6, 0x0f, 0xfd, 0x84, 0x2e, - 
0x5c, 0x8f, 0x3b, 0xf5, 0x68, 0xa7, 0x62, 0xc6, 0x4f, 0xa6, 0xee, 0x19, - 0x44, 0xea, 0xc0, 0xe4, 0x64, 0x12, 0x71, 0x2f, 0xfb, 0xa3, 0x4d, 0xb0, - 0x8e, 0x5e, 0xe1, 0x79, 0x65, 0xd4, 0xf3, 0xed, 0x73, 0x04, 0xf1, 0x6d, - 0xc6, 0x75, 0x54, 0x28, 0x13, 0xe2, 0xd6, 0xa1, 0x26, 0xf9, 0xa4, 0x29, - 0x20, 0x5b, 0xd0, 0x3c, 0x3d, 0xf3, 0x7a, 0x18, 0x9a, 0x3d, 0xec, 0x6a, - 0x4c, 0xfd, 0xa5, 0x00, 0xdf, 0xec, 0xfd, 0x64, 0x38, 0x66, 0xa7, 0xba, - 0x59, 0xb3, 0x9b, 0x9c, 0x44, 0xfb, 0x10, 0x08, 0xb8, 0x79, 0xea, 0x85, - 0xbf, 0xa4, 0x14, 0xce, 0xce, 0x85, 0x22, 0x3f, 0x16, 0x00, 0x1c, 0x57, - 0xc8, 0x5a, 0x1b, 0xf5, 0xff, 0xde, 0x7e, 0xa9, 0xcc, 0xf3, 0xb5, 0x1d, - 0x57, 0x06, 0xda, 0xbb, 0x6c, 0x0a, 0x1e, 0xd4, 0x09, 0x74, 0x84, 0x1d, - 0xfa, 0xdf, 0x33, 0x1e, 0xe2, 0x8f, 0x10, 0xf7, 0x73, 0xab, 0x71, 0xb8, - 0x64, 0xce, 0xc0, 0x49, 0xc0, 0x36, 0xd3, 0x39, 0x31, 0x4c, 0x12, 0x5b, - 0xf3, 0xf9, 0xb4, 0x2c, 0x88, 0xba, 0xd4, 0x1a, 0xbd, 0x0c, 0x99, 0xbd, - 0x0e, 0xad, 0x51, 0xe0, 0xca, 0xdb, 0x25, 0x66, 0x83, 0xe0, 0x55, 0x18, - 0xeb, 0xa6, 0x4e, 0x56, 0xcb, 0x2f, 0xa5, 0xf2, 0x42, 0x7a, 0xa1, 0x05, - 0xf0, 0x3a, 0x71, 0x5a, 0x78, 0x3a, 0x7a, 0x6d, 0x12, 0x9f, 0x43, 0xc5, - 0xcc, 0xb3, 0xfd, 0xf2, 0xbf, 0x05, 0x16, 0xef, 0x07, 0xf9, 0xde, 0x0d, - 0x51, 0xf0, 0x33, 0x86, 0x43, 0x57, 0x40, 0xbc, 0xa9, 0xbd, 0xa0, 0x23, - 0xff, 0xbb, 0xe6, 0x15, 0xa1, 0xeb, 0xe9, 0x78, 0x0d, 0x72, 0x76, 0xf2, - 0xb6, 0x6e, 0x46, 0xe2, 0x86, 0xab, 0x3c, 0x52, 0x2c, 0xc6, 0x77, 0xdd, - 0x57, 0xf7, 0x4d, 0x36, 0xbb, 0x41, 0x08, 0x21, 0xaa, 0xe6, 0x44, 0x50, - 0xed, 0xaf, 0x18, 0xb3, 0xdd, 0x6b, 0x57, 0x46, 0x9e, 0x44, 0x93, 0x20, - 0xe0, 0x62, 0x95, 0xcd, 0xcf, 0xe4, 0x96, 0x92, 0xc3, 0x0d, 0x16, 0xb2, - 0xc3, 0xf4, 0x0f, 0x3f, 0x87, 0x17, 0xb9, 0x7b, 0x60, 0x60, 0xfa, 0xfb, - 0x81, 0x5c, 0xb3, 0xb7, 0x89, 0x73, 0xf7, 0x35, 0xf7, 0x27, 0xf1, 0x0e, - 0xa4, 0xa1, 0xba, 0xea, 0x6a, 0xe3, 0x5c, 0x0f, 0xf7, 0x15, 0xbc, 0x28, - 0x57, 0x27, 0x8f, 0xd8, 0xca, 0x82, 0x19, 0xd0, 0xa3, 0x9d, 0xe5, 0xe0, - 
0x44, 0xbf, 0x78, 0xa4, 0x09, 0x69, 0x27, 0xa0, 0x69, 0xb5, 0xd4, 0xbe, - 0x00, 0xe6, 0x03, 0x97, 0xbc, 0x8b, 0xfc, 0x25, 0x70, 0xb3, 0x49, 0x30, - 0xe3, 0x24, 0x19, 0x77, 0xb4, 0x93, 0x46, 0x03, 0xe6, 0x22, 0xaf, 0x76, - 0xd2, 0x90, 0x00, 0x05, 0x46, 0xb8, 0xa4, 0xf5, 0x4c, 0xaa, 0x04, 0x63, - 0xa0, 0x57, 0xe0, 0x20, 0x6e, 0x1a, 0xed, 0x21, 0x86, 0xd0, 0x38, 0x5b, - 0xe6, 0xa7, 0xb0, 0xe7, 0x75, 0xe3, 0x76, 0xb3, 0x15, 0x8b, 0xdc, 0x10, - 0x52, 0x15, 0x21, 0x7b, 0xd0, 0xc4, 0x75, 0x26, 0x1d, 0x6e, 0x0d, 0x4c, - 0x08, 0x5b, 0x95, 0x9a, 0xd0, 0xda, 0xbe, 0x23, 0x98, 0xde, 0x60, 0x2a, - 0xe9, 0xa4, 0x92, 0xf0, 0x92, 0x84, 0xdc, 0x86, 0x60, 0xf5, 0x23, 0x31, - 0xf5, 0xe9, 0xd6, 0x00, 0xc1, 0x78, 0xab, 0x05, 0x94, 0xd3, 0x47, 0x4d, - 0x32, 0x0f, 0x82, 0xa0, 0x99, 0x0b, 0xfe, 0x6b, 0x58, 0xf9, 0x24, 0xf6, - 0x17, 0xa0, 0x5f, 0x24, 0x6a, 0xc6, 0x01, 0xa8, 0xfa, 0xca, 0xdc, 0xb6, - 0x83, 0xcb, 0xd2, 0x3b, 0xb7, 0x0b, 0x04, 0x3e, 0x6a, 0xaf, 0x23, 0x17, - 0x3e, 0x14, 0xce, 0x52, 0x1c, 0xe3, 0x06, 0x66, 0x29, 0x17, 0x6f, 0x7e, - 0x66, 0x06, 0xa9, 0x68, 0x7f, 0xca, 0xad, 0xa8, 0xb7, 0x2d, 0xa4, 0x5d, - 0xa6, 0x16, 0xcd, 0xed, 0xee, 0x14, 0x96, 0xc8, 0x12, 0x69, 0x4e, 0x70, - 0x72, 0x2a, 0x75, 0x82, 0x08, 0x3f, 0x3e, 0x27, 0xa0, 0xea, 0x43, 0x84, - 0xa9, 0x9a, 0x91, 0x87, 0x4f, 0x20, 0x61, 0x55, 0x8d, 0x70, 0xad, 0x6c, - 0x59, 0x5d, 0x13, 0x80, 0xbb, 0x52, 0x55, 0x81, 0x8b, 0x59, 0x94, 0x0f, - 0xc2, 0x54, 0x79, 0x59, 0xe8, 0x9d, 0x58, 0xe5, 0x91, 0x10, 0xb3, 0xef, - 0x1c, 0xda, 0xaa, 0xdd, 0x91, 0x0b, 0xb0, 0x14, 0x3b, 0xad, 0x02, 0x98, - 0x40, 0x3c, 0x54, 0xc4, 0x23, 0xb9, 0x40, 0x54, 0x7e, 0x88, 0x10, 0x3e, - 0x24, 0xe5, 0xf6, 0xdf, 0x5c, 0x9e, 0x7a, 0x9f, 0xd0, 0xff, 0x5e, 0x9c, - 0xb6, 0x30, 0x17, 0x94, 0xd2, 0xaa, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x96, 0xff, 0x2f, 0x01, 0x60, 0x2c, 0x1b, 0xe3, - 0xc6, 0xcb, 0xa4, 0x41, 0xa1, 0x44, 0x13, 0x14, 0xe2, 0x44, 0x77, 0x1c, - 0x96, 0xe8, 0xe6, 0x4f, 0x70, 0x99, 0x3a, 0xef, 0xa1, 0x6f, 0x1f, 0x7f, - 
0xb9, 0xe9, 0x1e, 0x35, 0x37, 0x5b, 0x94, 0x90, 0x78, 0xcc, 0x8d, 0xcd, - 0x6c, 0x9f, 0xf6, 0x73, 0xed, 0x23, 0xa2, 0x28, 0x64, 0x58, 0x50, 0x64, - 0x05, 0xbc, 0xc9, 0x9b, 0x5a, 0xec, 0x3f, 0x2b, 0x61, 0xcf, 0xa7, 0x35, - 0x56, 0x8c, 0x77, 0x68, 0xd6, 0xcf, 0x9b, 0xc5, 0x62, 0xee, 0x3a, 0xb2, - 0xfe, 0x78, 0xba, 0x02, 0xe7, 0x26, 0x8a, 0x89, 0x30, 0x19, 0xcc, 0xb0, - 0x98, 0xbf, 0x30, 0x2c, 0xae, 0x13, 0x6c, 0x93, 0x86, 0x19, 0x84, 0x13, - 0x01, 0x2f, 0x39, 0x4e, 0x33, 0xd1, 0x15, 0x99, 0xf7, 0x1e, 0xb8, 0x86, - 0xdb, 0xb6, 0xf9, 0x56, 0x42, 0x0e, 0x4a, 0xb1, 0x5e, 0xf0, 0x9a, 0x06, - 0x5e, 0xab, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, - 0xcd, 0xde, 0xad, 0x40, 0x34, 0xcd, 0x79, 0x0a, 0x29, 0x84, 0x05, 0x3f, - 0xb5, 0xbe, 0x49, 0x84, 0x43, 0xcc, 0xa6, 0xe3, 0xe9, 0xdc, 0x84, 0x14, - 0xe7, 0xb3, 0x1b, 0x96, 0xe8, 0xda, 0x35, 0x15, 0x38, 0xf5, 0xb3, 0xb5, - 0x91, 0xc3, 0xc3, 0x94, 0xc6, 0x79, 0xeb, 0xf5, 0x22, 0x78, 0xf0, 0x0b, - 0xda, 0xb0, 0x91, 0xa7, 0x43, 0x71, 0x8e, 0xa6, 0x52, 0x0f, 0x81, 0x06, - 0xc8, 0xdf, 0xb5, 0x1f, 0x92, 0xb0, 0xfe, 0x93, 0x38, 0x4c, 0xf4, 0x17, - 0x66, 0x31, 0xea, 0x08, 0x72, 0xb9, 0xaa, 0xfd, 0x40, 0x8d, 0xbf, 0x56, - 0x19, 0xb1, 0xb5, 0x8e, 0x4e, 0x4e, 0x73, 0x7f, 0x4b, 0x0c, 0x70, 0x94, - 0x7c, 0x9f, 0xfc, 0x23, 0x35, 0xba, 0xd2, 0x23, 0x88, 0x1d, 0x83, 0x28, - 0x45, 0xd7, 0x1b, 0x63, 0xfb, 0x36, 0x86, 0x06, 0xf3, 0x99, 0x81, 0x6e, - 0xd7, 0xf1, 0xd4, 0x53, 0x6d, 0x30, 0x3c, 0x8d, 0xac, 0xc6, 0x9a, 0xd5, - 0xe8, 0x4f, 0x11, 0x58, 0xba, 0xfd, 0x67, 0x06, 0xe7, 0x1a, 0xb4, 0xa1, - 0x45, 0x13, 0xf2, 0x3b, 0xdc, 0x71, 0xf0, 0xc6, 0x53, 0xfc, 0x8b, 0x2f, - 0x14, 0xe4, 0xe0, 0xd6, 0x8c, 0x96, 0x4c, 0x48, 0xc0, 0x30, 0x6e, 0x00, - 0x0f, 0x42, 0xfe, 0xa7, 0x9d, 0x0f, 0xf2, 0x52, 0x58, 0xf9, 0x35, 0x33, - 0x99, 0xda, 0xd5, 0x9d, 0x61, 0x26, 0x6b, 0x80, 0xff, 0x08, 0x51, 0x54, - 0x26, 0xfa, 0x8d, 0xfc, 0x67, 0x60, 0x93, 0x0e, 0xcd, 0x78, 0x41, 0x5a, - 0x31, 0x47, 0x14, 0xb0, 0x65, 0x89, 0x30, 0xcb, 0x0c, 0xc5, 0xa0, 0x37, - 
0xa8, 0xe0, 0xcf, 0x24, 0xa4, 0x2f, 0xad, 0xa7, 0x9c, 0xa2, 0xe8, 0x81, - 0x17, 0xbe, 0x2f, 0xd5, 0xd1, 0xa8, 0xff, 0x9d, 0x5e, 0x7f, 0xd9, 0x6c, - 0x56, 0xe6, 0xc4, 0x60, 0x8d, 0xa5, 0x47, 0x5e, 0x43, 0x1e, 0x34, 0x23, - 0xb3, 0x6a, 0xdf, 0x6c, 0xf8, 0xd1, 0x85, 0x11, 0xaa, 0x74, 0x85, 0x71, - 0x27, 0xc5, 0x80, 0x37, 0x60, 0xb4, 0x2b, 0x53, 0x5a, 0xc4, 0x35, 0xd1, - 0xe8, 0x4b, 0x01, 0x58, 0x1f, 0xdb, 0x73, 0xf3, 0x2c, 0x8b, 0xbb, 0x17, - 0x36, 0x76, 0x35, 0x6b, 0xa0, 0x82, 0x47, 0xf5, 0x16, 0x21, 0x41, 0x43, - 0xc9, 0x1f, 0x53, 0xf9, 0xe9, 0x47, 0xf0, 0x9c, 0x6d, 0xe3, 0x23, 0x59, - 0x74, 0xdc, 0x1a, 0x8f, 0x4e, 0x6c, 0x71, 0x83, 0x7e, 0xd0, 0x2b, 0x50, - 0x44, 0x86, 0x5f, 0xbf, 0x60, 0x92, 0xeb, 0x9a, 0x9b, 0xa2, 0xc9, 0x2b, - 0xa8, 0xc4, 0x77, 0x4e, 0x3f, 0xf8, 0xa6, 0x39, 0x50, 0x5c, 0x7e, 0x2a, - 0x70, 0xb0, 0x5d, 0x28, 0xb2, 0x81, 0xa9, 0xaf, 0x16, 0x5e, 0x27, 0xeb, - 0x03, 0x0e, 0x82, 0xad, 0x28, 0x51, 0x16, 0xd1, 0xf4, 0x58, 0x75, 0x1a, - 0xf9, 0x6a, 0xbf, 0x73, 0xd7, 0x84, 0x07, 0x7f, 0x4c, 0x4e, 0x29, 0x02, - 0x9b, 0x60, 0x81, 0x85, 0xa9, 0xbf, 0xc7, 0xa0, 0x8f, 0x8a, 0xdc, 0xa4, - 0xc5, 0x17, 0x51, 0x24, 0x15, 0x28, 0x9e, 0x5e, 0x78, 0x84, 0x21, 0x02, - 0xca, 0x26, 0x61, 0x4e, 0x95, 0xa6, 0x8d, 0xa6, 0x98, 0x7d, 0x1f, 0x84, - 0x19, 0x24, 0x8b, 0x31, 0x76, 0x89, 0x2a, 0x5f, 0xa9, 0xfb, 0xaa, 0x8a, - 0x8c, 0xce, 0xe4, 0x30, 0xd6, 0xec, 0x5b, 0x39, 0xb7, 0x09, 0x80, 0x23, - 0x4c, 0xe1, 0x6e, 0x8f, 0x7c, 0x10, 0xe8, 0x8a, 0x60, 0x35, 0xd7, 0xa3, - 0xe0, 0x5f, 0xcd, 0xfa, 0x3d, 0x8f, 0xd8, 0x5d, 0xec, 0xc9, 0xc5, 0xa0, - 0x73, 0x41, 0x89, 0xe5, 0x39, 0xf2, 0x42, 0xff, 0x08, 0xa0, 0x12, 0xb7, - 0x4a, 0x5e, 0x46, 0x06, 0x31, 0xbd, 0x88, 0x5e, 0x9e, 0x05, 0x17, 0x51, - 0xb3, 0xe7, 0x88, 0x10, 0x19, 0x32, 0xff, 0x8a, 0x1e, 0xce, 0x66, 0xbc, - 0x84, 0x1f, 0xed, 0x52, 0x52, 0x77, 0xe1, 0x5e, 0xa6, 0x21, 0xe4, 0xad, - 0x59, 0xca, 0xa3, 0x77, 0xea, 0x66, 0x28, 0x15, 0x73, 0x3a, 0xfd, 0xe4, - 0x75, 0x46, 0x99, 0x59, 0x5c, 0x7a, 0x9b, 0x9d, 0x11, 0xb4, 0x76, 0x45, - 
0x06, 0x45, 0x41, 0x1e, 0x94, 0xb7, 0xd9, 0xb8, 0xcb, 0xbf, 0x71, 0xec, - 0xba, 0x9f, 0x4a, 0x1b, 0xbc, 0xfd, 0x5c, 0x06, 0x64, 0xfd, 0x31, 0x52, - 0xc0, 0xe4, 0xa7, 0x21, 0x2f, 0x22, 0x92, 0xf0, 0x51, 0x33, 0x92, 0x1d, - 0x40, 0x3c, 0x01, 0x81, 0x3b, 0xa8, 0x2e, 0x4e, 0xb6, 0x60, 0xcd, 0xd4, - 0x36, 0x3b, 0x2e, 0x1d, 0x5e, 0x43, 0xd9, 0x94, 0xf1, 0x51, 0xd3, 0x59, - 0x94, 0x6a, 0xd5, 0x5f, 0x1f, 0xd3, 0xa6, 0x55, 0xda, 0x15, 0xf1, 0x3e, - 0x2c, 0x60, 0xb8, 0xc3, 0xda, 0x0e, 0x56, 0x53, 0xea, 0xcd, 0x39, 0x27, - 0x94, 0x86, 0x94, 0xb2, 0x5b, 0xd8, 0x9a, 0x12, 0x94, 0xb0, 0xb6, 0x77, - 0x28, 0xba, 0xde, 0xb6, 0x60, 0x4d, 0x2b, 0x6e, 0x3d, 0xf6, 0xf1, 0x48, - 0xf7, 0x77, 0xa1, 0x49, 0xe0, 0x9f, 0x1e, 0xc9, 0xe6, 0xcb, 0x95, 0x26, - 0x61, 0x5a, 0xc9, 0xed, 0x49, 0x40, 0x17, 0x57, 0x15, 0xfc, 0x3c, 0xb8, - 0x28, 0x79, 0xb8, 0x42, 0x2a, 0xf9, 0xd4, 0x19, 0xb9, 0x5f, 0x41, 0xc2, - 0x25, 0xd7, 0x88, 0x34, 0xb3, 0x25, 0x4e, 0xca, 0xff, 0x9e, 0x59, 0x9a, - 0x33, 0xc8, 0x12, 0xf9, 0xd5, 0x70, 0xc0, 0x8b, 0x43, 0x13, 0xc4, 0x8d, - 0x45, 0x99, 0xaa, 0xd7, 0xeb, 0xb1, 0xe9, 0xb7, 0x5b, 0xab, 0x48, 0xd1, - 0x26, 0x60, 0x8c, 0x13, 0x55, 0x8a, 0x41, 0xd3, 0x68, 0x58, 0xd4, 0xa6, - 0x30, 0x6e, 0x88, 0x3e, 0x81, 0x6e, 0x61, 0x06, 0x13, 0x66, 0xd5, 0x8e, - 0x5d, 0x87, 0x4f, 0xd9, 0xb1, 0x66, 0xb3, 0xc5, 0x88, 0xa9, 0xc0, 0x73, - 0xcb, 0x7f, 0x42, 0xec, 0x96, 0x64, 0xad, 0x72, 0x85, 0x72, 0xaf, 0xeb, - 0xa9, 0xc4, 0x17, 0x86, 0xab, 0xe7, 0x23, 0xd7, 0x96, 0xf7, 0xb2, 0xb3, - 0x51, 0xe1, 0x9a, 0x3b, 0x0e, 0xaf, 0x89, 0xca, 0x7b, 0xf1, 0x70, 0x7b, - 0xc7, 0x82, 0xfc, 0xc7, 0x6c, 0x37, 0xd9, 0x7b, 0x82, 0x0f, 0x94, 0xcf, - 0xd1, 0xa9, 0x33, 0xc2, 0xa4, 0xab, 0xed, 0xad, 0xee, 0x64, 0x5d, 0x04, - 0xf2, 0xcb, 0x8e, 0x99, 0x22, 0x33, 0x69, 0x85, 0x85, 0xb6, 0x1a, 0x9b, - 0x09, 0x18, 0xbe, 0xcd, 0x63, 0xf6, 0x5d, 0x52, 0xbc, 0x26, 0x99, 0x3e, - 0x52, 0xe5, 0x0c, 0xc5, 0xee, 0xdd, 0xbb, 0x07, 0xbc, 0x38, 0xc1, 0x67, - 0x96, 0x8c, 0xe6, 0xe4, 0x18, 0xfa, 0x07, 0x91, 0x48, 0xef, 0x9c, 0x70, - 
0x9d, 0x5b, 0x1c, 0x0e, 0xd5, 0xd3, 0x59, 0xee, 0x44, 0x13, 0xf7, 0x00, - 0xa6, 0x20, 0xad, 0x65, 0x1d, 0xb7, 0x96, 0x2f, 0x79, 0x7b, 0x04, 0xa3, - 0x10, 0x90, 0x29, 0x8c, 0xa3, 0x2e, 0x14, 0x39, 0xd3, 0xe4, 0x6e, 0x46, - 0xf7, 0x6e, 0x96, 0x68, 0xd9, 0xef, 0x45, 0xf7, 0x3c, 0xcd, 0xc7, 0xca, - 0x33, 0x64, 0x8e, 0x31, 0x80, 0x48, 0x7b, 0x7c, 0x81, 0x9a, 0x48, 0xff, - 0xd5, 0x0d, 0x74, 0xe7, 0x77, 0x46, 0x61, 0x9b, 0xde, 0xed, 0x83, 0xe9, - 0x4f, 0x92, 0xc1, 0x16, 0xad, 0x44, 0x40, 0x23, 0xce, 0x04, 0x31, 0xbf, - 0xcf, 0xe2, 0x5a, 0x68, 0x5a, 0xf4, 0x0f, 0xe1, 0x87, 0x79, 0xb0, 0x32, - 0x0b, 0x09, 0x6b, 0x72, 0x2b, 0x16, 0x06, 0x67, 0x82, 0x0b, 0x92, 0x35, - 0xdb, 0x4c, 0xe2, 0x4a, 0x60, 0x99, 0xaf, 0x52, 0x10, 0x4b, 0xa5, 0xcf, - 0xac, 0x66, 0x49, 0x56, 0x04, 0xc0, 0xd6, 0x6f, 0x62, 0x53, 0x6f, 0xcb, - 0x62, 0xe9, 0xa5, 0xca, 0x18, 0x8e, 0x86, 0x3f, 0x36, 0xfd, 0xea, 0x55, - 0x16, 0x6d, 0x6c, 0x6a, 0x8f, 0xa7, 0x9c, 0x70, 0x15, 0xd7, 0xf4, 0x57, - 0x68, 0x04, 0x84, 0x60, 0x3b, 0xb0, 0x32, 0xc4, 0xea, 0x9d, 0x70, 0xb9, - 0xa6, 0x34, 0xe5, 0xfa, 0xa1, 0x24, 0x54, 0x7f, 0xef, 0xac, 0xb4, 0x5f, - 0xa0, 0xc0, 0x40, 0x3f, 0x73, 0xdf, 0x56, 0xa6, 0xd9, 0x17, 0xf4, 0xff, - 0x50, 0xae, 0x21, 0x0d, 0x5a, 0xe0, 0xb0, 0xf9, 0x5b, 0x7a, 0x61, 0x6e, - 0xa6, 0x85, 0x85, 0xbf, 0x19, 0x03, 0xe2, 0x74, 0x1f, 0x03, 0x70, 0x76, - 0x3c, 0xed, 0x02, 0x7d, 0xfa, 0xf9, 0x1e, 0x17, 0xdd, 0x42, 0x30, 0xf0, - 0x32, 0x47, 0x46, 0xae, 0xf5, 0x64, 0xe6, 0x5e, 0x2b, 0x40, 0x86, 0x97, - 0xb1, 0x24, 0x52, 0x69, 0x67, 0x79, 0x8e, 0x0d, 0xcc, 0x07, 0xcb, 0x72, - 0x29, 0xe9, 0xba, 0x2d, 0xf7, 0xcb, 0xe3, 0x86, 0x06, 0xaa, 0x6d, 0x79, - 0xf8, 0xb6, 0x93, 0x0a, 0x9c, 0x97, 0xef, 0x47, 0x37, 0x13, 0x2e, 0x6b, - 0xfd, 0x59, 0x0c, 0xc9, 0x5e, 0x5e, 0xcd, 0x71, 0x6f, 0x99, 0x0d, 0x88, - 0x9d, 0xbb, 0x7c, 0x2b, 0x22, 0xd5, 0xbe, 0xee, 0x26, 0x1c, 0xe1, 0xad, - 0xc8, 0x4d, 0x5f, 0x6b, 0xd1, 0xf4, 0x30, 0x4d, 0x46, 0x1d, 0x54, 0x11, - 0x4b, 0xa0, 0x7f, 0x94, 0x71, 0xc0, 0x44, 0x4a, 0x42, 0x11, 0xf5, 0x89, - 
0xec, 0xb5, 0x24, 0x45, 0xf1, 0xf0, 0x30, 0x54, 0xf8, 0x62, 0xdb, 0x58, - 0x3d, 0x7c, 0x2a, 0x82, 0xe5, 0xbe, 0x13, 0xcf, 0xdc, 0x88, 0xfb, 0xd3, - 0x1e, 0x4d, 0xa5, 0x3e, 0xad, 0x95, 0xa2, 0xe6, 0x48, 0x73, 0xb2, 0xbe, - 0x96, 0xef, 0x8e, 0x0b, 0x28, 0xf9, 0xbe, 0x2a, 0xd6, 0x68, 0x9e, 0x9c, - 0x7b, 0x5a, 0xaf, 0x20, 0xf6, 0xa5, 0x3f, 0x99, 0x61, 0x57, 0xe8, 0x1c, - 0xb2, 0xc3, 0xd0, 0x7f, 0x2c, 0xb5, 0xe9, 0x66, 0x8e, 0x88, 0xec, 0x13, - 0x51, 0xbc, 0x8e, 0xb6, 0xe2, 0x91, 0xbf, 0x5e, 0x8c, 0x1c, 0xdd, 0x0e, - 0x0a, 0x13, 0x06, 0xc6, 0x62, 0x1c, 0x41, 0x8d, 0xa1, 0xc0, 0xf2, 0xfa, - 0x76, 0x35, 0xaa, 0x77, 0x06, 0x3f, 0x76, 0x50, 0xf6, 0x43, 0xf2, 0x25, - 0x00, 0x79, 0xde, 0xca, 0xa1, 0x06, 0x6f, 0xb4, 0x17, 0x4b, 0x99, 0x5a, - 0x00, 0x32, 0xd6, 0xb0, 0x1f, 0x80, 0x53, 0x16, 0xaa, 0x87, 0x72, 0xa2, - 0x34, 0xaf, 0x90, 0x3d, 0x60, 0xde, 0x0e, 0x6d, 0x83, 0xda, 0xb2, 0x11, - 0x2f, 0x39, 0xdc, 0x1a, 0xfe, 0x51, 0x74, 0x10, 0x3c, 0x41, 0xd5, 0x41, - 0x65, 0x4a, 0xa0, 0x11, 0xde, 0x95, 0x34, 0xef, 0xa0, 0xc9, 0xa8, 0xd3, - 0xcb, 0xb9, 0x7d, 0x51, 0x7d, 0xff, 0x26, 0x88, 0xd8, 0x29, 0x0e, 0xa0, - 0xd4, 0xa7, 0x07, 0x33, 0xe7, 0x7d, 0x59, 0x9f, 0x35, 0xc1, 0xb5, 0xf7, - 0x78, 0x78, 0x84, 0xf0, 0x20, 0x41, 0x3f, 0x02, 0x7d, 0x41, 0x90, 0x01, - 0x8d, 0xa4, 0xd8, 0xd7, 0xeb, 0x56, 0x7f, 0x38, 0xbc, 0x1e, 0x15, 0xdf, - 0xfc, 0x34, 0xe7, 0x99, 0xd4, 0x92, 0xd5, 0xf3, 0x9e, 0x16, 0x0b, 0x5c, - 0xeb, 0xb6, 0x78, 0xac, 0x84, 0x06, 0x8e, 0xfe, 0xd0, 0x7c, 0xce, 0x4a, - 0x43, 0x49, 0x3b, 0xe1, 0xab, 0x57, 0xc0, 0x12, 0xd6, 0x9d, 0xa4, 0xee, - 0x91, 0x10, 0x81, 0xe2, 0xfc, 0x02, 0x26, 0x7a, 0xca, 0x81, 0x5b, 0x2f, - 0x34, 0x51, 0xdd, 0x25, 0x4d, 0xc8, 0xf9, 0x3e, 0x59, 0x0f, 0x3d, 0x64, - 0x51, 0xbf, 0x42, 0xc4, 0x92, 0x9d, 0x8f, 0x39, 0x8a, 0x31, 0x09, 0x24, - 0x19, 0x44, 0xc0, 0xf4, 0xea, 0xca, 0x59, 0xcb, 0x86, 0x6c, 0x02, 0x7a, - 0xe5, 0x30, 0x79, 0xe2, 0x2c, 0x76, 0x08, 0x8f, 0x98, 0x0d, 0x4d, 0x12, - 0xc3, 0x98, 0xb4, 0x24, 0x04, 0x4f, 0x51, 0xec, 0x4e, 0xec, 0xbd, 0x8c, - 
0xc4, 0x79, 0x51, 0x7f, 0xe1, 0xce, 0x76, 0x28, 0x0b, 0x7b, 0xc5, 0x3f, - 0x5b, 0x48, 0x19, 0x76, 0x68, 0x31, 0x8e, 0x28, 0xff, 0x18, 0x24, 0xe3, - 0x91, 0xe7, 0x49, 0x0d, 0x10, 0xbd, 0x00, 0xc6, 0x58, 0xfd, 0xb6, 0x88, - 0x63, 0xbd, 0xb4, 0x4b, 0xb8, 0xed, 0xdd, 0xb7, 0x53, 0xce, 0x89, 0xdb, - 0x7f, 0xf4, 0xc3, 0x21, 0x31, 0xad, 0x20, 0x78, 0x06, 0x71, 0xaf, 0xc0, - 0xe3, 0xdc, 0xb8, 0xf4, 0x80, 0xc8, 0x33, 0x1d, 0x8b, 0xff, 0x5a, 0x92, - 0x68, 0x4d, 0xc1, 0x5b, 0x58, 0x3e, 0xf6, 0x7f, 0xba, 0x42, 0xa5, 0x6d, - 0xec, 0x03, 0x36, 0xc9, 0x3f, 0x83, 0x1f, 0x0c, 0x33, 0x57, 0x6a, 0x43, - 0x5f, 0x11, 0x72, 0x19, 0x2c, 0xda, 0x71, 0x58, 0xf2, 0x50, 0x50, 0x06, - 0x97, 0xd0, 0xdf, 0xd1, 0x4f, 0x0b, 0x00, 0x1a, 0xea, 0x85, 0x3b, 0x37, - 0x2f, 0xf0, 0x40, 0x52, 0xd9, 0x2a, 0xe8, 0x54, 0xa5, 0xee, 0x0f, 0x49, - 0x74, 0x39, 0x96, 0x5d, 0x60, 0x8f, 0x14, 0x59, 0x86, 0x59, 0x86, 0xfb, - 0x67, 0x71, 0x5c, 0x26, 0x5f, 0xe9, 0xab, 0x32, 0x77, 0x83, 0xdf, 0x02, - 0x19, 0x85, 0xae, 0x4d, 0x7d, 0x9c, 0x8d, 0x4f, 0x61, 0x05, 0x3c, 0x0c, - 0xc6, 0x74, 0x9e, 0x36, 0x33, 0xb8, 0x14, 0x85, 0xab, 0xa2, 0x0b, 0x5d, - 0x22, 0xf2, 0x50, 0x3e, 0xa4, 0x88, 0xac, 0x67, 0xf9, 0x06, 0xe5, 0x30, - 0x8e, 0xf9, 0x67, 0x34, 0xd5, 0x94, 0x5b, 0x35, 0xb7, 0x3d, 0x39, 0x5f, - 0x4e, 0xae, 0xfe, 0xf7, 0x57, 0xd3, 0x95, 0x7b, 0x0a, 0xd9, 0x92, 0x4a, - 0x66, 0x29, 0xa0, 0x18, 0x35, 0x54, 0x14, 0x44, 0x79, 0x72, 0xc3, 0xbc, - 0xa8, 0x1a, 0xd3, 0xa3, 0xbe, 0x6f, 0x9e, 0xcc, 0x68, 0xb6, 0x5f, 0xd4, - 0x42, 0xab, 0xe8, 0x09, 0x60, 0x57, 0x2e, 0xb2, 0x9a, 0x5b, 0x62, 0x38, - 0xfb, 0x0a, 0x35, 0x9c, 0x4f, 0xf7, 0xe0, 0xd2, 0x06, 0x04, 0x1f, 0x79, - 0x7f, 0xa7, 0x7b, 0xd3, 0x63, 0xc9, 0xbd, 0x16, 0x58, 0x38, 0x7b, 0xaa, - 0x08, 0xf3, 0x14, 0x6c, 0x25, 0xf8, 0xa5, 0xe9, 0x4b, 0x45, 0x34, 0x89, - 0x76, 0x74, 0xcb, 0x41, 0x9c, 0x2a, 0xd9, 0xca, 0xb3, 0x12, 0x46, 0x6d, - 0x85, 0x4d, 0x63, 0x2d, 0x24, 0x1b, 0x19, 0x6b, 0x3f, 0x61, 0x6b, 0x4b, - 0x15, 0x83, 0x2d, 0x8f, 0x61, 0xab, 0xd1, 0x55, 0x93, 0x4e, 0x26, 0xd6, - 
0x7a, 0x0a, 0x8a, 0xff, 0x58, 0x44, 0xf7, 0x39, 0x31, 0x1a, 0xab, 0xa6, - 0x98, 0x31, 0x41, 0x03, 0xb6, 0xc9, 0xf5, 0x50, 0xe3, 0x7b, 0xc0, 0x59, - 0x74, 0x60, 0x91, 0xb4, 0x79, 0x02, 0x25, 0xc1, 0xb5, 0xbd, 0xcb, 0x6e, - 0x40, 0x61, 0xfe, 0x68, 0x29, 0x83, 0x1b, 0xd2, 0x49, 0xe1, 0x31, 0xde, - 0xdd, 0x53, 0xb0, 0xb8, 0x96, 0xa2, 0xce, 0xea, 0x8b, 0x66, 0x2c, 0x5a, - 0x80, 0x51, 0x0b, 0xc1, 0x2d, 0x9a, 0xfa, 0x9d, 0xc6, 0xcc, 0x2b, 0xbb, - 0xaa, 0xce, 0x98, 0xaa, 0x26, 0x15, 0x8f, 0x4a, 0xe7, 0xdb, 0x17, 0x6c, - 0xe5, 0x58, 0xc9, 0xae, 0xe4, 0x9c, 0x1d, 0xab, 0x59, 0x84, 0x3e, 0x27, - 0x76, 0x03, 0xe3, 0x82, 0x64, 0x6f, 0x6e, 0x6f, 0x63, 0xd2, 0x12, 0x84, - 0xe3, 0x9b, 0x9d, 0x7e, 0x53, 0x1a, 0x54, 0x8d, 0xc1, 0xf0, 0x94, 0xae, - 0xad, 0x8f, 0x6a, 0x12, 0x4e, 0xa7, 0x30, 0xdb, 0x55, 0xbe, 0x09, 0xe2, - 0x56, 0x08, 0xc4, 0x3a, 0xb0, 0x55, 0xb0, 0x24, 0x96, 0xa6, 0x3e, 0x28, - 0xd0, 0x35, 0xfb, 0x58, 0x47, 0xba, 0x2d, 0x51, 0xbb, 0x72, 0x20, 0x59, - 0xd2, 0xdd, 0x9c, 0xe2, 0xb5, 0x31, 0x90, 0xac, 0x74, 0x5d, 0x9f, 0x3d, - 0x8c, 0x1c, 0x96, 0xc0, 0x60, 0x61, 0xa8, 0xbb, 0x3c, 0xb3, 0x6d, 0x6d, - 0x92, 0x4a, 0xca, 0xbb, 0x60, 0x5e, 0x82, 0x0d, 0x7f, 0xab, 0x4b, 0x36, - 0x4c, 0x93, 0x0d, 0x88, 0x71, 0xaf, 0xb6, 0x53, 0xb0, 0x38, 0xb4, 0x1c, - 0xb4, 0x7b, 0xd4, 0x13, 0x32, 0x6c, 0xe4, 0xee, 0x6a, 0xb3, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x88, 0x83, 0x91, 0x4c, - 0x2e, 0x1e, 0xbe, 0xa4, 0xb5, 0x96, 0xff, 0x67, 0x50, 0xe9, 0x81, 0x0e, - 0x5d, 0x0e, 0xad, 0xc4, 0x1f, 0xeb, 0x98, 0x38, 0xcc, 0x54, 0x9d, 0x27, - 0xa6, 0xf1, 0x37, 0x23, 0xce, 0xb4, 0x5b, 0xff, 0x12, 0xb1, 0xb8, 0x35, - 0x5e, 0x03, 0x02, 0x04, 0xad, 0xa6, 0x6f, 0x43, 0xfc, 0xe4, 0xbe, 0x0c, - 0xe0, 0x93, 0xd5, 0xef, 0x09, 0xfa, 0x04, 0xe9, 0x5a, 0x22, 0xd4, 0x81, - 0xc1, 0x27, 0x4f, 0x5f, 0x6e, 0x83, 0x5a, 0x8a, 0x2d, 0xbb, 0x8f, 0xa4, - 0x91, 0xcc, 0x82, 0x37, 0x3b, 0x14, 0x98, 0x58, 0x86, 0x44, 0xb7, 0xa9, - 0x58, 0xf3, 0x3d, 0x49, 0x71, 0x7a, 0x37, 0xcd, 0xc5, 0xb9, 0xc9, 0x46, - 
0xd5, 0xd4, 0x17, 0x60, 0x1a, 0xbf, 0x93, 0xa9, 0xe9, 0x08, 0x25, 0x40, - 0xd1, 0x65, 0xae, 0xdd, 0x85, 0xa6, 0xcc, 0x06, 0xca, 0x91, 0xe1, 0x63, - 0xf9, 0x6b, 0x15, 0xa8, 0x04, 0x61, 0xd2, 0xa6, 0x59, 0x21, 0x1a, 0x1c, - 0xc9, 0xa9, 0xa9, 0xc8, 0x54, 0x86, 0xac, 0xa5, 0xd6, 0x95, 0x39, 0x83, - 0x4b, 0x6b, 0x69, 0xa6, 0x94, 0xd8, 0xc0, 0xfb, 0x66, 0x0f, 0x3a, 0xbe, - 0xc7, 0xf3, 0xcc, 0xd5, 0xb7, 0x1b, 0x60, 0x02, 0x95, 0x45, 0x4a, 0x12, - 0xc9, 0xfe, 0x75, 0x7c, 0x1b, 0xb2, 0x86, 0x96, 0x28, 0x07, 0xa2, 0x18, - 0x7a, 0x6c, 0x90, 0x6f, 0x32, 0x0c, 0xc8, 0x34, 0xbc, 0x75, 0x4d, 0x96, - 0x03, 0xa6, 0x0f, 0x3d, 0x35, 0x1b, 0x64, 0x76, 0x95, 0x55, 0xff, 0x25, - 0xd4, 0x71, 0xcf, 0x8a, 0x73, 0x6d, 0x9b, 0x74, 0xfe, 0xff, 0x9e, 0x31, - 0x9e, 0x5e, 0x89, 0x5a, 0x1a, 0xeb, 0x8d, 0x06, 0x3b, 0xf2, 0xf6, 0x06, - 0x5d, 0xc3, 0xba, 0x04, 0xca, 0x0f, 0x07, 0x2c, 0xbd, 0x54, 0x52, 0xd9, - 0x1c, 0x2f, 0x0e, 0x13, 0x5e, 0x25, 0x13, 0xe5, 0xd7, 0x8e, 0x19, 0x42, - 0x1b, 0x52, 0x2e, 0xd2, 0x8f, 0xc5, 0x8e, 0x1c, 0x34, 0x2e, 0x4d, 0xd5, - 0x51, 0x7d, 0x91, 0x64, 0xbc, 0xb4, 0x0d, 0xc9, 0xe7, 0x1c, 0x6c, 0x47, - 0xe9, 0xbb, 0x67, 0x9a, 0x96, 0xde, 0xad, 0xff, 0xba, 0x35, 0x25, 0x6d, - 0x57, 0xa1, 0x93, 0xfe, 0xe2, 0x8d, 0x02, 0xeb, 0xf0, 0x2f, 0x54, 0xfd, - 0x46, 0xc0, 0x8f, 0xea, 0x32, 0x7b, 0x57, 0xda, 0xe0, 0x29, 0x1c, 0x19, - 0xba, 0xa4, 0xa6, 0x1c, 0x6e, 0xeb, 0x7a, 0xa8, 0x8a, 0xe1, 0xc6, 0x12, - 0xf5, 0xa3, 0x24, 0x1a, 0x96, 0xe1, 0x02, 0xc0, 0xf4, 0x7d, 0x14, 0x72, - 0xd6, 0x12, 0x8e, 0x6c, 0x8c, 0xd2, 0xfd, 0x88, 0x78, 0x48, 0xf3, 0x74, - 0x38, 0x86, 0x04, 0x68, 0x6d, 0x7c, 0xf4, 0x4c, 0x40, 0x17, 0xf6, 0x8f, - 0xb2, 0x6c, 0xd7, 0x66, 0x66, 0x3b, 0x38, 0xa1, 0xbb, 0x1e, 0xff, 0x72, - 0x1f, 0x64, 0x56, 0xc2, 0x53, 0x1c, 0x6f, 0x84, 0x2b, 0xbd, 0x23, 0xd9, - 0xb4, 0x6b, 0x87, 0x79, 0x99, 0xec, 0x81, 0x8d, 0x1a, 0x58, 0x00, 0xf0, - 0x2c, 0xc1, 0xc4, 0x57, 0x74, 0x0f, 0xce, 0x32, 0xe2, 0x5e, 0xae, 0x02, - 0x1c, 0xe8, 0x94, 0xc6, 0x44, 0xaa, 0x7b, 0x9a, 0x32, 0xb5, 0x33, 0xac, - 
0xfc, 0x41, 0x65, 0xf2, 0xca, 0xcc, 0xc6, 0x74, 0x36, 0xb2, 0xc9, 0x0e, - 0x26, 0x73, 0xae, 0x68, 0x98, 0xa4, 0x36, 0xe8, 0x98, 0x39, 0xad, 0x05, - 0x3f, 0xca, 0x12, 0xcc, 0x86, 0xfd, 0xc6, 0x57, 0xf0, 0x02, 0x4e, 0x45, - 0xcb, 0x54, 0x34, 0xdd, 0x66, 0x26, 0xab, 0xda, 0x95, 0xa5, 0x85, 0xec, - 0x02, 0x03, 0xb6, 0x29, 0x30, 0x11, 0x40, 0x54, 0x9a, 0x6a, 0x87, 0x2e, - 0x97, 0xa1, 0x7e, 0xeb, 0x34, 0x39, 0x78, 0x3b, 0xbc, 0x5f, 0x8e, 0xc5, - 0x0e, 0x21, 0x29, 0x4b, 0xb7, 0x1b, 0xe7, 0x14, 0x08, 0x34, 0xb7, 0x9a, - 0x0a, 0xb2, 0x6c, 0x25, 0x76, 0xb5, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xe2, 0x7d, 0x48, 0xdd, 0x1a, 0xcb, 0xb6, 0x5c, - 0x6f, 0xbe, 0x32, 0x9d, 0xd2, 0x2b, 0x9e, 0x10, 0x65, 0xd7, 0x1e, 0xec, - 0xc8, 0xb5, 0x10, 0x64, 0x8f, 0x5d, 0xef, 0xfe, 0x9b, 0x6c, 0x9b, 0x02, - 0x6a, 0x6d, 0xf7, 0x98, 0x7b, 0xf7, 0x17, 0xfd, 0x49, 0x1b, 0x6a, 0xc5, - 0x3c, 0xa0, 0xfc, 0xa8, 0x94, 0x95, 0xed, 0x48, 0x81, 0x04, 0x53, 0x8c, - 0xbe, 0xe4, 0x4e, 0xaf, 0xc1, 0x9d, 0xc3, 0xdf, 0xc2, 0xb5, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xae, 0xb0, 0x67, 0x5b, - 0x99, 0x26, 0x07, 0xfb, 0x6c, 0x98, 0xfe, 0xbb, 0x35, 0xf1, 0x5b, 0x02, - 0xc6, 0x03, 0xfc, 0x97, 0x21, 0x16, 0x8d, 0x48, 0xd4, 0x4f, 0x03, 0xd9, - 0x7c, 0x9f, 0xa6, 0x1e, 0x6f, 0x5a, 0x58, 0x17, 0x6d, 0x26, 0xb4, 0xc5, - 0x4c, 0xe5, 0x93, 0x0a, 0x9c, 0xb2, 0x40, 0xbc, 0x60, 0xc7, 0x2b, 0xdb, - 0x3b, 0xc0, 0x3c, 0x5c, 0x44, 0x4b, 0xdd, 0x58, 0xbe, 0xdc, 0xc5, 0xb5, - 0x6a, 0xf9, 0x5e, 0x73, 0x07, 0x58, 0x8f, 0x45, 0x7b, 0xac, 0xba, 0x82, - 0x96, 0x49, 0x4d, 0x22, 0x70, 0x7a, 0x3d, 0x69, 0x26, 0x8b, 0x88, 0x13, - 0xf1, 0x8d, 0xfc, 0xdf, 0x73, 0xd5, 0x20, 0x3c, 0x52, 0x92, 0x16, 0xb1, - 0x6e, 0xb7, 0x41, 0xbe, 0x23, 0x9b, 0x51, 0xf7, 0xc9, 0x38, 0x8a, 0xc7, - 0x6e, 0x68, 0x82, 0xd1, 0x59, 0x50, 0x09, 0x4b, 0x44, 0x3b, 0x28, 0x06, - 0x60, 0x75, 0x7a, 0xe5, 0xa1, 0x36, 0xbb, 0x62, 0x44, 0xe3, 0xd0, 0x68, - 0x14, 0xea, 0xad, 0xf9, 0x18, 0xcc, 0xd5, 0x42, 0x5d, 0x18, 0x53, 0xe6, - 
0x4a, 0xfe, 0xde, 0x32, 0xe1, 0xe7, 0xf8, 0x8c, 0x9d, 0x35, 0xf4, 0x4a, - 0xcb, 0x23, 0x2f, 0x91, 0xb5, 0xb0, 0xb2, 0x01, 0x5c, 0x22, 0x8c, 0x42, - 0x42, 0xd5, 0xf0, 0x82, 0x6f, 0x9f, 0x64, 0xe5, 0x99, 0x4d, 0x36, 0x0b, - 0xfc, 0x78, 0x38, 0x30, 0x47, 0x8f, 0x0b, 0x57, 0x86, 0x4f, 0x1b, 0xc9, - 0x05, 0x0e, 0x08, 0xc4, 0xf4, 0xab, 0x9e, 0x90, 0xb4, 0x4f, 0x36, 0x54, - 0xe8, 0xa1, 0x3f, 0x90, 0xd2, 0xf3, 0xb4, 0xb4, 0xdd, 0xf3, 0x43, 0x2f, - 0xc4, 0x43, 0xbb, 0x99, 0x8e, 0xb8, 0x61, 0x59, 0x5e, 0xfa, 0x1b, 0x3c, - 0xc1, 0xeb, 0x9d, 0x35, 0x62, 0x34, 0x82, 0x45, 0xef, 0x41, 0xe9, 0xfc, - 0x35, 0xae, 0xb4, 0x0b, 0xce, 0x52, 0x5b, 0x40, 0x7d, 0xdd, 0x86, 0x83, - 0x52, 0x74, 0x77, 0x11, 0xc2, 0x9b, 0x8c, 0xa3, 0x63, 0xc2, 0x2d, 0xdd, - 0x8c, 0x76, 0x13, 0xc5, 0xc0, 0xde, 0x3e, 0x6b, 0xe1, 0x0f, 0xeb, 0x0f, - 0x0a, 0x25, 0x41, 0x2f, 0x8b, 0x4a, 0x98, 0x30, 0xcb, 0x1a, 0x43, 0xa3, - 0xc1, 0xcc, 0x44, 0x9a, 0x6c, 0xdc, 0x92, 0x40, 0xc4, 0x7a, 0x1f, 0x8a, - 0x6f, 0x74, 0xf3, 0xf5, 0x52, 0x72, 0xf7, 0x81, 0x6e, 0x74, 0x75, 0xe6, - 0xea, 0xd9, 0x57, 0x91, 0xae, 0xf2, 0x3f, 0x35, 0x4b, 0x99, 0xd9, 0x3f, - 0x85, 0xe0, 0x92, 0xaa, 0x35, 0xac, 0x28, 0xbf, 0x43, 0xb8, 0xad, 0xc7, - 0xc5, 0xf6, 0x15, 0x2f, 0x7c, 0xfb, 0x34, 0x48, 0xf3, 0x04, 0x12, 0xf4, - 0x2f, 0x92, 0x74, 0xc8, 0xea, 0xbc, 0x24, 0x6e, 0x3b, 0x0e, 0x9e, 0xf0, - 0xaf, 0x02, 0x97, 0x95, 0xbc, 0x90, 0x7f, 0xc4, 0xf8, 0xe2, 0x04, 0x9a, - 0x8f, 0xfc, 0xbc, 0x50, 0xfe, 0xf7, 0x89, 0x17, 0x2c, 0xdb, 0xd6, 0x5e, - 0xbf, 0xd9, 0x8e, 0x89, 0x8b, 0x06, 0x1d, 0x0b, 0x81, 0x2a, 0x55, 0x5c, - 0x5f, 0xb6, 0xa6, 0xa5, 0xd2, 0xaa, 0x79, 0x9c, 0x39, 0x31, 0x76, 0x03, - 0x98, 0x42, 0xd6, 0xb7, 0x37, 0x1f, 0xc8, 0x51, 0x8a, 0x1c, 0x5d, 0xcd, - 0x9c, 0x78, 0xa4, 0x22, 0x6e, 0x12, 0x10, 0x0a, 0x33, 0xc9, 0xe0, 0xfe, - 0xfc, 0xe8, 0x15, 0xe7, 0xef, 0xd8, 0x6d, 0xc7, 0xc9, 0xc2, 0x8e, 0x18, - 0x82, 0x2f, 0xa6, 0x09, 0x8a, 0xdc, 0x41, 0x6b, 0x89, 0xea, 0xd9, 0xd6, - 0x96, 0xfd, 0xba, 0x6e, 0xae, 0x2d, 0x0c, 0xf9, 0x3c, 0x4c, 0x1a, 0xfa, - 
0x98, 0x83, 0x51, 0x45, 0x9d, 0x1e, 0xa5, 0xc1, 0x81, 0x54, 0x37, 0x5d, - 0x28, 0xca, 0xa6, 0xfe, 0x48, 0xf4, 0x77, 0x17, 0x92, 0x1d, 0x0c, 0xb3, - 0x39, 0x77, 0x22, 0xd9, 0xc7, 0xc2, 0xaf, 0x70, 0x0a, 0xd3, 0xa6, 0x57, - 0x69, 0xfb, 0xb9, 0xe0, 0xc4, 0x73, 0x7a, 0x68, 0xee, 0x27, 0x6e, 0x3a, - 0x6e, 0xae, 0x32, 0xf6, 0x09, 0xb3, 0x0b, 0x40, 0x72, 0xc6, 0x26, 0x6e, - 0xc5, 0x88, 0x6b, 0xce, 0x99, 0x88, 0x60, 0x6f, 0x6e, 0xa9, 0xe6, 0xd7, - 0x35, 0x5e, 0x3b, 0x36, 0x0d, 0x14, 0xb8, 0x2f, 0xde, 0x67, 0xc8, 0x2e, - 0x52, 0xc1, 0xf1, 0x58, 0x87, 0x32, 0x2a, 0x52, 0x21, 0x27, 0x1e, 0x04, - 0xed, 0xc4, 0x82, 0xd7, 0xeb, 0x85, 0x12, 0x3e, 0xea, 0xd0, 0x07, 0xa0, - 0x80, 0x48, 0xe9, 0xbd, 0x9b, 0x3a, 0x8e, 0x8b, 0xa0, 0xfc, 0x07, 0xf0, - 0x69, 0x4e, 0xc7, 0x1d, 0xd9, 0x9a, 0x73, 0x18, 0x63, 0xb8, 0xe6, 0x4a, - 0xa0, 0x81, 0xf0, 0xdb, 0xb9, 0x88, 0xf4, 0x2b, 0x1f, 0x0d, 0xda, 0x31, - 0xc0, 0xb0, 0x55, 0x79, 0x56, 0x48, 0x22, 0xbb, 0x49, 0x7f, 0xb1, 0xf1, - 0xf6, 0x6f, 0x42, 0xd3, 0xba, 0x68, 0x3a, 0x8f, 0xe7, 0xac, 0x53, 0x30, - 0x96, 0xec, 0x51, 0x7d, 0xfc, 0xc0, 0x35, 0xe9, 0x59, 0xe7, 0x0e, 0xed, - 0x29, 0x46, 0x50, 0x3c, 0x4b, 0x36, 0xc6, 0x2a, 0xaa, 0x3b, 0xbe, 0xce, - 0xd3, 0xda, 0x4d, 0x65, 0xb0, 0xe8, 0x52, 0x68, 0xf0, 0x23, 0xde, 0x02, - 0x77, 0xb3, 0xcc, 0xce, 0x78, 0xdd, 0x8c, 0xf8, 0xbe, 0x5d, 0x0d, 0xa9, - 0xb6, 0x96, 0x85, 0xbf, 0x92, 0x2a, 0x6b, 0x1b, 0xe8, 0x76, 0x05, 0x13, - 0x30, 0xd8, 0x3d, 0x80, 0xaa, 0xa2, 0xa3, 0xbc, 0x07, 0xba, 0x9c, 0x75, - 0x5b, 0x42, 0x03, 0xd8, 0xde, 0x42, 0x44, 0xf7, 0x29, 0x43, 0x29, 0x0d, - 0x48, 0x2b, 0x02, 0xd0, 0xcc, 0xe9, 0x17, 0x47, 0x23, 0x73, 0x6d, 0xc5, - 0x91, 0x6d, 0x4e, 0xc5, 0xcf, 0xc3, 0x58, 0xaf, 0x6e, 0xa2, 0x9e, 0xe7, - 0xe1, 0x88, 0xac, 0x62, 0xff, 0xbc, 0x69, 0x57, 0xad, 0x0f, 0x08, 0xf8, - 0x32, 0xfd, 0x79, 0xcb, 0x30, 0xbc, 0xd2, 0xe5, 0x20, 0xd9, 0x0f, 0xd1, - 0x33, 0xbf, 0xe4, 0x49, 0x7a, 0x2b, 0x5c, 0xb3, 0x63, 0x13, 0x4d, 0xed, - 0x17, 0xe7, 0x5b, 0xf4, 0x36, 0x9d, 0x3c, 0x4e, 0x51, 0xb2, 0xf7, 0xf2, - 
0xcd, 0xfb, 0xec, 0x42, 0x79, 0x46, 0xae, 0x18, 0x50, 0xdf, 0xbf, 0x5b, - 0xb1, 0x9a, 0x49, 0x22, 0xae, 0xe9, 0xf3, 0x86, 0x3f, 0xe0, 0xb4, 0xc6, - 0x9c, 0x08, 0xd6, 0xd9, 0xf4, 0x68, 0xbb, 0x33, 0x0e, 0x59, 0x3d, 0x76, - 0xf0, 0xd7, 0x54, 0x04, 0x19, 0x66, 0xee, 0x61, 0x11, 0x0d, 0x48, 0x10, - 0x21, 0x16, 0x7c, 0xac, 0x49, 0xab, 0xe0, 0x19, 0x85, 0x93, 0x48, 0x65, - 0x7c, 0x5e, 0x6c, 0x1a, 0xf5, 0xb0, 0xc6, 0x80, 0xa1, 0x2a, 0xd5, 0x71, - 0x42, 0xec, 0x2f, 0x25, 0xf7, 0xb8, 0x84, 0xcd, 0xf0, 0x5c, 0xcd, 0xee, - 0x44, 0xcb, 0xeb, 0x74, 0x96, 0x3c, 0xb0, 0x56, 0xcb, 0xaf, 0x7e, 0x9e, - 0x4a, 0x12, 0x06, 0xae, 0x57, 0x43, 0x2d, 0xb2, 0x11, 0x96, 0x05, 0xdb, - 0xb3, 0x1a, 0x01, 0xa7, 0x1d, 0x02, 0x81, 0x1c, 0x36, 0x41, 0x65, 0xf0, - 0x67, 0xd6, 0xd0, 0x0f, 0xec, 0x34, 0x7d, 0xd3, 0x89, 0xac, 0x60, 0x67, - 0x95, 0x81, 0x84, 0xe7, 0xbb, 0x9a, 0x59, 0x36, 0x3b, 0xde, 0xa4, 0x88, - 0xda, 0xf2, 0xd2, 0xa2, 0x0c, 0xba, 0xfb, 0x93, 0xbf, 0xc8, 0xad, 0xe8, - 0x57, 0xa0, 0x2b, 0xbb, 0x4e, 0xa9, 0x38, 0xe7, 0x86, 0x6b, 0x95, 0x34, - 0x24, 0x96, 0xc0, 0x09, 0xd9, 0xfd, 0x5f, 0x1c, 0x93, 0xd9, 0x72, 0xfa, - 0xc4, 0x14, 0x72, 0x9c, 0x19, 0x6f, 0xee, 0x12, 0x17, 0xee, 0x65, 0xb4, - 0x8c, 0x83, 0x39, 0x3c, 0x0f, 0xbf, 0x25, 0xcf, 0xee, 0x05, 0x8c, 0x6a, - 0x56, 0x18, 0xf0, 0x20, 0x72, 0xc1, 0xbf, 0xe4, 0xce, 0x37, 0xbf, 0x2b, - 0xba, 0x70, 0x1e, 0xc2, 0xc8, 0xcd, 0x58, 0xb9, 0x60, 0xc7, 0xfb, 0xd0, - 0xce, 0xb9, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x7c, 0x63, 0x50, 0x90, 0xcb, 0x9c, 0xce, 0x59, 0xb1, 0x47, 0xb0, 0x49, - 0x9b, 0xfc, 0xfb, 0x3d, 0x3d, 0x62, 0xcf, 0x58, 0x4c, 0x2a, 0x79, 0xf0, - 0x72, 0x7f, 0x81, 0x41, 0xac, 0x82, 0x2d, 0xa9, 0xf0, 0x0e, 0x4d, 0xd2, - 0xe0, 0xbd, 0xca, 0x17, 0xb7, 0x59, 0x9f, 0xdb, 0xfe, 0x51, 0x90, 0x88, - 0xb9, 0xeb, 0x4e, 0xac, 0x80, 0x30, 0x64, 0xc4, 0x49, 0xd1, 0xb6, 0x65, - 0x67, 0xef, 0x9d, 0x5c, 0x04, 0xe0, 0x9d, 0xbe, 0x47, 0x75, 0x9b, 0x6e, - 0x30, 0x76, 0xad, 0x37, 0x9a, 0x56, 0xff, 0xcd, 0x40, 0x26, 0x3e, 0xe2, - 
0x7d, 0x30, 0x55, 0x09, 0x92, 0x25, 0x36, 0x2f, 0xf8, 0x55, 0xb8, 0x9b, - 0x66, 0x49, 0x41, 0x9d, 0x78, 0x6d, 0x3f, 0x54, 0x41, 0x01, 0x93, 0x9c, - 0x5e, 0x0c, 0x4a, 0x38, 0x79, 0x76, 0xb4, 0x98, 0xae, 0xf9, 0x99, 0x21, - 0x05, 0x6a, 0xfb, 0xbc, 0x44, 0xf7, 0xdc, 0x85, 0x5e, 0x5f, 0x18, 0x49, - 0x22, 0x11, 0x6d, 0xa5, 0x9e, 0x6b, 0x59, 0x60, 0xf8, 0x73, 0x8b, 0xcb, - 0x38, 0xbb, 0xc9, 0xbf, 0x49, 0x0e, 0x57, 0x65, 0x48, 0x41, 0x41, 0xa2, - 0x40, 0x67, 0x91, 0x1d, 0x54, 0xac, 0xa7, 0xef, 0x16, 0x8b, 0xc7, 0xd1, - 0xe6, 0xdb, 0xc5, 0x9c, 0xd4, 0x04, 0x67, 0xd8, 0x75, 0x21, 0x2b, 0x1d, - 0x11, 0xc1, 0x79, 0x45, 0xb4, 0x91, 0x7a, 0x97, 0x00, 0xde, 0xc6, 0xc5, - 0x8a, 0xd1, 0xd7, 0xea, 0xc1, 0x22, 0xe1, 0x58, 0x61, 0xf2, 0x89, 0x3d, - 0xdb, 0x04, 0x3d, 0xe4, 0xe9, 0xe7, 0xbf, 0x4b, 0x99, 0x8a, 0xc6, 0xf2, - 0x09, 0xc4, 0xe2, 0x6d, 0x0b, 0xda, 0x13, 0xfb, 0xff, 0xbf, 0x0b, 0xfc, - 0x78, 0x33, 0xb8, 0x7b, 0x3e, 0xd8, 0xba, 0x27, 0xba, 0xae, 0xdf, 0xce, - 0xea, 0x80, 0x08, 0x38, 0xd8, 0x33, 0x00, 0xa9, 0xb6, 0x88, 0x48, 0xa9, - 0x3b, 0x54, 0xf0, 0x95, 0xda, 0xba, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xb1, 0xd7, 0x8d, 0x6c, 0xb9, 0x96, 0xdc, 0x64, - 0x9b, 0x0c, 0x74, 0x54, 0x59, 0x82, 0xf6, 0x6e, 0x7c, 0x4e, 0x23, 0x83, - 0x04, 0x2e, 0x49, 0xfb, 0x56, 0x4b, 0xcd, 0x0d, 0x76, 0x29, 0xb1, 0xce, - 0x40, 0xa3, 0xd0, 0x02, 0x16, 0x8e, 0x1c, 0x0a, 0x00, 0x5b, 0x8c, 0x06, - 0xf9, 0x07, 0x97, 0x12, 0x0c, 0x33, 0xd5, 0x48, 0x6d, 0xae, 0x7d, 0x2c, - 0x8f, 0x74, 0x32, 0x24, 0xcf, 0x91, 0xd7, 0xbe, 0xb2, 0x05, 0xcf, 0x2f, - 0x93, 0xd5, 0x43, 0x90, 0xce, 0x02, 0x97, 0xf8, 0x51, 0xb3, 0xba, 0x56, - 0x5d, 0x94, 0x41, 0xa4, 0x11, 0xf3, 0x21, 0xc0, 0xcc, 0x28, 0xf8, 0x5a, - 0x00, 0x0a, 0xd4, 0x53, 0xdd, 0xac, 0xfe, 0x25, 0x03, 0xea, 0x2b, 0x6b, - 0x9d, 0x7e, 0x1a, 0xe1, 0x5f, 0x5c, 0xa7, 0x47, 0xa2, 0x72, 0x4f, 0x92, - 0x60, 0x25, 0x7c, 0x1c, 0xa5, 0x34, 0xa6, 0x86, 0x0e, 0xda, 0x8f, 0x3f, - 0xec, 0xe2, 0xe4, 0xad, 0xa9, 0x41, 0xcc, 0x3d, 0x94, 0x43, 0xfd, 0x28, - 
0xd8, 0xb0, 0x0f, 0x05, 0x9e, 0x2b, 0x27, 0x3f, 0xe0, 0x84, 0xbc, 0x9e, - 0x7a, 0xa5, 0x83, 0x3d, 0x3b, 0xac, 0x83, 0xd3, 0x16, 0x92, 0x8c, 0xd2, - 0x4a, 0x81, 0xdd, 0xba, 0x0a, 0xb7, 0xc5, 0x9f, 0x83, 0x0f, 0x78, 0xb8, - 0xab, 0x2d, 0xca, 0xf8, 0x6c, 0x06, 0xd7, 0x82, 0xb8, 0x61, 0x7d, 0x2a, - 0x31, 0x3a, 0x39, 0x97, 0x5f, 0xc7, 0x00, 0x6e, 0x46, 0xf2, 0xc5, 0x12, - 0x71, 0x55, 0x5b, 0x10, 0xaf, 0xbb, 0x07, 0x4c, 0x2f, 0xa3, 0x51, 0x53, - 0x22, 0x20, 0xab, 0xed, 0x02, 0x95, 0xc6, 0x5f, 0xaa, 0xb8, 0xc0, 0xcb, - 0xe5, 0xe0, 0x25, 0x97, 0xf7, 0xda, 0x1d, 0xd8, 0x5a, 0xff, 0x76, 0x0c, - 0x3e, 0x33, 0x1b, 0x7a, 0x15, 0xb8, 0x34, 0x75, 0xcf, 0xe9, 0xf3, 0x53, - 0x61, 0x03, 0x2d, 0x52, 0x29, 0x69, 0x3a, 0xc3, 0xd9, 0x22, 0xc0, 0x2d, - 0x80, 0xed, 0x66, 0xc4, 0xf4, 0x89, 0x60, 0x14, 0xdb, 0xec, 0x7d, 0xcc, - 0x99, 0x5c, 0x94, 0x27, 0xab, 0xed, 0xd2, 0x17, 0xf4, 0x36, 0xfc, 0x7e, - 0x99, 0x98, 0xb6, 0x86, 0xb6, 0x7c, 0x54, 0xd6, 0xec, 0xb5, 0xad, 0x62, - 0xcc, 0xb0, 0xf7, 0x8c, 0x52, 0x99, 0xf2, 0x44, 0x27, 0x3a, 0xb0, 0xff, - 0x8f, 0x09, 0xae, 0xe1, 0x61, 0xd8, 0x9f, 0xdd, 0x2f, 0x6b, 0xea, 0xd0, - 0x12, 0x70, 0x8c, 0x9d, 0x8f, 0x4c, 0x36, 0x98, 0x1e, 0x2e, 0xb5, 0x50, - 0x63, 0x33, 0x9c, 0x4b, 0xc3, 0xd4, 0xa0, 0xe6, 0x96, 0x96, 0x75, 0xfd, - 0x8a, 0xc4, 0x0c, 0xa7, 0xea, 0x9d, 0xf1, 0x23, 0x9e, 0x38, 0xff, 0x1a, - 0x67, 0x36, 0x5f, 0x5f, 0x17, 0x88, 0x1a, 0x43, 0x25, 0xea, 0x76, 0xb5, - 0xcd, 0xce, 0x43, 0xf8, 0x71, 0x2b, 0xdb, 0xf0, 0xcd, 0x76, 0xbd, 0x94, - 0x57, 0xdb, 0x77, 0xcd, 0xb2, 0x8f, 0xd1, 0xc0, 0xeb, 0x00, 0x61, 0x7f, - 0x66, 0xb0, 0x43, 0x6e, 0xe0, 0x9f, 0x11, 0x0e, 0x65, 0xf7, 0x4e, 0x00, - 0x74, 0xc3, 0xeb, 0xb1, 0xeb, 0x0c, 0x24, 0x5d, 0x15, 0x56, 0x16, 0x47, - 0x87, 0xcf, 0x34, 0xbe, 0x2a, 0xdd, 0x77, 0x55, 0xa4, 0x09, 0x15, 0x79, - 0x8c, 0xaa, 0xce, 0x32, 0x90, 0x9b, 0x16, 0x40, 0x94, 0x7f, 0x19, 0x27, - 0xbc, 0xbf, 0x45, 0x4b, 0xa5, 0xf0, 0xd0, 0x9e, 0x5b, 0xb9, 0x46, 0x6e, - 0x72, 0x8f, 0x49, 0x3b, 0x7a, 0xc1, 0x92, 0xb0, 0xd5, 0x25, 0x1b, 0x0b, - 
0xf3, 0xd0, 0x8a, 0x47, 0x8b, 0xbe, 0xa4, 0xf9, 0x6a, 0x09, 0x84, 0x9a, - 0x5b, 0x5b, 0xea, 0xbb, 0x6f, 0xd8, 0xaf, 0xcd, 0x67, 0x9b, 0x79, 0x7c, - 0x8f, 0xcc, 0xd7, 0x5f, 0x3a, 0xc3, 0xd0, 0xb7, 0xba, 0x28, 0x83, 0x81, - 0x4a, 0x05, 0x51, 0xaf, 0xa0, 0x52, 0x34, 0xe3, 0x4f, 0xec, 0x82, 0xdc, - 0x97, 0xd8, 0x69, 0xb2, 0x0d, 0x68, 0x35, 0x87, 0x58, 0xc0, 0xcf, 0x58, - 0x0d, 0xf6, 0x6b, 0x6d, 0x2a, 0xc0, 0x72, 0xe4, 0x90, 0x8c, 0x7b, 0x45, - 0xba, 0xf1, 0x13, 0x6f, 0x8c, 0xd2, 0xdd, 0xc5, 0x8e, 0xc8, 0xec, 0xf9, - 0xfb, 0xde, 0xe5, 0xaa, 0xcb, 0xc0, 0xff, 0x77, 0x2d, 0x99, 0xb1, 0x69, - 0x7f, 0xe3, 0x38, 0x61, 0x35, 0xb6, 0x45, 0xdd, 0x73, 0x45, 0x84, 0x89, - 0x1b, 0x96, 0x7e, 0x6a, 0x1d, 0xd9, 0xe6, 0x76, 0xa8, 0x16, 0x0f, 0x42, - 0xc9, 0x41, 0xec, 0x5d, 0x25, 0x01, 0xb0, 0x45, 0xa6, 0xaa, 0x69, 0x87, - 0x11, 0xa1, 0xb8, 0x9e, 0x68, 0x48, 0x68, 0xe9, 0xb5, 0xc2, 0xff, 0x83, - 0x8f, 0x71, 0xb9, 0xd7, 0xbb, 0xae, 0x59, 0x8b, 0x1b, 0x4c, 0x44, 0xd8, - 0xe3, 0xce, 0xab, 0x88, 0xfb, 0x64, 0xd9, 0x61, 0x5a, 0x7d, 0xce, 0x3a, - 0x27, 0xb5, 0xa3, 0xfd, 0x5d, 0xa3, 0xb8, 0xa1, 0x15, 0x63, 0x0b, 0x75, - 0x39, 0xc3, 0xa4, 0xfb, 0x60, 0x53, 0xfd, 0x11, 0x21, 0x35, 0x0f, 0x19, - 0x28, 0x14, 0xcd, 0x8a, 0xcf, 0x33, 0xaa, 0x4f, 0x6a, 0x1e, 0x56, 0x87, - 0xd5, 0x6e, 0x43, 0x9b, 0xa3, 0x72, 0x95, 0x8c, 0x34, 0xa2, 0xac, 0x11, - 0x76, 0x95, 0xd7, 0xdd, 0xbf, 0x10, 0xf4, 0x0f, 0x2a, 0x64, 0xd2, 0x4d, - 0x7b, 0xc6, 0x9b, 0x7d, 0xf7, 0xa5, 0xb3, 0x84, 0x9a, 0x9a, 0x5e, 0xcf, - 0x7f, 0x95, 0x6d, 0x44, 0xd1, 0xb2, 0x19, 0xbb, 0xed, 0x37, 0x42, 0x4b, - 0x4b, 0x6d, 0xb7, 0x10, 0x02, 0x5f, 0x00, 0x1f, 0x24, 0xce, 0xb2, 0x8b, - 0x3e, 0x7d, 0xc6, 0x6e, 0x6c, 0x90, 0x75, 0xad, 0x3f, 0x9d, 0x63, 0x04, - 0x76, 0x20, 0x7a, 0x56, 0x48, 0xa1, 0x6a, 0x37, 0x74, 0xd2, 0xb7, 0x4f, - 0xa3, 0x64, 0x62, 0xaa, 0xce, 0x75, 0x8c, 0x15, 0x75, 0x79, 0xa0, 0xbd, - 0xdd, 0x01, 0x46, 0xca, 0xa0, 0x31, 0x1a, 0x16, 0x1f, 0xef, 0x8b, 0xc6, - 0x54, 0x57, 0xfa, 0x6e, 0x43, 0xdf, 0xb0, 0x99, 0xed, 0xa4, 0xcb, 0xeb, - 
0x91, 0x35, 0x14, 0x0c, 0xa9, 0x1d, 0xb5, 0xa9, 0x32, 0x99, 0xe3, 0x89, - 0x74, 0xaa, 0xa4, 0x65, 0x1e, 0x82, 0x47, 0xfa, 0x37, 0x23, 0xe5, 0x86, - 0xb6, 0xc0, 0xb6, 0x89, 0x9a, 0xd9, 0xae, 0x29, 0x39, 0x7b, 0x66, 0xc7, - 0x5b, 0x02, 0x08, 0x86, 0xd4, 0xf0, 0x75, 0xc2, 0x05, 0x86, 0xc3, 0x75, - 0xd2, 0x2a, 0x1e, 0xec, 0x6e, 0x75, 0x29, 0x58, 0x8c, 0x25, 0x3b, 0x95, - 0x21, 0xde, 0x42, 0xd5, 0xb7, 0x15, 0x30, 0x09, 0x49, 0x78, 0x55, 0xd5, - 0xf2, 0x30, 0x80, 0x93, 0x8a, 0xce, 0x84, 0x27, 0xdb, 0x4a, 0x09, 0x30, - 0x0c, 0x7f, 0x4d, 0xd1, 0x0f, 0xda, 0x66, 0x58, 0xe1, 0x01, 0xfd, 0x75, - 0x83, 0xf5, 0x39, 0x2e, 0xe2, 0x6b, 0xde, 0xff, 0x20, 0x8a, 0xf7, 0xcc, - 0x81, 0x8e, 0x99, 0xb4, 0xeb, 0x76, 0x74, 0x38, 0x2b, 0xe0, 0x6d, 0x61, - 0x8f, 0x39, 0x59, 0x10, 0x7d, 0xb5, 0xd3, 0x14, 0x96, 0x04, 0x1d, 0x22, - 0x89, 0xef, 0x15, 0x7c, 0x28, 0x5a, 0xd6, 0x8d, 0xf3, 0xb7, 0x6a, 0x9a, - 0xce, 0x21, 0x77, 0xfd, 0x4f, 0x22, 0x26, 0x28, 0xb8, 0xb5, 0xb3, 0x73, - 0xfd, 0x2a, 0x7b, 0x42, 0x26, 0x77, 0x41, 0x93, 0xed, 0xf9, 0x8f, 0xa9, - 0x92, 0xd5, 0x9f, 0x2e, 0x60, 0xec, 0x60, 0x98, 0xf1, 0xd5, 0x11, 0xe2, - 0xe0, 0xd7, 0x45, 0xa7, 0xe4, 0xf2, 0x82, 0x61, 0x2f, 0x41, 0x1b, 0xd9, - 0x8e, 0x78, 0xd5, 0x6b, 0x68, 0x74, 0xf0, 0xc3, 0x83, 0x01, 0x16, 0x60, - 0x6e, 0x34, 0x88, 0x45, 0x8a, 0x86, 0x44, 0x5b, 0xa5, 0xa8, 0x55, 0xbc, - 0xfa, 0x8f, 0xbd, 0x93, 0x95, 0x3f, 0xab, 0x19, 0x54, 0x8f, 0x06, 0x8e, - 0xca, 0x0b, 0x4a, 0x18, 0x3f, 0x7a, 0x9c, 0x3f, 0xe6, 0xbe, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x81, 0x32, 0x41, 0x46, - 0x59, 0x26, 0xf4, 0xef, 0x93, 0x9f, 0x04, 0xc2, 0x67, 0x13, 0x32, 0x45, - 0xc0, 0x79, 0x70, 0x27, 0x21, 0x2b, 0xaf, 0x35, 0xf3, 0xc4, 0x88, 0x52, - 0x28, 0xea, 0xca, 0x8a, 0x08, 0x01, 0x6f, 0x61, 0xab, 0x10, 0xa3, 0xf0, - 0x6b, 0x3b, 0x54, 0x64, 0xf1, 0x63, 0x83, 0x38, 0x2b, 0x26, 0x18, 0x5a, - 0x67, 0xc4, 0x67, 0x38, 0x3f, 0x2c, 0x9a, 0xc9, 0x48, 0x33, 0x77, 0xb4, - 0xb2, 0xc2, 0xc7, 0x08, 0x21, 0x5e, 0xc4, 0x19, 0x59, 0xe1, 0xfa, 0x32, - 
0xa4, 0x4c, 0x3e, 0xba, 0x65, 0x92, 0x98, 0x39, 0x71, 0x2f, 0x99, 0x08, - 0xf8, 0xb3, 0x7a, 0x03, 0x53, 0xd7, 0x68, 0xb2, 0x5e, 0xb0, 0xef, 0xe0, - 0x1e, 0x7d, 0xb2, 0x23, 0x5d, 0x2b, 0xd7, 0x09, 0xa6, 0x78, 0xa4, 0x7c, - 0x08, 0xed, 0x8a, 0xf6, 0x96, 0xa0, 0x10, 0x17, 0x62, 0x8b, 0x8a, 0xa0, - 0xac, 0x22, 0x67, 0x02, 0xa8, 0x66, 0x1a, 0xb5, 0x02, 0xde, 0xa5, 0xfa, - 0x69, 0x29, 0x5f, 0x24, 0x89, 0x46, 0x68, 0xd6, 0x51, 0x2a, 0xfe, 0x88, - 0xf0, 0x40, 0xde, 0xd1, 0x12, 0x2e, 0xed, 0x13, 0x7b, 0x49, 0xf6, 0xe1, - 0x7a, 0xcf, 0x61, 0xcb, 0x70, 0x9d, 0xaa, 0x51, 0x07, 0xc2, 0x54, 0x76, - 0x89, 0x29, 0x94, 0x29, 0x8b, 0x0e, 0xf5, 0xe8, 0x81, 0xc7, 0xdb, 0x59, - 0x1e, 0x75, 0xda, 0x6a, 0x94, 0x18, 0x16, 0xae, 0xbb, 0x43, 0x87, 0x56, - 0x66, 0x8b, 0x84, 0xe9, 0xa9, 0xd0, 0xd2, 0x8f, 0x5b, 0xbf, 0x1d, 0x24, - 0x3a, 0xb7, 0x64, 0xff, 0xe9, 0x22, 0x21, 0x65, 0xaf, 0x2b, 0x45, 0x8d, - 0x28, 0xea, 0xbc, 0x07, 0x10, 0x6e, 0xfb, 0x4d, 0x6f, 0x35, 0xe5, 0xeb, - 0x5d, 0x29, 0x72, 0xe1, 0x94, 0xad, 0xed, 0x25, 0xd7, 0x39, 0x63, 0x32, - 0x37, 0x0b, 0xb2, 0xd7, 0x54, 0x1f, 0xe4, 0x0d, 0xe7, 0xb3, 0xd1, 0xa6, - 0x2a, 0xcf, 0x8e, 0x97, 0xf1, 0xa8, 0xfc, 0xb1, 0x61, 0xdc, 0xb4, 0x8f, - 0x29, 0xa2, 0x68, 0x4a, 0xe6, 0x2f, 0x8a, 0x69, 0x2c, 0xa1, 0x1d, 0xe2, - 0x9e, 0x65, 0x71, 0xb7, 0x83, 0xef, 0x63, 0xf5, 0x36, 0xdc, 0xa0, 0x94, - 0x5a, 0x45, 0x8a, 0x85, 0x5e, 0x28, 0x86, 0x21, 0xd2, 0xbf, 0x7a, 0x2f, - 0x76, 0x1c, 0x2a, 0x15, 0xb2, 0xe8, 0xaf, 0x63, 0x37, 0xbe, 0xd8, 0x0a, - 0xef, 0x54, 0xee, 0xe6, 0xd9, 0xb3, 0xdb, 0x41, 0x55, 0xba, 0xd8, 0x14, - 0x7c, 0x10, 0x61, 0x06, 0x40, 0x45, 0x69, 0x37, 0x60, 0xf7, 0x6a, 0x7a, - 0x23, 0x70, 0x30, 0x57, 0x3e, 0xe5, 0x12, 0x24, 0xbc, 0x5e, 0x82, 0x89, - 0xd8, 0x37, 0xc9, 0x33, 0xb9, 0x38, 0xa5, 0xba, 0xed, 0xdd, 0x93, 0x58, - 0x81, 0x15, 0xec, 0x15, 0x70, 0x2f, 0x30, 0xfa, 0xaf, 0xf7, 0xf5, 0xcb, - 0x41, 0x74, 0xea, 0xc0, 0x91, 0xbe, 0x53, 0x4c, 0xc2, 0x74, 0x1b, 0x5b, - 0x8c, 0x74, 0xd8, 0xc3, 0x4a, 0x12, 0xaa, 0x57, 0xd6, 0x61, 0xb1, 0xb8, - 
0x81, 0x5d, 0x81, 0x37, 0x1e, 0x5b, 0x3d, 0x5a, 0xbc, 0xa6, 0xb2, 0x27, - 0xe3, 0x01, 0x4c, 0xf0, 0xad, 0x7b, 0xdf, 0x50, 0xf9, 0xd7, 0xb7, 0xcc, - 0xa8, 0x5c, 0x3d, 0x9a, 0xb7, 0x60, 0x3e, 0x63, 0x3f, 0x6a, 0x08, 0x0b, - 0x82, 0xdc, 0x3e, 0xfa, 0x24, 0x33, 0xd3, 0x01, 0xbf, 0xef, 0xeb, 0x52, - 0x3f, 0x91, 0x61, 0xda, 0xe2, 0x26, 0x10, 0xdf, 0xe4, 0x9b, 0x77, 0x91, - 0x22, 0xc5, 0x4e, 0x9c, 0x0b, 0x32, 0xff, 0x27, 0x85, 0x85, 0x0c, 0x99, - 0x50, 0x8f, 0xad, 0x5d, 0x06, 0x18, 0x52, 0xb4, 0x64, 0x09, 0xc4, 0xa4, - 0x84, 0xd4, 0x81, 0x07, 0x0a, 0x97, 0x55, 0xf8, 0x96, 0x52, 0xb2, 0x9a, - 0xf4, 0x06, 0x2c, 0x9a, 0x3b, 0x8b, 0xaa, 0x67, 0x18, 0x3a, 0xee, 0xbc, - 0xca, 0x8f, 0x46, 0xf6, 0x4a, 0x33, 0x5b, 0x56, 0x09, 0xb2, 0x72, 0x87, - 0xdb, 0xbb, 0x57, 0x67, 0x53, 0x82, 0x77, 0x31, 0x66, 0xbb, 0xf1, 0x33, - 0x6d, 0x55, 0x82, 0xaa, 0x80, 0xd4, 0x4d, 0xb8, 0xab, 0xbd, 0x2a, 0xda, - 0x10, 0x3a, 0xc8, 0xf0, 0x14, 0x1e, 0xcb, 0x8e, 0x76, 0x6c, 0xc8, 0x74, - 0x05, 0xb3, 0x51, 0xbd, 0x63, 0x06, 0x69, 0x05, 0x2a, 0x21, 0xd6, 0x2f, - 0xe4, 0x38, 0xae, 0xf8, 0xd4, 0xe9, 0xa7, 0xe8, 0xc8, 0x5a, 0x65, 0x7d, - 0x54, 0x34, 0x33, 0x0d, 0xf6, 0x07, 0xd6, 0x8c, 0xe5, 0x72, 0x9b, 0xfb, - 0x60, 0x49, 0xd2, 0xaf, 0xb4, 0x17, 0xc4, 0x74, 0x8d, 0xe5, 0x54, 0xda, - 0x96, 0x56, 0x7d, 0x97, 0x62, 0xe8, 0xec, 0x0d, 0x2b, 0x02, 0x2e, 0x59, - 0xf8, 0xa1, 0x06, 0x6a, 0xb6, 0x3e, 0x15, 0xeb, 0x64, 0x1a, 0x48, 0x3d, - 0x53, 0x2c, 0x42, 0x3b, 0x97, 0xa1, 0x3f, 0x47, 0x8b, 0x74, 0x87, 0x8b, - 0x96, 0x63, 0x08, 0x4c, 0x99, 0x38, 0x5a, 0xb6, 0x93, 0xa8, 0xcc, 0xee, - 0x62, 0x3a, 0x00, 0x6d, 0x5c, 0xab, 0x77, 0x3c, 0x46, 0xae, 0x6e, 0xeb, - 0xf1, 0xf9, 0x63, 0xf1, 0xa2, 0x31, 0x21, 0x38, 0xc3, 0x4f, 0xe2, 0x3a, - 0x33, 0x7f, 0xe7, 0xc6, 0x69, 0xd5, 0x1c, 0x7e, 0x5b, 0x4f, 0xb1, 0x50, - 0x3b, 0xbe, 0x31, 0xa7, 0x42, 0xa3, 0x97, 0x7b, 0xe3, 0x90, 0xd0, 0x07, - 0xfd, 0x05, 0xb9, 0xf2, 0x47, 0xc4, 0xc8, 0xdd, 0x1c, 0x3c, 0xa4, 0x22, - 0x96, 0x04, 0xca, 0x28, 0x17, 0xcc, 0x5c, 0x49, 0x7e, 0xc6, 0x93, 0x98, - 
0xd3, 0x8b, 0xd2, 0xf6, 0x4a, 0xb6, 0xbe, 0x8d, 0xa2, 0xdd, 0xb6, 0x7c, - 0x66, 0x0c, 0x29, 0xcb, 0x1d, 0x98, 0xf6, 0xe4, 0xe5, 0x30, 0x4c, 0x84, - 0xbf, 0x6f, 0x71, 0x4e, 0xc2, 0x12, 0x9f, 0x35, 0xd6, 0xf8, 0xc6, 0x30, - 0xe9, 0x9e, 0x1a, 0x8a, 0x2f, 0xd1, 0x96, 0xb3, 0x3c, 0x0f, 0xf5, 0x78, - 0xa7, 0xe0, 0xbd, 0x4b, 0xe0, 0xd8, 0x3d, 0x57, 0xa5, 0x44, 0xa0, 0xd9, - 0x10, 0x79, 0xd2, 0x10, 0x50, 0xc7, 0x77, 0x73, 0x09, 0xf8, 0xb4, 0xcf, - 0x66, 0xe3, 0x0c, 0xfb, 0x96, 0xf8, 0x52, 0xb3, 0x7e, 0x44, 0xf0, 0x03, - 0x54, 0xd4, 0xa2, 0x57, 0x38, 0x8a, 0x96, 0xfc, 0x7c, 0x4c, 0x9f, 0x3a, - 0xf2, 0xa2, 0x48, 0xbb, 0x3e, 0xd1, 0x11, 0x2c, 0xab, 0xdf, 0x53, 0x96, - 0xac, 0x58, 0x33, 0xb9, 0xdd, 0xd2, 0x4f, 0x8a, 0x0a, 0x89, 0x0e, 0xd3, - 0x6f, 0x58, 0x8c, 0xa1, 0x0a, 0x0b, 0xa7, 0xd7, 0x1f, 0x0a, 0x70, 0xe3, - 0x43, 0x12, 0x56, 0xb8, 0x6c, 0xf8, 0x75, 0x4e, 0x2b, 0xb0, 0x17, 0x29, - 0xe4, 0x95, 0x85, 0xd8, 0x85, 0x95, 0x63, 0x55, 0xa8, 0x82, 0xf0, 0xe7, - 0x7d, 0xf3, 0xf1, 0x78, 0x66, 0xd1, 0x92, 0x71, 0x99, 0xad, 0x30, 0x94, - 0xe9, 0x54, 0x2c, 0xe1, 0x57, 0xf3, 0x6a, 0xe6, 0x0c, 0x5e, 0xc7, 0x58, - 0xba, 0xb7, 0x61, 0xd3, 0x74, 0x72, 0x96, 0x06, 0x0b, 0x01, 0x3d, 0xc2, - 0xa1, 0xb4, 0x38, 0x81, 0x19, 0x44, 0xbc, 0x84, 0x52, 0x22, 0xc9, 0x67, - 0x81, 0x99, 0xfb, 0x0a, 0xc2, 0xff, 0x50, 0x67, 0xbe, 0x38, 0x5e, 0x13, - 0x16, 0x60, 0x83, 0x35, 0xb9, 0x2f, 0xa9, 0x55, 0xbb, 0x30, 0x6b, 0x19, - 0xfc, 0x2a, 0x40, 0x24, 0x74, 0x20, 0x57, 0x78, 0xb9, 0x55, 0xb7, 0x70, - 0x86, 0x65, 0x43, 0x1c, 0x76, 0x2e, 0x91, 0x83, 0x5e, 0x33, 0xc2, 0xd4, - 0xcc, 0xb5, 0x1c, 0x45, 0xaf, 0xa3, 0x87, 0x95, 0x9b, 0x77, 0x50, 0x44, - 0x7e, 0xdd, 0xca, 0x3f, 0x51, 0x21, 0xae, 0xf2, 0x15, 0xa9, 0x32, 0x94, - 0xca, 0xde, 0x3b, 0x97, 0x13, 0x6b, 0xff, 0xe0, 0x79, 0x39, 0x40, 0xf0, - 0x66, 0x7d, 0x5e, 0xef, 0xec, 0x0a, 0x35, 0xd2, 0x0d, 0x09, 0x19, 0x13, - 0xf2, 0xc2, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xdc, 0x07, 0x2e, 0x46, 0xab, 0x4d, 0x6d, 0xf7, 0x24, 0xba, 0x02, 0xe3, - 
0xc5, 0xe3, 0xed, 0x64, 0xc6, 0x77, 0x5a, 0x14, 0xae, 0x38, 0x52, 0x8c, - 0x16, 0x2c, 0x52, 0x0e, 0xf6, 0x65, 0x99, 0xcc, 0xf6, 0x9f, 0x77, 0xcc, - 0x2e, 0xaf, 0x14, 0xd1, 0xf0, 0x0f, 0xa7, 0x3e, 0x5b, 0x74, 0xff, 0xb9, - 0xd3, 0x30, 0x02, 0x5e, 0x52, 0xc8, 0x6f, 0x57, 0xef, 0x28, 0xf5, 0xfa, - 0x9e, 0x70, 0x00, 0xfc, 0x3e, 0xc3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xaa, 0x9f, 0x86, 0xb0, 0x6d, 0xa1, 0x0c, 0xfa, - 0xef, 0xb3, 0x6a, 0x50, 0xa6, 0xfe, 0xff, 0xa9, 0x61, 0x0b, 0x18, 0x72, - 0xee, 0xc6, 0xcd, 0x3a, 0x34, 0x5e, 0xa8, 0x81, 0x31, 0x54, 0x25, 0x05, - 0xc1, 0xd9, 0x66, 0x3d, 0x17, 0xbb, 0x03, 0x21, 0x07, 0x69, 0x3a, 0x37, - 0xe8, 0xd4, 0x6a, 0x68, 0xe1, 0xa3, 0x19, 0x5a, 0x8d, 0x14, 0x11, 0x09, - 0xef, 0xae, 0xfe, 0x94, 0x19, 0x8a, 0xe4, 0xb9, 0x6e, 0xe8, 0xfa, 0x12, - 0x2a, 0x5d, 0x00, 0x29, 0x27, 0x6d, 0x5a, 0xa5, 0x09, 0x34, 0x79, 0x2b, - 0xa8, 0xcc, 0x42, 0xb4, 0xde, 0xe0, 0x91, 0xb9, 0x06, 0x0c, 0x11, 0x17, - 0x25, 0x7a, 0x35, 0x57, 0x51, 0x40, 0xf3, 0xc7, 0xc6, 0x4a, 0x69, 0x98, - 0x2b, 0x2b, 0x3e, 0x5d, 0x32, 0xd8, 0x8f, 0xb0, 0x1d, 0xee, 0x77, 0xe3, - 0xaf, 0x4f, 0x71, 0x05, 0x04, 0xd2, 0xff, 0x51, 0xed, 0xa4, 0x69, 0x50, - 0x24, 0x2a, 0xe5, 0xaa, 0xbb, 0xc6, 0x7a, 0x7f, 0xb2, 0xdf, 0x1d, 0xc2, - 0x02, 0x2e, 0x52, 0xd1, 0xd9, 0x5b, 0xe7, 0x6c, 0x50, 0x31, 0x4e, 0xdf, - 0x8e, 0x3f, 0x37, 0xfc, 0xf5, 0x34, 0x0e, 0xdb, 0x4c, 0x5d, 0x7d, 0xc8, - 0xe4, 0x72, 0x40, 0xcb, 0x95, 0xa5, 0x41, 0xeb, 0x78, 0x5f, 0x64, 0x20, - 0x55, 0x19, 0xc7, 0xf9, 0x9c, 0x71, 0x40, 0x8f, 0xcc, 0x2d, 0x86, 0xc0, - 0xf4, 0x36, 0x2b, 0x0e, 0x28, 0xb4, 0xad, 0x1b, 0xde, 0x60, 0x67, 0x03, - 0x0f, 0x7c, 0x18, 0xd9, 0xc3, 0x73, 0x67, 0x0d, 0x44, 0x3d, 0xbe, 0x7c, - 0xcf, 0x96, 0x22, 0x0b, 0x0e, 0x3a, 0x0b, 0xcf, 0x04, 0x95, 0x92, 0x7d, - 0x4b, 0xa2, 0x6a, 0x0b, 0x47, 0x72, 0x73, 0xa8, 0x9b, 0x96, 0x3d, 0xc6, - 0x03, 0x34, 0xb1, 0x69, 0xc2, 0x50, 0x60, 0x89, 0x8c, 0x55, 0x8f, 0x8e, - 0x74, 0xa8, 0x9e, 0x25, 0xe4, 0x0e, 0x73, 0xef, 0x4f, 0x51, 0xbe, 0xed, - 
0x5c, 0x14, 0xd3, 0xfa, 0x94, 0x58, 0x8d, 0x5c, 0xa0, 0xb1, 0xfc, 0x37, - 0x6e, 0x9c, 0x9e, 0x61, 0xe5, 0x12, 0x13, 0xb2, 0x88, 0xc6, 0xcf, 0x60, - 0x3f, 0x0d, 0x51, 0x33, 0x22, 0xfa, 0xfb, 0x2d, 0x2b, 0x8d, 0x43, 0x9b, - 0x3d, 0x1e, 0x88, 0x24, 0x50, 0x78, 0xf7, 0x7e, 0x45, 0xb1, 0x0f, 0xa9, - 0xe6, 0x77, 0xf8, 0x78, 0xff, 0x57, 0x6a, 0x05, 0x06, 0x0c, 0x7e, 0x1e, - 0x7f, 0xe9, 0x90, 0xe8, 0x61, 0x68, 0xbc, 0x9e, 0xc4, 0xe5, 0x06, 0x04, - 0x76, 0xcc, 0x01, 0x57, 0x1a, 0x55, 0x9e, 0x45, 0x26, 0xd6, 0xd8, 0xc2, - 0x50, 0x25, 0xfc, 0x72, 0x4e, 0x18, 0xbe, 0xf2, 0x2f, 0xc0, 0x1b, 0xc8, - 0x14, 0xeb, 0x24, 0xda, 0x15, 0x0a, 0x83, 0x38, 0xc5, 0xdd, 0xc9, 0xd7, - 0x12, 0x35, 0x55, 0xdf, 0x2c, 0x23, 0xea, 0x17, 0xca, 0xbf, 0x18, 0xc9, - 0x80, 0x63, 0x4b, 0x77, 0x8b, 0x17, 0x01, 0x05, 0x1b, 0xa3, 0x0b, 0x0f, - 0xdd, 0xc6, 0xe0, 0xdf, 0xc9, 0xa6, 0x8c, 0x50, 0x95, 0x8d, 0x6c, 0x96, - 0x67, 0xff, 0x88, 0x38, 0x3b, 0x76, 0x72, 0x11, 0x35, 0xa0, 0x1c, 0xc8, - 0x96, 0x9c, 0xe5, 0x90, 0x79, 0x0e, 0x62, 0x57, 0x00, 0xd9, 0x57, 0xf8, - 0xa4, 0xc2, 0xc2, 0x0a, 0x17, 0x8e, 0xd7, 0x03, 0x6d, 0x4d, 0x14, 0xb6, - 0x96, 0x8a, 0x76, 0x67, 0x58, 0xce, 0x9c, 0xb3, 0x10, 0x49, 0x06, 0xeb, - 0x56, 0x43, 0x40, 0xcb, 0xd4, 0xd7, 0x59, 0x42, 0xa4, 0xd7, 0x21, 0x6a, - 0x51, 0x3d, 0x1c, 0x54, 0xd7, 0xd6, 0xa2, 0xcf, 0xf8, 0xf6, 0x72, 0x35, - 0x04, 0xa6, 0xe3, 0x53, 0xca, 0xc5, 0x62, 0xee, 0xa9, 0xc3, 0x6d, 0x1b, - 0xc4, 0xc5, 0xd9, 0xa7, 0x37, 0xc2, 0x04, 0x01, 0xc9, 0x4a, 0x2e, 0x26, - 0xdd, 0x12, 0x6e, 0x41, 0x64, 0xb4, 0xe8, 0xe8, 0xc7, 0xf8, 0xab, 0x8a, - 0xab, 0x1d, 0x7f, 0x2d, 0x58, 0xc2, 0xc4, 0xf0, 0x5d, 0x11, 0x35, 0x52, - 0x88, 0xbc, 0x0f, 0x44, 0x6e, 0x91, 0x1e, 0x87, 0xb4, 0xb1, 0x91, 0x52, - 0x32, 0xe4, 0x38, 0x6d, 0x5e, 0x8d, 0x30, 0xf0, 0xbc, 0xc3, 0x15, 0x80, - 0x47, 0x36, 0x35, 0xb0, 0x93, 0xf3, 0xc4, 0x82, 0xc7, 0x73, 0xc1, 0x67, - 0x0c, 0x7a, 0x31, 0x36, 0xbc, 0x73, 0x67, 0x66, 0xae, 0x48, 0x82, 0x27, - 0x6e, 0x14, 0xd0, 0xd5, 0x12, 0x10, 0xce, 0x5e, 0x37, 0xcd, 0x7e, 0xa5, - 
0xcb, 0xff, 0x91, 0xf0, 0x62, 0xdb, 0x95, 0x74, 0x0c, 0x8c, 0x1e, 0x78, - 0x11, 0x02, 0xb3, 0x02, 0x0b, 0x31, 0xe7, 0x4e, 0x8b, 0x58, 0x6a, 0xde, - 0x20, 0x93, 0x8b, 0x8e, 0x62, 0x03, 0x24, 0xc9, 0xca, 0xf8, 0x44, 0x1d, - 0x0c, 0x1b, 0xd8, 0x5d, 0xcc, 0xe2, 0x8e, 0x02, 0xc6, 0x5c, 0x06, 0x45, - 0xe6, 0x94, 0x8f, 0xa2, 0x3e, 0xf5, 0xe9, 0xf5, 0x88, 0x87, 0xb2, 0x84, - 0x1e, 0xb6, 0xb6, 0xfc, 0x9f, 0x8e, 0x79, 0xf5, 0x4b, 0x24, 0x81, 0x3e, - 0x5d, 0xf4, 0x10, 0x6e, 0xdd, 0x8c, 0x8c, 0xae, 0xc6, 0x2c, 0x26, 0xb2, - 0xfc, 0xf3, 0x99, 0xe8, 0x8c, 0x65, 0x5d, 0x6c, 0xa8, 0x1d, 0x6f, 0x1e, - 0x32, 0x0a, 0xee, 0x87, 0xf6, 0xe1, 0xdd, 0x5e, 0x7f, 0x7a, 0x90, 0x8c, - 0x3f, 0xe8, 0x47, 0x95, 0x9b, 0xc8, 0x2c, 0x49, 0xc9, 0xe4, 0x2d, 0xea, - 0x58, 0xfc, 0x29, 0x1a, 0xb7, 0xa1, 0xf9, 0xb8, 0x84, 0x41, 0xa0, 0xf1, - 0x77, 0x83, 0x56, 0x73, 0x86, 0xea, 0xf4, 0xf5, 0x2a, 0xa6, 0x6b, 0x00, - 0x64, 0x39, 0x08, 0x8f, 0xf0, 0x22, 0x1a, 0x4c, 0xf2, 0x5a, 0xd0, 0xaa, - 0x39, 0xae, 0x8a, 0xbc, 0x03, 0x99, 0xf7, 0xcc, 0x80, 0xdf, 0x2b, 0x85, - 0xbe, 0x1a, 0x97, 0x28, 0x63, 0x04, 0x72, 0x75, 0x75, 0xb4, 0x9c, 0xd3, - 0x17, 0xcc, 0x1e, 0xa1, 0xd2, 0x47, 0x18, 0x45, 0xad, 0xb4, 0x0a, 0x32, - 0x31, 0x36, 0x64, 0x48, 0x3f, 0x7b, 0x4b, 0xc0, 0xd6, 0x78, 0x46, 0xaa, - 0x90, 0x89, 0xf9, 0x36, 0x3d, 0xb4, 0xb3, 0x50, 0x51, 0xd9, 0x55, 0x6f, - 0xa9, 0xe7, 0x25, 0xaf, 0xa0, 0xca, 0x9d, 0x45, 0x83, 0xc3, 0x0b, 0x2a, - 0x0c, 0xf9, 0x3f, 0xe4, 0x08, 0xf4, 0xbd, 0x23, 0x45, 0x85, 0xcf, 0x41, - 0x93, 0xd3, 0x21, 0x5f, 0x53, 0xa2, 0x5b, 0xa9, 0xf5, 0xe9, 0x8f, 0x2a, - 0x2d, 0x53, 0x3c, 0x36, 0x17, 0xce, 0x37, 0x35, 0x3e, 0x9e, 0x6b, 0xbc, - 0xba, 0xaa, 0xa5, 0x61, 0x79, 0x98, 0x8e, 0xbd, 0x19, 0xf4, 0x5f, 0xa9, - 0xb8, 0x96, 0xa2, 0xce, 0x32, 0x00, 0xab, 0x51, 0xcb, 0xfa, 0x30, 0x3a, - 0x83, 0x92, 0x91, 0xad, 0x08, 0x61, 0x62, 0x51, 0x7f, 0x19, 0xa9, 0x2a, - 0x84, 0xf2, 0xab, 0x7e, 0x5e, 0xa7, 0x5a, 0x54, 0x7f, 0x68, 0x2a, 0x7b, - 0x4f, 0xde, 0x45, 0x1d, 0xef, 0x73, 0x5f, 0xc0, 0x40, 0x6e, 0xec, 0x6c, - 
0xe9, 0xa5, 0x6b, 0x46, 0x54, 0x7c, 0x24, 0x8b, 0xa4, 0xe5, 0xb4, 0x82, - 0x31, 0x1f, 0x3e, 0x79, 0x2e, 0x21, 0x8c, 0xf1, 0xbd, 0xad, 0x7c, 0x28, - 0xcc, 0xbd, 0x58, 0x72, 0xe9, 0x6a, 0x04, 0x56, 0x67, 0x0f, 0x62, 0x98, - 0x5a, 0x97, 0x4b, 0xe2, 0x67, 0x70, 0xbb, 0x17, 0xb1, 0x84, 0x5b, 0xd4, - 0x6e, 0xab, 0x90, 0x29, 0x20, 0x93, 0x34, 0xa8, 0x03, 0x0f, 0xed, 0x1a, - 0xf0, 0x1b, 0x92, 0x87, 0x43, 0xa5, 0x6a, 0x1c, 0xdc, 0xd7, 0x22, 0x68, - 0x83, 0x98, 0x74, 0x2a, 0x4c, 0x51, 0xef, 0x71, 0x19, 0xd5, 0x3d, 0x05, - 0x19, 0x61, 0xb2, 0x52, 0xa8, 0x6e, 0xda, 0x72, 0x51, 0x66, 0x9f, 0xf0, - 0x12, 0xf6, 0x18, 0x60, 0xcc, 0xd7, 0x2f, 0x2e, 0x83, 0x14, 0x09, 0xdb, - 0x55, 0x1c, 0xf2, 0xaf, 0xfd, 0xa4, 0x40, 0xf1, 0x4a, 0xc7, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x9c, 0x52, 0xff, 0x48, - 0x06, 0x61, 0x76, 0x6d, 0xd7, 0x44, 0xb1, 0x0c, 0x32, 0x62, 0x15, 0xa1, - 0xc3, 0x97, 0x03, 0xdd, 0xed, 0x20, 0x3c, 0x3a, 0x09, 0x16, 0xe5, 0x7d, - 0x8c, 0xf9, 0x7b, 0x22, 0x5e, 0x3a, 0xdd, 0xf0, 0xc6, 0xf0, 0x3a, 0xd4, - 0x94, 0x85, 0x1c, 0x60, 0x74, 0x91, 0xa3, 0xe2, 0x8a, 0xe5, 0x3e, 0xd4, - 0x95, 0x28, 0x8b, 0x1a, 0x7b, 0xbe, 0x07, 0xc0, 0xe3, 0x6b, 0xb9, 0x85, - 0x82, 0x0b, 0x24, 0xba, 0x1c, 0xfc, 0xc0, 0x0a, 0x21, 0x33, 0xad, 0x00, - 0x19, 0xce, 0xb5, 0x8f, 0x73, 0x05, 0xf1, 0xac, 0x03, 0xbe, 0x1f, 0x22, - 0xd5, 0x32, 0x5e, 0x50, 0xe3, 0xe0, 0x62, 0x26, 0xf4, 0xb0, 0x85, 0xd8, - 0xf7, 0xa7, 0xf4, 0xa7, 0xff, 0x10, 0xb8, 0xbc, 0xe0, 0x3e, 0x4d, 0xcb, - 0x37, 0x74, 0xcc, 0x85, 0xed, 0xa0, 0x34, 0x6c, 0xfa, 0x37, 0x84, 0x6a, - 0x94, 0x55, 0x3b, 0x1e, 0x14, 0xab, 0x26, 0x7b, 0x3e, 0xac, 0xc3, 0x79, - 0xcd, 0x1b, 0x00, 0x02, 0xb3, 0x01, 0xc3, 0x10, 0xdd, 0x56, 0x7d, 0x0e, - 0x69, 0x39, 0x3c, 0x17, 0xa3, 0xae, 0x9c, 0x2d, 0xc7, 0x5a, 0x0b, 0x7c, - 0xd0, 0xac, 0xa1, 0x91, 0x6a, 0x6d, 0xc0, 0x3f, 0x98, 0xf1, 0x21, 0xf5, - 0xa5, 0x7c, 0xbc, 0x70, 0x0d, 0x7b, 0x2f, 0x0d, 0x5a, 0xa5, 0x4a, 0x5a, - 0xff, 0x51, 0xbf, 0x7f, 0xb5, 0x4f, 0x2c, 0xba, 0xa9, 0x46, 0x81, 0x6b, - 
0xac, 0xc6, 0x62, 0x2d, 0xd7, 0xb5, 0x04, 0x5f, 0xd4, 0x5f, 0x1f, 0x6b, - 0x11, 0x7d, 0xe3, 0x58, 0x1f, 0xb5, 0xbf, 0x16, 0x43, 0x88, 0x05, 0xf5, - 0xa4, 0x7b, 0xb5, 0x0e, 0xf4, 0x01, 0xb6, 0x90, 0x69, 0x52, 0x0a, 0x5e, - 0x9b, 0x87, 0x51, 0x5e, 0xd5, 0xed, 0x2c, 0xcc, 0x58, 0xad, 0xe6, 0x77, - 0xa2, 0xc5, 0x7c, 0x1e, 0xc5, 0x92, 0xbe, 0xed, 0x3a, 0x9a, 0x97, 0xed, - 0x56, 0xc8, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x16, 0xe8, 0x24, 0xe3, 0x82, 0x36, 0x8e, 0x50, 0x45, 0xbe, 0xc6, 0x10, - 0x02, 0xb9, 0x6d, 0xf9, 0xed, 0x8f, 0x64, 0x35, 0x4d, 0x2c, 0x9f, 0x99, - 0xdc, 0xee, 0xfa, 0x63, 0x99, 0xc4, 0xb8, 0x3d, 0x77, 0xea, 0xda, 0xd5, - 0x95, 0x8b, 0x8e, 0x76, 0x02, 0x9c, 0x62, 0xa0, 0xad, 0xfe, 0x80, 0x61, - 0x72, 0x59, 0xd6, 0x9f, 0x16, 0x2e, 0x09, 0x71, 0xb8, 0xd7, 0x65, 0x25, - 0xc2, 0x5b, 0x40, 0x67, 0x8e, 0xd6, 0xf8, 0xdf, 0x67, 0x29, 0x19, 0xa2, - 0xa6, 0x07, 0xf3, 0xc8, 0x91, 0x7d, 0xf2, 0x50, 0x71, 0xba, 0x5c, 0x2d, - 0xa7, 0xae, 0xc4, 0xd5, 0xeb, 0xb9, 0x0d, 0x2d, 0x23, 0xe5, 0x8c, 0x65, - 0xf5, 0xf8, 0x97, 0x69, 0xde, 0x25, 0x6f, 0xea, 0x12, 0x72, 0x3e, 0xb9, - 0xa7, 0x8d, 0xcf, 0xa5, 0x66, 0xee, 0x4e, 0x2e, 0x66, 0x6b, 0xec, 0x77, - 0x7f, 0x53, 0xdc, 0x29, 0x73, 0x5e, 0xe9, 0x2f, 0x79, 0xac, 0x8d, 0x0f, - 0x44, 0x09, 0x5d, 0x25, 0x1d, 0x78, 0xb6, 0xe9, 0xd0, 0xfa, 0x8f, 0x5f, - 0x9c, 0xf0, 0xe0, 0xfc, 0x62, 0x9f, 0x52, 0x6b, 0x5b, 0x8e, 0x3f, 0xdf, - 0xb4, 0xf1, 0xdf, 0x35, 0xd0, 0x8f, 0x5a, 0xc9, 0x1f, 0x08, 0x86, 0xaa, - 0x5a, 0x9e, 0xe8, 0xb0, 0xaa, 0xd4, 0xcd, 0x2a, 0x5b, 0x4f, 0x7f, 0x39, - 0x9f, 0x7f, 0x21, 0xf2, 0xfd, 0x05, 0x96, 0x53, 0x09, 0xfd, 0x36, 0x4c, - 0xcd, 0x98, 0x74, 0xf5, 0xbd, 0xcd, 0x9e, 0x14, 0x15, 0x05, 0xb9, 0x3d, - 0x5f, 0x8a, 0x02, 0x86, 0x10, 0xd7, 0xd4, 0x01, 0x20, 0xd9, 0x8c, 0x65, - 0x7d, 0x9d, 0x39, 0x25, 0xbc, 0xce, 0x1a, 0xb1, 0x76, 0x92, 0xc3, 0x03, - 0xed, 0xa2, 0x41, 0x31, 0x0d, 0xc0, 0x40, 0x94, 0x01, 0xbc, 0x9b, 0xe9, - 0x5e, 0x3e, 0x8c, 0x49, 0xf6, 0x98, 0x0c, 0x39, 0x79, 0xdc, 0xd1, 0x1b, - 
0xc5, 0xb2, 0x20, 0xb4, 0x6c, 0xb4, 0x4f, 0xce, 0xf4, 0x6c, 0x0b, 0xef, - 0x85, 0xf2, 0x7d, 0x9a, 0x90, 0x58, 0x1b, 0x51, 0x56, 0x52, 0xac, 0x75, - 0x9f, 0x17, 0xe6, 0x48, 0xaf, 0x18, 0x4c, 0xd8, 0x67, 0xe8, 0xd2, 0x61, - 0xbc, 0xa0, 0x95, 0xc9, 0x78, 0xd8, 0xa2, 0x1d, 0x47, 0x59, 0x30, 0xcf, - 0xf3, 0x79, 0x06, 0xd4, 0x25, 0xf8, 0x9c, 0x5c, 0x28, 0xee, 0xb0, 0xd2, - 0xb6, 0xaf, 0x34, 0x0e, 0xe5, 0xe4, 0x16, 0x2e, 0x05, 0x45, 0x23, 0xc1, - 0x88, 0x90, 0x4a, 0x8f, 0xff, 0xfb, 0xe2, 0xc0, 0xb7, 0xae, 0xb5, 0x50, - 0xc9, 0x26, 0xf0, 0xa2, 0xf5, 0x21, 0x23, 0x79, 0x23, 0xb6, 0x8f, 0x57, - 0x64, 0xd1, 0x27, 0xc2, 0x07, 0x63, 0xa6, 0x54, 0x1f, 0x2f, 0xca, 0x16, - 0xb8, 0x28, 0x51, 0x2a, 0x92, 0xe0, 0x06, 0x36, 0x55, 0x00, 0x6c, 0x99, - 0x31, 0xa7, 0x56, 0xb3, 0x7b, 0x15, 0xcd, 0xc1, 0x32, 0x3a, 0xc0, 0x37, - 0x1f, 0xea, 0x29, 0xb6, 0x75, 0xdf, 0x8a, 0x17, 0x09, 0x45, 0xc2, 0x6e, - 0xe2, 0x4c, 0xa5, 0x93, 0x9b, 0x17, 0x08, 0x27, 0x75, 0x33, 0xdb, 0x1f, - 0xab, 0x37, 0xad, 0x8e, 0xaa, 0xef, 0x0b, 0x82, 0xaa, 0xa7, 0xae, 0x2c, - 0x43, 0x4d, 0x8f, 0xa0, 0x43, 0xd7, 0xa1, 0x34, 0xeb, 0xc0, 0x4e, 0xbd, - 0x64, 0xfc, 0xc8, 0x6a, 0x56, 0xa8, 0xfc, 0x9e, 0x2d, 0x5f, 0x7a, 0xa3, - 0x72, 0x06, 0x79, 0x38, 0x33, 0x05, 0xa7, 0xf0, 0x09, 0x48, 0x55, 0xfe, - 0x3f, 0xab, 0x25, 0x8e, 0x76, 0x1d, 0x12, 0x5a, 0x20, 0x68, 0xfb, 0x51, - 0x51, 0x33, 0x40, 0x37, 0x0c, 0x90, 0x98, 0x6f, 0x66, 0x3f, 0x40, 0xa2, - 0x2e, 0x3c, 0xd1, 0x22, 0x51, 0x54, 0x25, 0x7e, 0x4c, 0x5d, 0x96, 0xb2, - 0x65, 0x0f, 0xa3, 0xdf, 0x8e, 0x97, 0xfe, 0xeb, 0xe7, 0xc6, 0x22, 0x2a, - 0x47, 0x3a, 0x78, 0x1b, 0x39, 0x2e, 0xd6, 0xbc, 0x35, 0xb4, 0xf4, 0xc3, - 0xf2, 0x6a, 0x12, 0xc9, 0xe7, 0x6c, 0x9a, 0xfc, 0xed, 0xbc, 0x11, 0xc7, - 0x71, 0x09, 0x8f, 0x56, 0xc1, 0xd8, 0xb6, 0x92, 0x35, 0x97, 0x8e, 0x71, - 0xd2, 0xbb, 0xb4, 0xed, 0xf0, 0x7e, 0xff, 0x58, 0xd9, 0x95, 0x26, 0xea, - 0xa9, 0x4d, 0x38, 0x8d, 0x4e, 0x8e, 0x53, 0xae, 0x7e, 0xe6, 0xe6, 0x82, - 0x35, 0x96, 0xab, 0x0f, 0x04, 0x0f, 0xf2, 0xac, 0x1b, 0xcd, 0x07, 0x17, - 
0x1b, 0x25, 0x2f, 0x92, 0xaf, 0x19, 0xa2, 0x1b, 0xa0, 0x7a, 0xc7, 0x4f, - 0xb8, 0x1b, 0x89, 0x21, 0xb5, 0xe2, 0x24, 0xe9, 0x78, 0xae, 0x7d, 0xd7, - 0xcc, 0x8e, 0x3f, 0xa7, 0xe9, 0xbe, 0xe6, 0x79, 0x0f, 0xdf, 0x86, 0xe9, - 0xb9, 0xcd, 0x82, 0x7b, 0xf5, 0x04, 0x89, 0xa0, 0x73, 0x5d, 0xa2, 0x4e, - 0xd6, 0xa0, 0x60, 0x21, 0xe2, 0xfe, 0xd3, 0xf4, 0x19, 0x8b, 0x6a, 0x03, - 0x12, 0x9c, 0x51, 0x9a, 0x41, 0x4e, 0xf6, 0xb4, 0x6e, 0x0c, 0x43, 0xf5, - 0x00, 0x00, 0x78, 0x12, 0xdd, 0x21, 0xa8, 0xc7, 0x21, 0xa1, 0x4e, 0x44, - 0x10, 0xd0, 0xdb, 0x6f, 0x0b, 0x4c, 0xe7, 0x7a, 0x8c, 0x0c, 0xaa, 0xb6, - 0x9a, 0x7d, 0xa9, 0xff, 0x5a, 0x2e, 0x15, 0x9e, 0x6f, 0xea, 0xe1, 0x42, - 0x0c, 0x9c, 0x5a, 0x3b, 0xd5, 0xe6, 0xde, 0x23, 0x3f, 0x9c, 0x45, 0x20, - 0x67, 0x96, 0x50, 0x16, 0x80, 0x42, 0xe7, 0x67, 0x7d, 0x24, 0xdc, 0x00, - 0xaa, 0x01, 0x8a, 0xa3, 0x61, 0xfe, 0x9a, 0xce, 0xc1, 0xe5, 0x2e, 0x19, - 0x85, 0x04, 0xe6, 0x7b, 0xe8, 0x7a, 0xbc, 0x9d, 0xfe, 0x71, 0x29, 0x1d, - 0x17, 0xae, 0x6b, 0x1a, 0x64, 0xd7, 0xfe, 0x18, 0x29, 0x07, 0x9b, 0x49, - 0x43, 0xba, 0x29, 0x37, 0xa8, 0xb0, 0x26, 0x27, 0x6b, 0x7d, 0xde, 0x49, - 0x12, 0x90, 0x05, 0xe2, 0x2c, 0xd8, 0x08, 0xd0, 0x5d, 0x74, 0xa7, 0x15, - 0xbe, 0x34, 0x34, 0x6d, 0xad, 0xfb, 0xa8, 0x01, 0x4a, 0x6c, 0x98, 0xba, - 0x84, 0x38, 0xbd, 0x05, 0xe8, 0x87, 0x27, 0x91, 0x3f, 0xb8, 0xe9, 0x06, - 0x27, 0xda, 0x56, 0x07, 0xaa, 0xea, 0xf4, 0x80, 0x5c, 0x12, 0x44, 0xbe, - 0x23, 0xb3, 0x63, 0x9f, 0x5f, 0x37, 0xa7, 0x53, 0x4c, 0xfc, 0x4d, 0x87, - 0xeb, 0x91, 0xe8, 0xd7, 0x5a, 0xd6, 0xca, 0x67, 0x2d, 0x2f, 0x5a, 0x0e, - 0xc7, 0x82, 0x78, 0xa4, 0xf3, 0x56, 0x07, 0xa5, 0xab, 0x6d, 0x09, 0xd2, - 0x0d, 0x08, 0x6b, 0x6e, 0x1f, 0xc1, 0xf2, 0x91, 0x1a, 0x39, 0xfe, 0x14, - 0x56, 0x3f, 0xeb, 0x9f, 0x14, 0xc2, 0xb3, 0xb2, 0xc2, 0x8d, 0xc2, 0xee, - 0x7e, 0xf0, 0x7d, 0x92, 0xd2, 0xc3, 0x57, 0x3e, 0x2c, 0x07, 0x1b, 0x6a, - 0x9b, 0x3b, 0x79, 0x59, 0xc9, 0x22, 0x96, 0x6c, 0x3e, 0x37, 0xd3, 0x0e, - 0x5c, 0xf6, 0x8f, 0xa9, 0xaa, 0xc9, 0xa4, 0x4b, 0xaf, 0x5d, 0x1a, 0xb6, - 
0xf3, 0x91, 0x32, 0x4f, 0xca, 0x72, 0xa0, 0x42, 0x01, 0x51, 0xaf, 0x19, - 0x89, 0xc4, 0xcc, 0x9b, 0xf3, 0x52, 0xe9, 0xa6, 0xf2, 0x71, 0x6f, 0x5a, - 0x38, 0x02, 0xb8, 0x75, 0x88, 0x5f, 0x8d, 0x12, 0xc5, 0x55, 0x4f, 0xd1, - 0xba, 0xf2, 0x24, 0xdc, 0x63, 0x5f, 0x93, 0xc7, 0xf3, 0xe7, 0x59, 0xac, - 0xc3, 0xed, 0xbc, 0x02, 0xe3, 0xad, 0xb2, 0x8e, 0x2c, 0x2d, 0x47, 0xb4, - 0x34, 0x8d, 0xae, 0x44, 0xc8, 0x5f, 0x14, 0xe8, 0x8e, 0x7b, 0xc3, 0x60, - 0x53, 0x9a, 0x51, 0xea, 0x7f, 0x2f, 0xb6, 0x62, 0x61, 0xf7, 0xc0, 0x18, - 0x0f, 0x20, 0x79, 0x13, 0x5c, 0xe8, 0xca, 0x04, 0x29, 0x5f, 0x70, 0x4d, - 0x88, 0xa2, 0x43, 0x20, 0x57, 0x33, 0x04, 0x74, 0x8e, 0x7c, 0x89, 0xd4, - 0x56, 0x8f, 0x93, 0x86, 0x81, 0x6c, 0x11, 0xfc, 0x32, 0x0e, 0xb0, 0x3e, - 0xe5, 0x13, 0xbf, 0x76, 0x62, 0xcc, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x0e, 0xf8, 0x8f, 0xde, 0xfd, 0xfd, 0xcf, 0xd1, - 0x6f, 0x9f, 0xf2, 0xb6, 0xb6, 0x59, 0xb2, 0x73, 0x1c, 0x3c, 0x0d, 0xb0, - 0x4d, 0xb8, 0x96, 0xc6, 0xeb, 0xe5, 0xf8, 0x0d, 0x3e, 0xd7, 0x0c, 0xbd, - 0x9c, 0xaa, 0xd5, 0x1c, 0x19, 0x9a, 0x4c, 0x8e, 0xfa, 0xac, 0x68, 0x74, - 0x16, 0x06, 0xb5, 0x49, 0xe7, 0xd5, 0x6f, 0x4f, 0xcc, 0xd9, 0x02, 0x74, - 0xd6, 0x08, 0x73, 0x7c, 0xa9, 0xfa, 0x3e, 0x50, 0x87, 0xf7, 0xfb, 0xa6, - 0x94, 0xdc, 0xb1, 0x40, 0xec, 0xa7, 0xa9, 0x39, 0xff, 0x40, 0x4a, 0x97, - 0x9b, 0xcc, 0x57, 0x66, 0x68, 0xd6, 0xa8, 0x4d, 0x13, 0x06, 0x0e, 0x03, - 0xc4, 0xdf, 0x7a, 0xe4, 0x2f, 0x0e, 0xd7, 0x54, 0xe0, 0xbd, 0x93, 0xeb, - 0x82, 0xd8, 0x05, 0x2d, 0xa2, 0xf0, 0x4e, 0xd0, 0xf9, 0x3e, 0x3e, 0x6b, - 0x3d, 0x08, 0x39, 0x4e, 0x35, 0x13, 0x7b, 0x3b, 0x39, 0x2c, 0x47, 0x2c, - 0x61, 0x9f, 0xfd, 0x59, 0x88, 0x5f, 0x65, 0x08, 0xa9, 0x66, 0xec, 0xb5, - 0x21, 0xf3, 0xe9, 0xba, 0x11, 0x63, 0x24, 0x6c, 0xf4, 0x50, 0x3a, 0xe5, - 0x0c, 0x06, 0x39, 0x69, 0x2f, 0xca, 0x0f, 0x48, 0xbe, 0x95, 0x7d, 0x13, - 0x3d, 0xa5, 0x75, 0x69, 0x85, 0xc8, 0xb3, 0x72, 0x72, 0x3c, 0x4f, 0x96, - 0xe7, 0xb7, 0xbd, 0xe7, 0x76, 0xba, 0xac, 0xc0, 0x07, 0x4d, 0xc1, 0xed, - 
0xb9, 0xf0, 0x91, 0x2e, 0x36, 0xb7, 0x5b, 0x1c, 0xb7, 0xd6, 0xb3, 0x45, - 0x7d, 0x0a, 0xf5, 0x43, 0xdd, 0x7a, 0x8b, 0x4e, 0x18, 0xf2, 0xf3, 0x19, - 0xcd, 0x4a, 0xda, 0x3c, 0x1b, 0x05, 0x27, 0x67, 0x43, 0xa9, 0x8e, 0xe7, - 0x4a, 0x95, 0xa9, 0xad, 0x6c, 0x8c, 0xb2, 0x2e, 0x12, 0xcb, 0xf3, 0xeb, - 0x65, 0x26, 0xf4, 0x3e, 0x86, 0xee, 0x7e, 0xd9, 0xba, 0xce, 0x8d, 0x15, - 0x3e, 0xa8, 0x40, 0x59, 0x1d, 0x27, 0x78, 0x75, 0xf0, 0xf9, 0x33, 0xb5, - 0x32, 0xa9, 0x66, 0xe6, 0x2e, 0x2e, 0x3d, 0xf5, 0x4a, 0xf0, 0x97, 0x2d, - 0xe7, 0x43, 0x85, 0x43, 0x61, 0x25, 0x15, 0x13, 0x9e, 0x8e, 0xf6, 0x78, - 0xe8, 0x67, 0xba, 0xc2, 0x6d, 0xda, 0x46, 0x25, 0x76, 0xd9, 0x9b, 0x69, - 0x95, 0x4b, 0x50, 0x8c, 0xb7, 0x36, 0x49, 0xbc, 0xd7, 0x39, 0x69, 0xb9, - 0xc1, 0x5f, 0x5f, 0xcc, 0x83, 0x4c, 0x16, 0xb8, 0x0c, 0x85, 0xf1, 0xa4, - 0x57, 0x6c, 0x22, 0x1f, 0x60, 0x0c, 0xff, 0xb6, 0xc9, 0xf7, 0x21, 0x2d, - 0x35, 0x78, 0x31, 0x79, 0xd0, 0x6d, 0x61, 0xec, 0x61, 0x04, 0x75, 0x5c, - 0x06, 0xc3, 0x53, 0x1b, 0xb5, 0xdc, 0x23, 0xb9, 0xd9, 0x07, 0xd1, 0xd0, - 0xb3, 0xa5, 0xab, 0xd9, 0xbe, 0xb7, 0xdc, 0xae, 0x3f, 0x3e, 0xd7, 0x2a, - 0x79, 0x3f, 0x9c, 0x27, 0x81, 0x8d, 0x61, 0xe8, 0x46, 0x8f, 0x05, 0xf4, - 0x9c, 0x30, 0x35, 0x9a, 0x2f, 0x62, 0x84, 0x7c, 0xa5, 0x95, 0x68, 0x34, - 0xe6, 0xf0, 0xb9, 0x42, 0xd4, 0x37, 0xc6, 0xd2, 0x35, 0x1f, 0x7b, 0xe0, - 0xa6, 0x92, 0xcf, 0xf7, 0x0f, 0x08, 0x10, 0x79, 0xbd, 0xa8, 0x7c, 0x4e, - 0xef, 0xf1, 0x01, 0x8d, 0x1b, 0x0c, 0x98, 0x46, 0x28, 0xdc, 0xd5, 0xa8, - 0xcf, 0x67, 0x7d, 0x87, 0x2a, 0x8f, 0xdd, 0x52, 0x43, 0x5a, 0x55, 0x80, - 0x88, 0xa6, 0xcd, 0x9c, 0x5d, 0x36, 0xae, 0xef, 0x61, 0x43, 0xec, 0xf0, - 0x7f, 0x92, 0x21, 0x1f, 0xa2, 0xa3, 0x76, 0x0e, 0x5d, 0xf3, 0xa7, 0xe7, - 0x7d, 0xb0, 0x2c, 0x94, 0x36, 0x95, 0x34, 0x4e, 0x04, 0xfb, 0x51, 0xf9, - 0xe6, 0x7e, 0x56, 0x7a, 0x59, 0xce, 0x0a, 0x45, 0x7e, 0xeb, 0xc4, 0xbc, - 0xfd, 0x20, 0xaa, 0x34, 0x6b, 0xee, 0x3b, 0x09, 0xe8, 0x00, 0x4b, 0xfc, - 0x68, 0x24, 0x43, 0xdb, 0x09, 0x58, 0xd0, 0xb6, 0xbf, 0xaf, 0x1d, 0x7f, - 
0x8a, 0x4c, 0x9e, 0x51, 0x97, 0x97, 0xe1, 0x0c, 0x0d, 0xaf, 0xd1, 0x1e, - 0x62, 0xad, 0x70, 0xa5, 0x8a, 0x24, 0x2f, 0x4a, 0xa6, 0x55, 0xb1, 0x44, - 0x09, 0x88, 0xab, 0xa5, 0x45, 0x28, 0xa0, 0x34, 0x9e, 0x14, 0x2c, 0xf9, - 0x0f, 0xb8, 0x33, 0x8f, 0xcc, 0xba, 0x50, 0x34, 0x4c, 0x96, 0x89, 0x09, - 0xb9, 0xa8, 0xfb, 0xac, 0x59, 0x73, 0xea, 0x61, 0xbc, 0x0d, 0x24, 0x3a, - 0x20, 0xc2, 0x76, 0xfc, 0x2e, 0xce, 0xfb, 0x75, 0x00, 0xca, 0x58, 0xbd, - 0xab, 0x61, 0x9b, 0x13, 0x2b, 0xa3, 0xf6, 0x15, 0x55, 0x83, 0x23, 0xc4, - 0xf3, 0x4c, 0x89, 0xc5, 0x4a, 0x18, 0x5c, 0x8d, 0x41, 0xcc, 0x06, 0x7b, - 0xe3, 0x2a, 0x1f, 0x6a, 0x57, 0xbc, 0x54, 0x61, 0x0c, 0xf2, 0xec, 0xbf, - 0xb0, 0xf0, 0x21, 0xde, 0xfc, 0xe4, 0xef, 0xce, 0x47, 0xc8, 0xdc, 0x11, - 0xc7, 0x8a, 0x12, 0x97, 0x68, 0x1d, 0x9e, 0x9a, 0xbf, 0xad, 0x62, 0x7e, - 0x4b, 0x88, 0xd7, 0x20, 0x22, 0xce, 0x5e, 0xe3, 0x87, 0x12, 0xa3, 0x05, - 0xef, 0x1f, 0x05, 0xb1, 0xbd, 0x1b, 0x80, 0x43, 0x84, 0x33, 0x8b, 0x87, - 0xa5, 0xc2, 0xe1, 0x49, 0xa8, 0x75, 0x49, 0x9b, 0x1b, 0x64, 0x8a, 0xd0, - 0x86, 0x10, 0xa8, 0x72, 0xeb, 0x2e, 0xe7, 0x3f, 0xaa, 0x6b, 0x4a, 0x22, - 0xae, 0x17, 0x8f, 0x10, 0x22, 0x03, 0x66, 0x67, 0x35, 0x40, 0x29, 0x1e, - 0xf2, 0x05, 0x36, 0xd5, 0xed, 0xe2, 0x2a, 0xcc, 0x77, 0xe2, 0x16, 0xef, - 0xa7, 0x9b, 0xe1, 0x1b, 0xba, 0xf3, 0xf5, 0x74, 0x6c, 0x2a, 0x98, 0x8a, - 0x14, 0xaf, 0x2c, 0xab, 0xfb, 0x51, 0x53, 0x75, 0x17, 0xcb, 0x5c, 0x86, - 0xb5, 0x60, 0x70, 0x29, 0x65, 0x69, 0x49, 0x42, 0x4f, 0x42, 0x6b, 0xc7, - 0xdb, 0x98, 0x7d, 0x1e, 0xf8, 0x45, 0xb2, 0x33, 0xd6, 0x34, 0x26, 0xa6, - 0x7f, 0x76, 0x31, 0x13, 0x13, 0x9d, 0xd2, 0xb0, 0x30, 0x0b, 0x0b, 0x3e, - 0x1a, 0x84, 0xb0, 0xbd, 0x81, 0x34, 0x25, 0x73, 0x99, 0x87, 0x1a, 0xc8, - 0x44, 0x34, 0x9d, 0x1a, 0x3d, 0x76, 0x44, 0x1d, 0xe2, 0x22, 0xad, 0x3d, - 0xb2, 0xa3, 0x1c, 0xd5, 0x27, 0x8c, 0xc6, 0x84, 0xdf, 0x33, 0xbe, 0xb2, - 0xa7, 0xb9, 0xc5, 0x6e, 0x48, 0xdc, 0xe9, 0xf8, 0xef, 0xfc, 0xaa, 0x1f, - 0x5e, 0x41, 0x48, 0x1e, 0xe0, 0xb9, 0xd6, 0x6e, 0x7a, 0x9c, 0xa3, 0x98, - 
0x4b, 0xfa, 0x90, 0xa4, 0x58, 0x33, 0x85, 0x3b, 0x11, 0x44, 0x83, 0x4b, - 0x1e, 0x0e, 0x5d, 0x11, 0x36, 0x15, 0xe1, 0xbf, 0x15, 0x04, 0x8e, 0x88, - 0xc6, 0x18, 0x53, 0xc3, 0x8d, 0x28, 0x86, 0x25, 0xef, 0x55, 0x7b, 0xf6, - 0x85, 0xf8, 0xed, 0x3b, 0xcf, 0x5d, 0xa6, 0xc7, 0x66, 0xb7, 0xbe, 0x14, - 0xf0, 0x62, 0x89, 0x1f, 0x32, 0x1e, 0x86, 0x2a, 0x93, 0xd5, 0xca, 0x37, - 0x03, 0x0b, 0xf8, 0x0f, 0xca, 0x50, 0x6c, 0x16, 0x2b, 0xf0, 0x77, 0xca, - 0xbb, 0x8e, 0x95, 0x11, 0xef, 0x5b, 0xbe, 0x2f, 0x62, 0x50, 0xb8, 0x3d, - 0xff, 0xfa, 0x30, 0x21, 0xb2, 0x86, 0x3f, 0x50, 0x57, 0x98, 0x79, 0x15, - 0xce, 0x3e, 0xbf, 0x49, 0x58, 0xb0, 0xb5, 0xd7, 0xbe, 0x01, 0x55, 0xee, - 0x60, 0x14, 0x9d, 0x5b, 0x57, 0x48, 0x05, 0x72, 0x6a, 0x23, 0x29, 0xeb, - 0xf3, 0x36, 0x2a, 0xc1, 0xda, 0x5e, 0x4a, 0x63, 0xc4, 0x6b, 0x04, 0xe8, - 0xe8, 0xc1, 0xb5, 0xc4, 0x2d, 0x60, 0x1f, 0xa0, 0x2b, 0x33, 0xa5, 0xb7, - 0x82, 0x59, 0x21, 0xba, 0x13, 0xda, 0x79, 0xda, 0x5a, 0xb1, 0x82, 0x5b, - 0x52, 0x7f, 0x0c, 0x70, 0x75, 0x65, 0xe0, 0x44, 0xb3, 0xca, 0xd0, 0x09, - 0x38, 0x24, 0x83, 0x8e, 0x0c, 0x4c, 0xef, 0x96, 0xe4, 0x04, 0x30, 0x46, - 0x23, 0x6a, 0x28, 0x13, 0x1d, 0x37, 0x14, 0x75, 0x6e, 0xd0, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x21, 0xa2, 0xf0, 0x7d, - 0x29, 0x8f, 0x62, 0x2e, 0xf4, 0x0e, 0x14, 0x9b, 0x60, 0x38, 0xc0, 0x95, - 0xfb, 0x3c, 0x90, 0x5a, 0xa0, 0x1f, 0x30, 0x09, 0xfc, 0x6d, 0xa9, 0xd1, - 0x7b, 0x0b, 0x7c, 0x78, 0xf9, 0xf6, 0xa8, 0x5e, 0xa6, 0x7a, 0xf6, 0x1c, - 0xab, 0x1b, 0x0e, 0xa9, 0x08, 0xfd, 0xd9, 0x97, 0x08, 0x24, 0x2b, 0xda, - 0x08, 0x8b, 0x0c, 0x07, 0x70, 0x15, 0xa8, 0x0c, 0x86, 0xfc, 0xd1, 0x84, - 0xba, 0xd0, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x35, 0x7a, 0xab, 0xaa, 0xbe, 0xd7, 0xad, 0x22, 0x99, 0x46, 0xbb, 0x78, - 0xfd, 0x47, 0x8f, 0x2a, 0x4a, 0xa6, 0x2f, 0x8d, 0x15, 0x07, 0xed, 0x26, - 0x1d, 0xb3, 0x12, 0xd3, 0x88, 0x0f, 0xf1, 0x75, 0x2a, 0x07, 0x62, 0xac, - 0xbf, 0x52, 0x4a, 0xc3, 0x12, 0xe5, 0x3c, 0xea, 0xa6, 0x1e, 0x57, 0x90, - 
0x56, 0x60, 0x7d, 0xcf, 0x4b, 0x65, 0xaf, 0xee, 0x17, 0x56, 0xbe, 0xd2, - 0x38, 0x3f, 0xd6, 0xbc, 0xef, 0xa7, 0x32, 0xb7, 0x10, 0xe9, 0xbd, 0x97, - 0x45, 0x92, 0x3c, 0xd3, 0x35, 0x2e, 0x59, 0x37, 0x65, 0x5c, 0x7f, 0xd0, - 0x99, 0x9c, 0x01, 0xe9, 0x1f, 0x65, 0xe9, 0xec, 0x0f, 0x2d, 0x46, 0xbc, - 0xd4, 0x8f, 0x51, 0x1c, 0xa0, 0xa4, 0x9b, 0x4f, 0x95, 0x54, 0xb0, 0x50, - 0x74, 0xfa, 0x0f, 0xe6, 0x55, 0x81, 0xce, 0x0f, 0xd1, 0x25, 0x56, 0xc8, - 0x2f, 0x3a, 0x65, 0xd4, 0x86, 0x4a, 0x8e, 0xff, 0x5a, 0xcc, 0x67, 0x96, - 0xcc, 0x65, 0x0d, 0x20, 0xee, 0xba, 0x6b, 0xcb, 0xde, 0x10, 0x2f, 0xbf, - 0x67, 0x6d, 0xbe, 0xef, 0x72, 0xfc, 0x25, 0x62, 0xbf, 0xbb, 0xc5, 0xe0, - 0x7b, 0x4c, 0x32, 0xc5, 0xdb, 0x9f, 0xb5, 0xe2, 0x75, 0x8a, 0xba, 0xbb, - 0x69, 0x28, 0xb6, 0x41, 0x25, 0x83, 0x67, 0x35, 0x1b, 0xd7, 0xb3, 0xd7, - 0x58, 0x54, 0x8a, 0x0b, 0x7c, 0xf3, 0x05, 0xcf, 0x2c, 0x78, 0x70, 0xc6, - 0xed, 0x7e, 0x56, 0xb6, 0x4e, 0x48, 0xaa, 0x57, 0xc4, 0xb0, 0xb2, 0xa0, - 0xca, 0x50, 0xe1, 0xc7, 0x41, 0xea, 0xac, 0x5f, 0x18, 0x13, 0xe5, 0x85, - 0x78, 0x3f, 0x05, 0xf3, 0xfd, 0x74, 0x7a, 0x42, 0x61, 0x91, 0x19, 0xc6, - 0x19, 0xe9, 0xd2, 0x78, 0x2c, 0xb1, 0xa3, 0x7f, 0x62, 0xea, 0x2a, 0x35, - 0x1c, 0x55, 0xa3, 0xf7, 0xdc, 0xec, 0x48, 0x23, 0x99, 0x8d, 0xe1, 0x4d, - 0x45, 0xad, 0x92, 0xc6, 0xf4, 0xa2, 0xe5, 0xe6, 0x58, 0xe4, 0xd5, 0x37, - 0xd0, 0x47, 0x0b, 0x64, 0x68, 0x48, 0x7e, 0xeb, 0xbe, 0x5e, 0x74, 0xd1, - 0xc4, 0xa5, 0x60, 0xd0, 0x30, 0x62, 0xbc, 0x81, 0xc4, 0x01, 0x68, 0x18, - 0xf3, 0xac, 0x9d, 0xb1, 0x4d, 0xdd, 0x8b, 0xd2, 0x54, 0x5d, 0xd1, 0x1c, - 0xee, 0x75, 0x9e, 0x99, 0x42, 0x69, 0x38, 0xcc, 0x66, 0x24, 0xd9, 0x8f, - 0x70, 0x98, 0xc3, 0x5e, 0x08, 0xf0, 0xd8, 0x2d, 0xe6, 0x52, 0x48, 0xdf, - 0xd0, 0x03, 0x04, 0x92, 0xab, 0xa1, 0xa1, 0x2f, 0x7d, 0x84, 0xb2, 0x82, - 0x51, 0x56, 0x74, 0x4a, 0x94, 0xff, 0xd2, 0xe4, 0x4e, 0x1a, 0xbd, 0x18, - 0xab, 0x33, 0x68, 0x0e, 0x4f, 0x99, 0x1d, 0x7e, 0x02, 0x3f, 0x1f, 0x50, - 0x05, 0xf8, 0x59, 0x47, 0x97, 0x98, 0x60, 0xb1, 0x30, 0xb1, 0x14, 0xac, - 
0x2c, 0x0a, 0xa8, 0x97, 0x83, 0xf5, 0x5a, 0x5c, 0x87, 0xe5, 0x36, 0x26, - 0xec, 0xb4, 0x94, 0x46, 0x9a, 0xad, 0x2b, 0x9a, 0xb7, 0xac, 0xc4, 0x1a, - 0x55, 0x53, 0xc0, 0x16, 0x91, 0x1c, 0xd6, 0xaa, 0x6b, 0xdd, 0x85, 0x6a, - 0x54, 0xec, 0x7c, 0xa1, 0xd5, 0x18, 0x00, 0x74, 0xd2, 0xf1, 0x7e, 0xad, - 0x7c, 0xa8, 0x85, 0x9b, 0xc0, 0x9f, 0x4f, 0x3b, 0xd9, 0x08, 0xc8, 0x9d, - 0x31, 0x22, 0x7a, 0x53, 0xa8, 0xbd, 0x00, 0xdf, 0xe8, 0x39, 0x52, 0xe9, - 0x14, 0x74, 0x7b, 0x53, 0xf9, 0xbd, 0x29, 0x8e, 0x5d, 0xf2, 0x35, 0x3b, - 0xe3, 0x48, 0xbf, 0xa0, 0xc4, 0x3d, 0x40, 0xb4, 0xf2, 0x7c, 0xd0, 0xe3, - 0x17, 0x11, 0x5b, 0xd6, 0x55, 0xd2, 0x54, 0xcf, 0x20, 0x8d, 0x74, 0x4a, - 0x6b, 0xe9, 0x5d, 0xfe, 0x72, 0x14, 0x6a, 0x11, 0x8b, 0x14, 0x19, 0xba, - 0x63, 0xe4, 0x6b, 0x39, 0xb4, 0x90, 0x67, 0x79, 0x56, 0x31, 0xd3, 0xb5, - 0xeb, 0x9e, 0x95, 0x4b, 0x1e, 0x04, 0x20, 0xd8, 0xbe, 0xe8, 0x1c, 0xd7, - 0x95, 0xcb, 0x57, 0x60, 0xe6, 0x11, 0x35, 0x42, 0x90, 0xfd, 0xb2, 0xe4, - 0x9b, 0x24, 0x70, 0xc0, 0xc3, 0xa9, 0x8a, 0xc9, 0x46, 0xd0, 0xea, 0xc9, - 0x93, 0x7d, 0x9f, 0x64, 0x12, 0x54, 0x09, 0xb7, 0xc2, 0x4d, 0x6e, 0xcc, - 0x60, 0x07, 0x36, 0x31, 0x64, 0x3d, 0x1e, 0xd3, 0x86, 0x47, 0x47, 0x42, - 0x76, 0xb6, 0xf0, 0xe5, 0xb4, 0xe7, 0xbe, 0x47, 0x91, 0x78, 0xbe, 0x06, - 0xf1, 0x6e, 0x58, 0xce, 0x32, 0x13, 0x26, 0x34, 0x92, 0xae, 0xb2, 0x29, - 0xd0, 0x30, 0x55, 0xfd, 0x89, 0x6a, 0xbf, 0x3e, 0xdf, 0x11, 0x39, 0xe4, - 0xfd, 0x56, 0xd7, 0x2f, 0x89, 0x96, 0x08, 0x54, 0xaa, 0xab, 0x8b, 0xfa, - 0x65, 0xe5, 0x64, 0xff, 0x24, 0x25, 0x8f, 0x7d, 0xf6, 0xb1, 0x7f, 0x2f, - 0xa6, 0xf6, 0x46, 0xab, 0x61, 0xfd, 0x47, 0xad, 0x6d, 0x38, 0x6d, 0xc1, - 0xe9, 0x4a, 0xf1, 0x85, 0x05, 0x0e, 0x69, 0x48, 0x7c, 0xa6, 0x76, 0x61, - 0xe3, 0x94, 0xf2, 0xd6, 0x7a, 0x9c, 0x79, 0xc0, 0x2a, 0x51, 0x23, 0xc6, - 0xaf, 0x29, 0x04, 0x0f, 0x47, 0xc2, 0x93, 0xd7, 0x64, 0xe5, 0x37, 0x2e, - 0x53, 0x3b, 0xb7, 0x7c, 0x9c, 0xb4, 0x63, 0x13, 0xc7, 0x56, 0x90, 0xe9, - 0x53, 0xd5, 0x86, 0x2b, 0x96, 0x41, 0x42, 0x56, 0xc5, 0x16, 0xd7, 0x9e, - 
0x30, 0xce, 0xa1, 0x0d, 0x93, 0x5d, 0x11, 0x07, 0xb2, 0x95, 0xfd, 0xf6, - 0x0b, 0x28, 0x95, 0x1a, 0x8f, 0xfa, 0xe1, 0x57, 0x7e, 0x06, 0xff, 0x18, - 0xaf, 0xe3, 0x4f, 0x3c, 0x34, 0x5b, 0xd4, 0x46, 0x1a, 0xd1, 0xd1, 0x7e, - 0x55, 0xba, 0x5d, 0x2a, 0x1f, 0x42, 0x49, 0x95, 0x75, 0x5f, 0x80, 0x60, - 0x02, 0x01, 0xdb, 0x36, 0xad, 0x68, 0x69, 0x1e, 0x0b, 0x90, 0x3f, 0xa6, - 0xb6, 0x2f, 0x66, 0xa6, 0x7d, 0x81, 0x8c, 0xa0, 0xee, 0x05, 0x95, 0xbc, - 0xb3, 0x7c, 0x18, 0xd4, 0x1b, 0x40, 0x96, 0xf5, 0x05, 0x9d, 0x27, 0x3b, - 0x78, 0xfc, 0x19, 0x18, 0xc0, 0x61, 0xa0, 0xd6, 0xf9, 0xc0, 0x3f, 0xe5, - 0x48, 0x35, 0x0f, 0x8b, 0x0d, 0xfb, 0x31, 0xb7, 0x32, 0x40, 0x1d, 0x69, - 0x12, 0x5a, 0x23, 0xf0, 0xce, 0xe9, 0x5e, 0xa6, 0x68, 0x6b, 0xe1, 0xe2, - 0x68, 0x07, 0x02, 0x0d, 0x7a, 0xc2, 0x0a, 0x40, 0x10, 0x5e, 0x94, 0xba, - 0x77, 0x1d, 0xf7, 0xac, 0xec, 0x79, 0xa9, 0xa1, 0x8a, 0xb8, 0x49, 0x32, - 0x08, 0xe0, 0x18, 0xa8, 0x3d, 0x69, 0x41, 0x5d, 0x30, 0x3b, 0xb6, 0x91, - 0x46, 0x8d, 0x81, 0x10, 0xb0, 0xc2, 0xed, 0xa0, 0x4e, 0x59, 0x48, 0xd8, - 0x64, 0x7d, 0x2d, 0x46, 0xf2, 0x8a, 0x2e, 0x5d, 0x0c, 0x4d, 0x9f, 0xfe, - 0x7b, 0x5e, 0xbf, 0x1a, 0x78, 0xdf, 0xfc, 0x0f, 0x04, 0x37, 0x72, 0x1a, - 0x09, 0xb8, 0x6e, 0x1b, 0xf1, 0x18, 0x7d, 0x83, 0x44, 0xaa, 0x9b, 0x71, - 0xe1, 0x03, 0x04, 0x83, 0xe5, 0xaa, 0xc0, 0xd4, 0xa7, 0x80, 0x10, 0x35, - 0x09, 0xae, 0xf7, 0xe1, 0x5e, 0x7c, 0x31, 0x20, 0x43, 0x82, 0xda, 0x07, - 0x39, 0xfe, 0x8f, 0x9d, 0x70, 0x3c, 0x57, 0x43, 0x01, 0x51, 0x37, 0x2e, - 0x97, 0xef, 0xcf, 0x05, 0x44, 0x75, 0x69, 0xf7, 0xdb, 0xda, 0x80, 0x78, - 0x0c, 0xcc, 0xc1, 0x49, 0xac, 0x3b, 0x7e, 0x27, 0x6a, 0xbb, 0xdf, 0x45, - 0x5b, 0x3b, 0x29, 0xf6, 0x1b, 0xa9, 0x25, 0xf9, 0x2f, 0xcf, 0x37, 0x71, - 0x33, 0xb4, 0x90, 0xd7, 0x9b, 0x87, 0x41, 0x15, 0xd1, 0xa6, 0x39, 0xa7, - 0xa9, 0xcd, 0x66, 0x29, 0x59, 0xb4, 0x53, 0x12, 0xa1, 0x20, 0xd5, 0x04, - 0xca, 0x40, 0x31, 0xfa, 0x6f, 0xbb, 0x92, 0x04, 0xf3, 0xc2, 0x10, 0x0d, - 0xc1, 0x19, 0x78, 0x8c, 0x82, 0xed, 0x92, 0x3a, 0x6b, 0xd1, 0x3d, 0xe8, - 
0xac, 0x55, 0xe4, 0x8c, 0xc6, 0xd4, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0xc2, 0x1d, 0x86, 0xe4, 0xf6, 0xa1, 0xbe, 0xf5, - 0xf3, 0x36, 0x9d, 0x32, 0x80, 0x17, 0x3b, 0x1f, 0x18, 0x21, 0xed, 0xa7, - 0xf5, 0xaf, 0xf1, 0x94, 0xe2, 0xa7, 0x08, 0xd5, 0xca, 0x18, 0x45, 0xf5, - 0x68, 0x94, 0x82, 0x61, 0xf7, 0xb7, 0xb2, 0xfa, 0xd4, 0x5e, 0x32, 0xd0, - 0xf0, 0x20, 0x66, 0x83, 0xd1, 0x6b, 0x3c, 0xdf, 0x73, 0xeb, 0x73, 0x82, - 0x09, 0x9b, 0xd0, 0xc5, 0xb0, 0x9f, 0x01, 0x77, 0x85, 0xcc, 0x6e, 0x23, - 0xb7, 0x00, 0x45, 0xe0, 0xa6, 0x01, 0x29, 0x1d, 0x8b, 0xc4, 0xe0, 0xc2, - 0xe0, 0x4f, 0x3b, 0x07, 0xd5, 0xac, 0x6b, 0x88, 0xb8, 0xa4, 0xe2, 0x5c, - 0x19, 0xe9, 0x98, 0x72, 0xa5, 0x6b, 0xf5, 0xa4, 0xf7, 0x15, 0xaf, 0xfb, - 0xb4, 0x80, 0x9a, 0xe3, 0xa5, 0x35, 0x2f, 0x45, 0x81, 0xf1, 0x8b, 0x2d, - 0x26, 0x5c, 0x65, 0xa9, 0x5b, 0x6e, 0x83, 0xc3, 0x62, 0x2f, 0x84, 0xef, - 0x11, 0xa5, 0x58, 0x48, 0xe9, 0x67, 0x7e, 0xd3, 0x0b, 0x5d, 0x51, 0x80, - 0x39, 0x08, 0x8e, 0xc1, 0x0d, 0x04, 0x11, 0x5f, 0x72, 0x64, 0x1f, 0x83, - 0xf8, 0xd3, 0x09, 0x38, 0xb6, 0x7f, 0x50, 0x78, 0x27, 0x20, 0xe5, 0xbd, - 0x16, 0xbf, 0x51, 0xd8, 0x4f, 0x67, 0x60, 0xf6, 0x9e, 0xff, 0x08, 0xfe, - 0xc6, 0x96, 0xd6, 0x64, 0x94, 0x28, 0xc6, 0x9a, 0x09, 0x1a, 0x34, 0x08, - 0x31, 0x4b, 0x0b, 0x97, 0x5a, 0x18, 0x72, 0x49, 0xe9, 0x1d, 0xbb, 0x9c, - 0xed, 0x7e, 0xb5, 0xc5, 0xa7, 0xf4, 0x25, 0x7a, 0x26, 0xe9, 0x15, 0x61, - 0x85, 0x32, 0xc9, 0xb3, 0xcf, 0x95, 0xbf, 0x35, 0x10, 0x2d, 0x71, 0xfe, - 0x03, 0xd6, 0x69, 0x75, 0x8d, 0xb7, 0x16, 0xa7, 0x3d, 0x0e, 0xb7, 0x55, - 0x6d, 0xa7, 0x9f, 0x10, 0x7e, 0x7e, 0xff, 0x39, 0xee, 0x8e, 0xa7, 0x81, - 0x7d, 0x11, 0xea, 0xa9, 0xd6, 0xed, 0x54, 0xf8, 0xd2, 0xd5, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xf9, 0xde, 0x41, 0xe7, - 0xa6, 0x88, 0x53, 0x76, 0x5a, 0x26, 0xc3, 0x5c, 0xf2, 0x58, 0x68, 0x9c, - 0xc7, 0x4e, 0x53, 0x18, 0x53, 0x67, 0x39, 0x23, 0x96, 0xb0, 0xef, 0x58, - 0x29, 0xe1, 0x68, 0xd8, 0xce, 0xc0, 0x41, 0xc2, 0x35, 0x5f, 0x74, 0xfa, - 
0xdf, 0xc7, 0x0f, 0x80, 0x50, 0xd1, 0xf6, 0x5a, 0x3a, 0x81, 0xe0, 0xd9, - 0x9b, 0x47, 0x96, 0xcd, 0xc5, 0x0f, 0x91, 0x12, 0x81, 0x77, 0x1e, 0xef, - 0x2e, 0xba, 0x16, 0x51, 0x70, 0x78, 0xdc, 0xa3, 0x84, 0x12, 0x7c, 0x9e, - 0x21, 0x7d, 0xa3, 0x5f, 0xce, 0xa1, 0x25, 0x84, 0x99, 0xa4, 0x2d, 0xa6, - 0x0f, 0x95, 0xef, 0xef, 0x31, 0xe6, 0xf2, 0x18, 0x08, 0x47, 0xd2, 0x5a, - 0x39, 0x01, 0x7a, 0xca, 0xd3, 0x03, 0xb1, 0xc2, 0x48, 0xf4, 0x1f, 0x6d, - 0xc2, 0x8c, 0x5c, 0xda, 0xf5, 0x10, 0xed, 0xfc, 0x2e, 0x0c, 0xb3, 0x52, - 0xaa, 0xa9, 0xed, 0xbc, 0x41, 0xcc, 0xd4, 0x4b, 0x1c, 0xd0, 0xa3, 0x1d, - 0xf4, 0xe7, 0x48, 0x34, 0x4e, 0xcf, 0x3b, 0xb3, 0x71, 0x06, 0xbe, 0x0c, - 0x35, 0xbb, 0xb4, 0x17, 0xd8, 0x8b, 0xba, 0xdd, 0x32, 0x30, 0x51, 0xb1, - 0xb1, 0xd6, 0x3a, 0xdc, 0x3b, 0x25, 0x9a, 0x57, 0xc7, 0x4d, 0xd3, 0x75, - 0x93, 0x59, 0x3e, 0x9b, 0x10, 0xcf, 0xdb, 0x38, 0x75, 0x51, 0xb2, 0x2a, - 0x48, 0x78, 0xfc, 0xaa, 0xe3, 0x91, 0xe7, 0x93, 0xe7, 0x0a, 0x07, 0x2c, - 0xf8, 0x88, 0x93, 0xde, 0x2f, 0xba, 0x7b, 0x72, 0xcd, 0x92, 0xdd, 0xb1, - 0xac, 0x1e, 0xe4, 0xe3, 0x5d, 0xa4, 0x7f, 0x86, 0xa7, 0xcb, 0xb5, 0x81, - 0x86, 0xf1, 0xf5, 0xad, 0xd6, 0x36, 0x08, 0x09, 0x9f, 0x75, 0x6f, 0x4a, - 0x5b, 0x30, 0xf8, 0xaf, 0xd2, 0xbc, 0xb5, 0xbe, 0xf2, 0xeb, 0x9b, 0xbc, - 0x11, 0xd4, 0x0c, 0x14, 0xa6, 0x6f, 0x43, 0xd3, 0xc9, 0x4e, 0xca, 0x9b, - 0x4e, 0x46, 0x60, 0x4c, 0x63, 0xcc, 0x07, 0x36, 0x8c, 0xf2, 0xd1, 0x93, - 0x7a, 0x51, 0x49, 0x15, 0xbf, 0xbf, 0x9e, 0x82, 0x21, 0x06, 0xa0, 0x39, - 0x11, 0x1d, 0x6c, 0x41, 0x72, 0xcd, 0x2a, 0x8a, 0x4a, 0xd0, 0x13, 0x6c, - 0x56, 0xf4, 0x00, 0x48, 0xaf, 0xab, 0xdf, 0xa9, 0xe9, 0xa6, 0xaa, 0x06, - 0x61, 0x79, 0xc4, 0x57, 0x42, 0xca, 0x12, 0x18, 0xcf, 0x81, 0xec, 0x79, - 0x19, 0xd2, 0xd2, 0xe3, 0x1d, 0xc6, 0x6c, 0xd0, 0xd6, 0x0a, 0xfb, 0x70, - 0x42, 0x28, 0x25, 0x23, 0xb6, 0x23, 0x15, 0x28, 0x5e, 0x9f, 0x49, 0xf2, - 0x7b, 0x69, 0x74, 0xa5, 0xb9, 0x26, 0x81, 0xfe, 0x39, 0x3e, 0x3f, 0xc8, - 0x7e, 0x9e, 0x5e, 0x8e, 0xf2, 0xdb, 0x6b, 0xfd, 0xe1, 0xc3, 0x01, 0x4a, - 
0xba, 0x8f, 0x33, 0x71, 0x09, 0x80, 0x5d, 0x9c, 0x58, 0x64, 0xb7, 0x90, - 0x13, 0x2a, 0xe9, 0x1d, 0x07, 0x2c, 0x06, 0x70, 0x43, 0x0d, 0xb6, 0x57, - 0x02, 0x3c, 0xbe, 0x3c, 0x42, 0xab, 0x77, 0x15, 0x0e, 0x98, 0xfb, 0xf2, - 0x1d, 0x14, 0xd9, 0xb8, 0xd1, 0x59, 0x2a, 0x67, 0x6f, 0xfc, 0x59, 0x39, - 0x33, 0xe0, 0x49, 0x0b, 0x4e, 0x65, 0x81, 0x9f, 0x71, 0xf2, 0xa5, 0x90, - 0x4f, 0x24, 0xc7, 0x05, 0xfb, 0x77, 0x1e, 0x14, 0xca, 0x2f, 0xfc, 0xac, - 0xec, 0xbf, 0xa2, 0x69, 0x15, 0x0a, 0x6b, 0xa9, 0xa0, 0x74, 0xee, 0xad, - 0xa9, 0x50, 0x4d, 0x4d, 0xab, 0x6e, 0xc1, 0xb3, 0xda, 0xbb, 0xbd, 0xab, - 0x00, 0x05, 0x14, 0xc1, 0xc4, 0x53, 0x7b, 0x78, 0x97, 0x68, 0x3c, 0x05, - 0xf2, 0xed, 0x87, 0xca, 0x86, 0xd1, 0xdf, 0xda, 0xb3, 0x2f, 0x17, 0x87, - 0x87, 0x2f, 0xd8, 0xe9, 0xb2, 0x96, 0xdc, 0x7f, 0x22, 0xf1, 0x2a, 0x9f, - 0xfe, 0x54, 0x55, 0xa1, 0x96, 0xab, 0x9f, 0x61, 0x74, 0xcd, 0x4d, 0x77, - 0x38, 0x02, 0x23, 0x29, 0x28, 0x5b, 0xfc, 0x86, 0x17, 0x40, 0xd4, 0x42, - 0x2a, 0x9b, 0x84, 0xf7, 0x67, 0x2b, 0x3a, 0xc1, 0x31, 0x89, 0x4b, 0x67, - 0xd1, 0x7d, 0x6b, 0x36, 0xec, 0x69, 0x6b, 0x24, 0xca, 0xd6, 0x2d, 0xbb, - 0x21, 0xc8, 0x0c, 0x53, 0x41, 0x29, 0x0b, 0xc1, 0xfe, 0xd5, 0xa3, 0x4c, - 0x66, 0x2f, 0xc7, 0xf1, 0xa8, 0xc0, 0x3d, 0x9a, 0xb9, 0x09, 0x50, 0x3f, - 0x09, 0x87, 0xa4, 0x3f, 0x7a, 0x33, 0xef, 0xf0, 0xfb, 0x77, 0x02, 0x7d, - 0x92, 0xaf, 0x73, 0xaa, 0xcc, 0x3f, 0x66, 0x56, 0xd0, 0x21, 0xd1, 0xe8, - 0x0e, 0x47, 0x03, 0x5e, 0x3b, 0xe9, 0xa2, 0xe3, 0x83, 0x0b, 0x73, 0xd3, - 0xaa, 0x94, 0x80, 0xef, 0x7c, 0xdf, 0xde, 0x86, 0xc3, 0xa9, 0x62, 0x34, - 0x76, 0xee, 0x4d, 0x15, 0x73, 0x7b, 0xd7, 0x6d, 0xd4, 0x21, 0x05, 0xd4, - 0xcf, 0xf3, 0x54, 0xdc, 0x49, 0x5f, 0x5a, 0x2a, 0x37, 0x19, 0x89, 0x61, - 0x1d, 0x95, 0x17, 0x8b, 0x09, 0x95, 0x5d, 0x9f, 0xde, 0x86, 0x03, 0x93, - 0x76, 0xec, 0x54, 0xec, 0x13, 0xc3, 0xf9, 0x38, 0x8f, 0xa9, 0x11, 0xf0, - 0x9a, 0x0e, 0x5e, 0x38, 0x69, 0xeb, 0x62, 0x41, 0x9e, 0xd0, 0x1b, 0x59, - 0x8c, 0xfd, 0x16, 0xfa, 0xd8, 0x99, 0x0d, 0x83, 0x7e, 0xba, 0x5b, 0xc6, - 
0x59, 0xe1, 0xae, 0xba, 0xb9, 0xb8, 0xba, 0xa5, 0x4d, 0x20, 0x00, 0xc9, - 0x0c, 0xe1, 0x77, 0xdf, 0xc4, 0x95, 0xca, 0x7c, 0xa5, 0xef, 0x0a, 0xed, - 0x9b, 0x31, 0x06, 0xe1, 0xc9, 0xa3, 0x88, 0x0a, 0xcc, 0x3d, 0xc8, 0xb6, - 0x01, 0xe2, 0xa9, 0x29, 0x03, 0x8a, 0x28, 0xf8, 0x0d, 0x70, 0x77, 0xb9, - 0xe1, 0x1b, 0x06, 0x19, 0x86, 0xc1, 0xd3, 0xcf, 0x6b, 0x9c, 0x09, 0x70, - 0x50, 0xed, 0xb5, 0xf6, 0x69, 0xcc, 0xac, 0x30, 0x6a, 0x1f, 0x1d, 0xe6, - 0x75, 0x33, 0xab, 0x55, 0x48, 0xfa, 0x81, 0xb8, 0x06, 0x3a, 0x78, 0xee, - 0xde, 0xef, 0xe2, 0x17, 0xc4, 0x3e, 0xe5, 0x22, 0xa7, 0xd1, 0x45, 0x5b, - 0x57, 0xb0, 0xde, 0x69, 0x30, 0xd1, 0x9a, 0xd7, 0x6b, 0x0e, 0x7a, 0x30, - 0x0d, 0xb5, 0xec, 0x60, 0xa7, 0x05, 0x87, 0x42, 0x4b, 0x92, 0x1f, 0x68, - 0x8e, 0x1a, 0x90, 0x84, 0x27, 0x2a, 0xc0, 0xd2, 0xff, 0xbc, 0x8e, 0x34, - 0x53, 0x9d, 0x04, 0x50, 0xcb, 0x79, 0xd9, 0x55, 0xd5, 0x4d, 0x3c, 0xe2, - 0xb4, 0x9b, 0x57, 0x07, 0x1f, 0xce, 0xd0, 0xa7, 0x84, 0xe1, 0xb7, 0x3a, - 0xaf, 0xc5, 0x67, 0x64, 0xbc, 0x02, 0xbe, 0xb0, 0x65, 0x7e, 0xb0, 0x4c, - 0xc2, 0x2d, 0xcd, 0xf8, 0x60, 0xcb, 0xfe, 0xd1, 0x8d, 0x14, 0x5a, 0xd3, - 0x38, 0xd4, 0x71, 0x5a, 0xca, 0xbb, 0xfe, 0x0e, 0x54, 0xf9, 0xb4, 0x25, - 0xa5, 0x71, 0x13, 0x95, 0x14, 0xdc, 0x86, 0xb8, 0x21, 0xa7, 0x2e, 0x13, - 0xc6, 0x2f, 0xce, 0xe7, 0x6c, 0xb8, 0x0d, 0xc9, 0xe4, 0xc4, 0x64, 0x12, - 0x78, 0x1c, 0x95, 0x92, 0xc2, 0xec, 0xaa, 0xd3, 0xc3, 0x3a, 0xd2, 0xe8, - 0x95, 0xf0, 0x6b, 0x03, 0x8c, 0xcf, 0x6b, 0xdb, 0x21, 0xa0, 0xcf, 0xf4, - 0x05, 0xc8, 0xe7, 0x77, 0x05, 0x55, 0x7b, 0x6b, 0xfa, 0x96, 0xf1, 0x7c, - 0x30, 0x62, 0x75, 0xbe, 0x6e, 0xea, 0xba, 0x9f, 0x40, 0x2e, 0x9a, 0x86, - 0x93, 0xcc, 0x38, 0xf7, 0xee, 0xd8, 0xbb, 0x24, 0xcd, 0x85, 0x3e, 0x85, - 0x16, 0x8c, 0x33, 0x23, 0x73, 0xe6, 0x43, 0xc4, 0x67, 0xbf, 0xef, 0x85, - 0xb1, 0x44, 0xf9, 0x55, 0x93, 0x4d, 0x0b, 0x8e, 0xc1, 0x42, 0x13, 0xc6, - 0xc8, 0x09, 0x63, 0xab, 0xb3, 0xc7, 0xc4, 0xa4, 0x8b, 0x72, 0xfb, 0xa5, - 0x99, 0xa1, 0x5d, 0x07, 0x02, 0x82, 0x56, 0x11, 0x3c, 0xc2, 0x5a, 0x55, - 
0xf9, 0x3a, 0x93, 0x61, 0x89, 0x46, 0xb7, 0x6a, 0x42, 0x76, 0x1e, 0x70, - 0xde, 0xd9, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x32, 0xc1, 0x61, 0xaa, 0xdb, 0xe9, 0xae, 0x88, 0xcb, 0xf7, 0x28, 0xdd, - 0x82, 0x62, 0x61, 0x41, 0x4e, 0xbb, 0xf9, 0xb7, 0xe8, 0x81, 0x99, 0x18, - 0xe2, 0xa7, 0xb4, 0x7c, 0xb7, 0x08, 0x44, 0x6f, 0x24, 0xb3, 0xda, 0x57, - 0x62, 0x29, 0xc7, 0xa6, 0x84, 0xb1, 0x5d, 0xc5, 0x00, 0x4c, 0x30, 0x16, - 0xf0, 0x0a, 0x74, 0x73, 0xec, 0xaf, 0xb5, 0xde, 0xb0, 0xa7, 0x75, 0x22, - 0x8f, 0x9e, 0x43, 0x01, 0x68, 0xae, 0x91, 0xeb, 0x46, 0x52, 0x3f, 0x2c, - 0x4e, 0xc5, 0xd0, 0xc8, 0x15, 0xea, 0x99, 0xc2, 0x37, 0x5b, 0x68, 0xb5, - 0xce, 0x41, 0x92, 0xbf, 0xd6, 0xdb, 0x85, 0xad, 0x08, 0xd1, 0x11, 0x93, - 0xe8, 0xd4, 0x78, 0x43, 0x3b, 0x7d, 0xcb, 0x42, 0x84, 0xf3, 0x61, 0x88, - 0x9e, 0x6a, 0x73, 0xb9, 0x78, 0x17, 0x9a, 0x9f, 0xfb, 0x97, 0xcb, 0xd6, - 0xb5, 0x3f, 0x00, 0x41, 0xb0, 0x30, 0x2f, 0x6f, 0x89, 0xdd, 0xfa, 0x13, - 0xd1, 0x07, 0xbe, 0x2f, 0xea, 0x91, 0x62, 0xaa, 0xed, 0xcb, 0xfd, 0x07, - 0x82, 0xbb, 0x3f, 0xf4, 0xa6, 0x94, 0x66, 0x71, 0x20, 0x61, 0xac, 0x84, - 0x04, 0x70, 0xf2, 0xd3, 0xdf, 0xac, 0x44, 0xfd, 0x47, 0x26, 0x81, 0x64, - 0xb3, 0xa6, 0x90, 0x2b, 0xd2, 0x2c, 0xd0, 0x77, 0x81, 0x53, 0x45, 0x78, - 0x5f, 0x30, 0x77, 0x91, 0x83, 0x13, 0x33, 0xd1, 0x91, 0xa6, 0x35, 0x21, - 0xcb, 0x26, 0x54, 0x0a, 0xf7, 0x70, 0x5e, 0xdb, 0xd8, 0x92, 0xc7, 0xdf, - 0xf9, 0x2a, 0x46, 0x91, 0x22, 0x3b, 0xe6, 0xe1, 0x91, 0xeb, 0xa6, 0x78, - 0x81, 0x57, 0xf3, 0x04, 0xdf, 0x34, 0x55, 0x74, 0x0a, 0xfe, 0xf2, 0xbd, - 0xb3, 0xeb, 0xa3, 0x8e, 0x71, 0x15, 0xa9, 0x2f, 0x53, 0xe2, 0xa1, 0x45, - 0xdf, 0xe8, 0x29, 0x40, 0xf1, 0x4b, 0x23, 0xdb, 0x8e, 0xee, 0x19, 0xa8, - 0xd4, 0x15, 0x90, 0x8c, 0x04, 0x46, 0x81, 0x49, 0x92, 0xe5, 0xe1, 0xfe, - 0x99, 0x06, 0xfc, 0x3e, 0x43, 0x58, 0x3b, 0x19, 0x7f, 0xd2, 0x13, 0x65, - 0xc2, 0x64, 0x27, 0x6d, 0x93, 0x6a, 0xcf, 0x48, 0x2a, 0x3d, 0xdd, 0x79, - 0x9f, 0x05, 0x32, 0xeb, 0xfd, 0xb4, 0xd2, 0x1d, 0x16, 0x61, 0x3d, 0x17, - 
0x4c, 0xb8, 0xad, 0x63, 0x0e, 0x6b, 0x8a, 0x4a, 0x34, 0x4c, 0xb5, 0x3c, - 0x0f, 0x05, 0x28, 0x8c, 0x8b, 0xdf, 0xf4, 0xa0, 0x49, 0xbf, 0x34, 0x6c, - 0x6a, 0x5f, 0x40, 0x95, 0x48, 0x4b, 0x93, 0x1e, 0x61, 0x6d, 0x58, 0xc3, - 0x86, 0x98, 0x70, 0x11, 0x4e, 0x44, 0x65, 0xc1, 0x0d, 0xea, 0x2f, 0xda, - 0x38, 0x16, 0xbd, 0xd4, 0x7b, 0x3e, 0x31, 0xee, 0x42, 0x4c, 0xdc, 0xe9, - 0x8b, 0x1f, 0xa9, 0xcf, 0xab, 0x60, 0xb5, 0xb1, 0xd2, 0xf2, 0x6a, 0xe9, - 0xbc, 0xcc, 0xcb, 0x60, 0x4a, 0xca, 0x70, 0x79, 0x64, 0x9d, 0x07, 0x1e, - 0xdb, 0xef, 0x34, 0xaf, 0x17, 0x93, 0x6b, 0x60, 0x73, 0x2d, 0x8c, 0x08, - 0x27, 0x1e, 0x46, 0x9f, 0xcb, 0x33, 0xdd, 0x76, 0xef, 0x17, 0x58, 0x9a, - 0x5f, 0x82, 0x78, 0x0f, 0xbf, 0xe7, 0x0f, 0x3a, 0x1e, 0xa8, 0x30, 0xbf, - 0xff, 0xc7, 0xc7, 0x82, 0x8b, 0xc3, 0x65, 0x04, 0xfd, 0x45, 0xc9, 0x88, - 0x99, 0x8e, 0x44, 0xc5, 0x23, 0x1e, 0xbf, 0xf1, 0x95, 0x70, 0x35, 0xe6, - 0x56, 0x4a, 0x53, 0xb2, 0xac, 0x0c, 0xfd, 0xf5, 0x61, 0x26, 0x5b, 0x70, - 0xd6, 0x4c, 0xfc, 0x0f, 0xcc, 0x53, 0x6e, 0x25, 0xca, 0x1d, 0x0c, 0x56, - 0xf7, 0x9c, 0x95, 0xf6, 0x3c, 0x08, 0x0c, 0x64, 0xb1, 0x1c, 0x5c, 0xe6, - 0x25, 0xa4, 0xa3, 0xb7, 0xaf, 0x8b, 0xbc, 0xe1, 0x68, 0xdf, 0x10, 0xab, - 0xbb, 0xd5, 0x30, 0x64, 0x42, 0xf6, 0xe6, 0x9a, 0xb5, 0x59, 0x12, 0x76, - 0x92, 0xac, 0x29, 0xe9, 0x45, 0xdb, 0x2e, 0x62, 0x22, 0x58, 0x24, 0x89, - 0xc8, 0x6a, 0x2a, 0xa7, 0x3f, 0x04, 0x53, 0x4e, 0x07, 0x41, 0x4e, 0x5f, - 0x95, 0x5f, 0x6e, 0x14, 0x5b, 0xa7, 0xa7, 0xd3, 0x5a, 0xa2, 0x95, 0x4a, - 0xc8, 0xe9, 0x3c, 0x5a, 0x84, 0x50, 0xbc, 0xe1, 0x9c, 0x7a, 0x16, 0xe5, - 0xc7, 0x04, 0x9d, 0x60, 0x2e, 0x7d, 0xb3, 0x77, 0x5d, 0x86, 0x2e, 0xac, - 0x57, 0x2a, 0x31, 0x26, 0x23, 0x6e, 0xcc, 0x7f, 0xb8, 0x36, 0x29, 0xa9, - 0xa8, 0xd9, 0xc6, 0x75, 0xee, 0x16, 0x23, 0x27, 0x0f, 0xe1, 0xb0, 0x3d, - 0x91, 0x3a, 0x26, 0x4a, 0x60, 0x72, 0x14, 0xf9, 0x3c, 0x66, 0x66, 0xe8, - 0x7d, 0x4a, 0x6f, 0x7e, 0x63, 0x58, 0x6a, 0x28, 0x78, 0x50, 0xef, 0x3b, - 0x9d, 0xeb, 0xb6, 0x4b, 0x5d, 0x55, 0x80, 0x84, 0x97, 0x9b, 0x74, 0x4b, - 
0x5c, 0x09, 0x1d, 0xe7, 0x57, 0xfc, 0x40, 0x3f, 0xa9, 0xbd, 0xdf, 0x61, - 0x2a, 0x89, 0x62, 0x51, 0xfc, 0x24, 0xee, 0xee, 0x97, 0x10, 0xca, 0xb6, - 0x0e, 0x8e, 0x71, 0x67, 0x2a, 0x79, 0x4f, 0xc4, 0xe6, 0x3e, 0x27, 0xc2, - 0x9b, 0x85, 0xfd, 0xde, 0xfb, 0x58, 0x75, 0xf3, 0x1c, 0x31, 0xa2, 0x56, - 0x3e, 0xdc, 0x24, 0xf4, 0x4f, 0xcb, 0x5a, 0x1a, 0x77, 0x5c, 0x28, 0xd1, - 0x5a, 0x55, 0xa9, 0x8c, 0xb5, 0xdd, 0x77, 0x93, 0x58, 0xd8, 0x2f, 0x7d, - 0x5a, 0x67, 0xa1, 0x95, 0x0a, 0xd2, 0x6a, 0x93, 0xa6, 0xf0, 0x5f, 0x7f, - 0x0a, 0x29, 0xdb, 0x1d, 0x8c, 0xa7, 0x12, 0x0a, 0xf4, 0xc9, 0xcd, 0x70, - 0xd1, 0xbd, 0x48, 0xd4, 0x9a, 0xbb, 0xbb, 0x24, 0xbf, 0x52, 0x25, 0xb9, - 0x75, 0xc2, 0x17, 0x36, 0x6f, 0x4a, 0xc0, 0x53, 0x6d, 0x38, 0xfb, 0x7a, - 0x60, 0xc8, 0x5d, 0x03, 0xc1, 0x1c, 0x0c, 0x31, 0xf0, 0x59, 0xed, 0x0a, - 0x5f, 0x84, 0xf2, 0x89, 0x6c, 0xb4, 0xd5, 0x24, 0x2d, 0x2a, 0xda, 0xbe, - 0x74, 0x1d, 0x22, 0xe2, 0xc6, 0xf0, 0x9b, 0x98, 0x5a, 0x41, 0x11, 0x4c, - 0x51, 0x97, 0x16, 0xa7, 0xc9, 0xd8, 0x53, 0x12, 0x53, 0xdd, 0x22, 0xa9, - 0xf2, 0xae, 0x52, 0x49, 0x02, 0xf9, 0x5c, 0x78, 0x00, 0xa2, 0x64, 0xff, - 0x91, 0x62, 0x20, 0x6a, 0x87, 0x6a, 0x40, 0x01, 0x85, 0x30, 0xf5, 0xdd, - 0xa7, 0x64, 0x0a, 0x85, 0x8d, 0x37, 0x99, 0xcb, 0x03, 0xc8, 0x29, 0x56, - 0x7e, 0x75, 0x4f, 0xa1, 0xc3, 0x76, 0xce, 0xdb, 0xa3, 0xb4, 0x7e, 0x91, - 0x95, 0xbe, 0x53, 0x0e, 0x20, 0xc9, 0xe7, 0x71, 0x78, 0xad, 0x3d, 0x4c, - 0xbb, 0x59, 0xb9, 0x77, 0xcf, 0x7d, 0x7b, 0xff, 0x15, 0xdb, 0x1d, 0xae, - 0x1f, 0xbe, 0x33, 0x88, 0x01, 0x04, 0x95, 0xe5, 0xe9, 0x6a, 0x1c, 0xbf, - 0xc8, 0xc3, 0x33, 0x3b, 0xd8, 0x2f, 0x75, 0x4a, 0xc3, 0x6f, 0x09, 0x88, - 0x26, 0x46, 0x90, 0x89, 0x53, 0x12, 0x27, 0xc2, 0x7d, 0x23, 0x6b, 0xc4, - 0xe3, 0x0a, 0x0f, 0xc2, 0x86, 0x6d, 0x20, 0x35, 0x82, 0x33, 0xec, 0xdd, - 0xa7, 0x6a, 0xc3, 0xa8, 0x11, 0xdc, 0x02, 0xd9, 0x05, 0x1b, 0x04, 0x75, - 0x92, 0x6c, 0x08, 0x9e, 0x38, 0x72, 0xd9, 0x7d, 0x9b, 0xbc, 0xfd, 0xca, - 0xb8, 0x06, 0x0e, 0x24, 0x89, 0x90, 0xde, 0x52, 0xe4, 0xd1, 0xcc, 0x99, - 
0x87, 0x0b, 0x87, 0xbb, 0x5c, 0xa9, 0xab, 0xec, 0xb5, 0xe4, 0xdd, 0x5d, - 0xfa, 0xb1, 0x97, 0x5f, 0x61, 0xf7, 0x58, 0xd6, 0x08, 0x02, 0xf2, 0x51, - 0x7c, 0x7a, 0xe6, 0xf1, 0xcb, 0x43, 0xd0, 0x21, 0x09, 0xb8, 0x82, 0xa9, - 0x52, 0xd9, 0xa8, 0x7f, 0x2b, 0xe1, 0x0f, 0x31, 0xbc, 0x16, 0xa2, 0xce, - 0x35, 0x55, 0x2e, 0xd6, 0xda, 0x38, 0xd9, 0xc2, 0x5e, 0xca, 0x27, 0xd9, - 0xa6, 0xd6, 0x4b, 0xa2, 0x73, 0xc4, 0xce, 0x66, 0x30, 0x60, 0xa2, 0x01, - 0xfa, 0xc1, 0xd6, 0xc8, 0xea, 0xdd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x70, 0xe2, 0x62, 0x68, 0xff, 0x60, 0x67, 0x64, - 0x88, 0xdd, 0x81, 0x79, 0x82, 0xf5, 0x46, 0xf9, 0x7e, 0x0e, 0xa9, 0x26, - 0xf6, 0xcf, 0x5d, 0xef, 0x10, 0x11, 0xe1, 0x71, 0x72, 0x77, 0xcf, 0x02, - 0x7b, 0xf1, 0x6e, 0xc4, 0xb4, 0xfa, 0x2a, 0x12, 0xfe, 0x7e, 0x3c, 0x66, - 0xef, 0x41, 0x98, 0x3a, 0x1f, 0xa9, 0x14, 0x8f, 0x46, 0x22, 0xa0, 0xc2, - 0xee, 0x93, 0x25, 0x34, 0xf2, 0xb7, 0x6d, 0x0a, 0x36, 0xde, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xd4, 0x17, 0x62, 0x25, - 0xfd, 0x5b, 0x75, 0xeb, 0xec, 0x06, 0xc9, 0x39, 0x86, 0x6d, 0xc5, 0x60, - 0x2d, 0x33, 0x3d, 0xce, 0x6a, 0x9f, 0x07, 0x3b, 0xb9, 0x70, 0x0f, 0xc7, - 0x13, 0x46, 0x35, 0x46, 0x26, 0xe4, 0xbc, 0x6e, 0x54, 0x89, 0x29, 0xd5, - 0xa4, 0x94, 0xa0, 0x3a, 0x7a, 0x61, 0xcf, 0xd1, 0x48, 0x27, 0x7a, 0x72, - 0x95, 0xde, 0x93, 0xd1, 0x19, 0x1f, 0xc9, 0xc8, 0x8f, 0x0d, 0xce, 0x34, - 0x03, 0x39, 0x0a, 0x92, 0x16, 0x09, 0xc4, 0x49, 0xf9, 0x30, 0x2e, 0x19, - 0xd1, 0x69, 0x7e, 0x78, 0x00, 0x25, 0x30, 0x6f, 0x6b, 0xe1, 0xbe, 0xad, - 0xb2, 0x05, 0xde, 0xc7, 0xc2, 0xf7, 0xd5, 0xa7, 0x4d, 0x03, 0x6f, 0x6b, - 0xcd, 0xcb, 0x42, 0xfa, 0x88, 0x16, 0xd5, 0xa6, 0x60, 0x08, 0xd4, 0xa5, - 0x5b, 0x3b, 0x7b, 0xa2, 0xca, 0xa3, 0xa2, 0x5d, 0x63, 0x7f, 0xc0, 0x37, - 0xc5, 0x7e, 0x99, 0x04, 0x5d, 0x9a, 0xb9, 0xa5, 0xac, 0xd1, 0xe2, 0x5d, - 0xb2, 0x2b, 0x7e, 0xbb, 0xb9, 0x66, 0x13, 0xa7, 0x30, 0xbf, 0x80, 0x0c, - 0x2b, 0x8d, 0x45, 0xe1, 0x8d, 0x96, 0x25, 0x27, 0x47, 0x3d, 0x21, 0x7d, - 
0x1c, 0x42, 0xac, 0x31, 0x26, 0x47, 0x59, 0xb3, 0x44, 0x85, 0xf2, 0x8e, - 0x7d, 0x01, 0x96, 0x6d, 0xb2, 0x64, 0xc3, 0xfc, 0xa7, 0x82, 0x06, 0x4a, - 0x87, 0x75, 0x9b, 0x99, 0x47, 0x7e, 0xa6, 0x4d, 0x2c, 0x36, 0xff, 0xac, - 0x2b, 0x77, 0x96, 0x52, 0x14, 0x8d, 0x07, 0x0d, 0x28, 0x9d, 0x84, 0xa2, - 0xda, 0xd6, 0x45, 0x3a, 0xd4, 0xe6, 0xb7, 0x9a, 0xf3, 0x34, 0xe3, 0xda, - 0x39, 0xdf, 0x35, 0x9c, 0xe4, 0x87, 0x55, 0xc8, 0x43, 0xd0, 0x61, 0x46, - 0x52, 0x2f, 0x75, 0x63, 0xbb, 0x98, 0x97, 0xeb, 0xfb, 0x15, 0xaf, 0x8e, - 0x96, 0xdc, 0xff, 0x0a, 0x90, 0xda, 0x09, 0x63, 0x28, 0x7b, 0x92, 0x73, - 0x0b, 0xd4, 0x2b, 0x72, 0x2a, 0x86, 0x32, 0xc3, 0xc1, 0x3e, 0xe4, 0x2c, - 0x07, 0x89, 0x53, 0xb7, 0xfe, 0x78, 0x6c, 0x95, 0xb4, 0x62, 0x4d, 0x4b, - 0xfe, 0x6c, 0xfc, 0x5e, 0x4e, 0xa7, 0x8c, 0x07, 0x4f, 0x85, 0x27, 0xe0, - 0x7b, 0xd9, 0x7a, 0xe5, 0x1d, 0xbc, 0x36, 0xda, 0x8e, 0x21, 0xff, 0xb3, - 0x60, 0x2c, 0x5e, 0x23, 0x0f, 0xde, 0x3f, 0xae, 0xa5, 0x3a, 0x50, 0xa9, - 0x99, 0x39, 0x45, 0xaf, 0xd3, 0x5f, 0x4a, 0x15, 0xad, 0x9c, 0x66, 0x7f, - 0x92, 0xe0, 0x02, 0x81, 0x3e, 0x06, 0x6a, 0x5e, 0xd0, 0x0c, 0x42, 0xe7, - 0xcf, 0xe2, 0xeb, 0xa3, 0xe0, 0xf7, 0x2d, 0x8a, 0x21, 0xdb, 0x64, 0x28, - 0x2a, 0xb3, 0x2b, 0xc4, 0xc9, 0xd5, 0x60, 0xaf, 0xfc, 0x15, 0xa1, 0x44, - 0x9c, 0x96, 0x04, 0x42, 0x1c, 0x55, 0x8c, 0xa5, 0xce, 0x80, 0xce, 0x75, - 0x64, 0xa9, 0xf6, 0xa5, 0x5a, 0x0f, 0x8a, 0x4b, 0x8b, 0x72, 0xcf, 0x3e, - 0xd7, 0xeb, 0xe1, 0xd0, 0xd3, 0x2d, 0x04, 0x6c, 0x9e, 0x02, 0x75, 0x43, - 0x5c, 0xc1, 0x57, 0x66, 0xd9, 0x14, 0x5b, 0x08, 0x10, 0x44, 0x8d, 0x8e, - 0x89, 0xd1, 0x65, 0x27, 0x2a, 0x0b, 0x99, 0x6f, 0x09, 0xa6, 0x20, 0xa5, - 0x75, 0x24, 0xe4, 0xf7, 0xf5, 0xe0, 0xed, 0x79, 0x37, 0x18, 0x13, 0x1c, - 0xd9, 0xd1, 0xf5, 0x69, 0x0c, 0xa5, 0x02, 0xdf, 0x6a, 0xfd, 0x2e, 0x35, - 0x8e, 0xd0, 0x41, 0x91, 0x61, 0x0f, 0x5c, 0xdd, 0x70, 0xbf, 0x1c, 0x49, - 0xcb, 0xe9, 0xc9, 0x33, 0xc4, 0x99, 0x1e, 0x8b, 0x75, 0x48, 0xc2, 0x58, - 0xa4, 0x70, 0x1f, 0xbb, 0xcd, 0xd3, 0x0e, 0x79, 0x25, 0xbe, 0x53, 0xfa, - 
0x32, 0x32, 0xf6, 0xb9, 0xf0, 0x0a, 0x52, 0x5b, 0xe0, 0x69, 0xff, 0x43, - 0xda, 0x98, 0x1f, 0xee, 0x54, 0x60, 0xf8, 0x24, 0x43, 0xc5, 0x37, 0x72, - 0xd1, 0xfc, 0x99, 0x9a, 0x3e, 0x24, 0xe0, 0xd9, 0xc2, 0x61, 0x47, 0xb3, - 0x26, 0x09, 0x85, 0x74, 0xa1, 0x2b, 0x4a, 0x70, 0xd0, 0x1b, 0x90, 0x03, - 0x25, 0xd9, 0x22, 0xc2, 0x16, 0x22, 0x3a, 0x62, 0x20, 0xd4, 0x13, 0xce, - 0xa2, 0xc7, 0x02, 0xfb, 0x9a, 0xbf, 0xf1, 0x1c, 0x80, 0x01, 0x97, 0x90, - 0x7f, 0x5a, 0x98, 0x70, 0x30, 0x61, 0x77, 0xe5, 0xd4, 0x3b, 0x03, 0x42, - 0x57, 0x31, 0x5e, 0xc6, 0x64, 0xe1, 0xf4, 0x64, 0x77, 0x21, 0x9b, 0x44, - 0x1c, 0xd9, 0x8c, 0x95, 0x8a, 0xf1, 0xcb, 0x82, 0xac, 0xc1, 0x26, 0x31, - 0xf2, 0x22, 0x41, 0xab, 0xbb, 0x23, 0xd3, 0x8d, 0xcc, 0x5c, 0x9d, 0x9b, - 0x1d, 0x9c, 0x4d, 0xf3, 0x62, 0xde, 0x15, 0x6a, 0x94, 0x8d, 0x24, 0xe7, - 0x52, 0x8d, 0x2a, 0xa4, 0x1d, 0x54, 0x5a, 0xda, 0xaf, 0xab, 0x05, 0x27, - 0x4b, 0xbb, 0xb4, 0xda, 0x0c, 0xb9, 0x20, 0xb3, 0xaf, 0x4a, 0xeb, 0x37, - 0xe5, 0x43, 0xe4, 0xc1, 0xf6, 0x9e, 0xf8, 0x6c, 0xd8, 0xa1, 0x0c, 0xf9, - 0xd1, 0x4b, 0x96, 0xa0, 0x6d, 0x38, 0x64, 0x41, 0xd3, 0x14, 0xfb, 0xad, - 0x89, 0xa9, 0xf7, 0x36, 0x01, 0x0f, 0xbe, 0x8e, 0xd7, 0x76, 0xc6, 0x70, - 0x22, 0x32, 0x8b, 0x08, 0xca, 0x95, 0xbf, 0xcf, 0x5e, 0xb8, 0xc0, 0x3f, - 0xd9, 0xaa, 0x84, 0xab, 0x30, 0x5b, 0xe3, 0x7a, 0x61, 0x32, 0xe5, 0x54, - 0x01, 0x5e, 0xb6, 0x1c, 0x9c, 0x78, 0x52, 0x2a, 0xa7, 0xf5, 0x29, 0xa6, - 0x0f, 0x14, 0xa5, 0x3a, 0x34, 0xd4, 0xf5, 0xc2, 0xb2, 0x8d, 0x12, 0x7b, - 0x8a, 0x64, 0x00, 0xfd, 0x02, 0x0e, 0x02, 0x26, 0x5a, 0xb9, 0xeb, 0xfd, - 0x30, 0xce, 0x51, 0xec, 0x5f, 0xbc, 0xee, 0x53, 0x21, 0xec, 0x0e, 0xee, - 0xc4, 0x28, 0x1a, 0xec, 0x2a, 0x39, 0x4e, 0xe1, 0x50, 0x11, 0x3f, 0x16, - 0xdd, 0xbf, 0xaf, 0x3e, 0xbe, 0xd4, 0xfe, 0x34, 0x1e, 0x62, 0x3f, 0x5a, - 0xea, 0x05, 0xfc, 0xd5, 0x45, 0x08, 0x47, 0xce, 0x38, 0x3f, 0x75, 0x7e, - 0x0c, 0x3a, 0x2a, 0x14, 0xa7, 0x61, 0xba, 0x3a, 0xa1, 0x41, 0xa2, 0x72, - 0x19, 0xfa, 0x33, 0x43, 0xa7, 0xf4, 0x4e, 0x5b, 0xf9, 0xb1, 0x45, 0x16, - 
0x57, 0x8e, 0xb1, 0xad, 0x7d, 0x88, 0xd3, 0x93, 0xa2, 0x08, 0xf3, 0x96, - 0x4d, 0x84, 0x63, 0x08, 0xfa, 0x9d, 0xf3, 0x04, 0x33, 0xbd, 0x7e, 0x7a, - 0xc7, 0x63, 0xc5, 0x31, 0x5a, 0x82, 0x33, 0x90, 0x56, 0x44, 0xe9, 0xd3, - 0xc4, 0xd4, 0x76, 0x29, 0x2f, 0xdb, 0xa3, 0x9d, 0xff, 0xd4, 0xd2, 0xb1, - 0xce, 0xf1, 0xcb, 0x7f, 0x10, 0x3b, 0x90, 0xa4, 0x1b, 0xa0, 0x9b, 0xa7, - 0xfa, 0x27, 0x40, 0x11, 0x35, 0xc9, 0x7f, 0x01, 0x97, 0x76, 0x9f, 0x33, - 0xc5, 0xd6, 0x8d, 0x20, 0x07, 0x73, 0x93, 0x0b, 0x24, 0x88, 0x4e, 0x73, - 0x68, 0x79, 0x92, 0x20, 0x2a, 0x71, 0xed, 0x22, 0x0b, 0xfb, 0x42, 0xb5, - 0xd9, 0xc3, 0xaa, 0xed, 0x45, 0x03, 0x64, 0xde, 0x6f, 0x25, 0x8e, 0x3b, - 0x9a, 0xef, 0xc5, 0x63, 0xc2, 0x7f, 0x34, 0xd0, 0x1b, 0x20, 0xa3, 0xab, - 0x9d, 0x54, 0x41, 0x0e, 0x7b, 0x2e, 0x96, 0x12, 0x75, 0x58, 0xdf, 0xd5, - 0xaa, 0x3c, 0xf2, 0x26, 0xc1, 0xf1, 0x18, 0x37, 0x56, 0xf2, 0xd2, 0x86, - 0x6f, 0xd4, 0x9f, 0x57, 0x2b, 0x32, 0xe9, 0x08, 0x94, 0x53, 0x40, 0xc5, - 0x4d, 0x77, 0x39, 0xc6, 0x4c, 0x63, 0x53, 0xf9, 0xbf, 0x35, 0x08, 0xc5, - 0x0d, 0xd0, 0x89, 0x82, 0xa7, 0x2d, 0x6a, 0xb4, 0x22, 0xb1, 0x10, 0x7f, - 0xcf, 0x2e, 0x21, 0x27, 0x9c, 0x12, 0xc6, 0x0e, 0xca, 0xd2, 0x32, 0xb1, - 0x6d, 0xfd, 0x59, 0x12, 0x23, 0x60, 0x46, 0x89, 0xe0, 0x75, 0x5e, 0xc9, - 0xf4, 0x3d, 0x8a, 0x89, 0xd4, 0x23, 0xc2, 0xbe, 0x30, 0x32, 0x4a, 0x95, - 0x42, 0xe2, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0xa7, 0x0b, 0x48, 0xe2, 0xeb, 0xd7, 0x12, 0x42, 0x4c, 0x71, 0xfb, 0x25, - 0x17, 0x23, 0x0e, 0x01, 0xa6, 0x21, 0xb9, 0x17, 0x6e, 0xf0, 0x24, 0x66, - 0x9e, 0x9d, 0x0f, 0x71, 0xf8, 0x5b, 0x79, 0xb0, 0x1b, 0x1f, 0xe7, 0xa2, - 0xc0, 0x17, 0x16, 0x08, 0x5e, 0x24, 0x7b, 0xf9, 0x7a, 0x1e, 0x70, 0xe2, - 0x05, 0x40, 0x16, 0x56, 0xe7, 0x79, 0xf2, 0x30, 0xa3, 0xdc, 0xe3, 0x7a, - 0x7e, 0x22, 0x88, 0xc0, 0xf7, 0xc8, 0x5c, 0x93, 0x95, 0x86, 0x02, 0x6c, - 0x73, 0x76, 0xef, 0x03, 0x2d, 0xcb, 0xa5, 0x22, 0xfe, 0x05, 0xbb, 0xe6, - 0xfd, 0x19, 0x8c, 0x8b, 0x67, 0x58, 0x81, 0x81, 0x2d, 0x36, 0xd0, 0xc1, - 
0x20, 0xb2, 0x87, 0x87, 0xdb, 0xe4, 0xe5, 0xd1, 0xd1, 0xd5, 0x81, 0x34, - 0x4c, 0xd6, 0x09, 0xa2, 0x5d, 0xcc, 0x99, 0x12, 0xa5, 0x06, 0x0f, 0x06, - 0x7e, 0xbb, 0x67, 0x26, 0x69, 0x15, 0x6e, 0x5f, 0xb1, 0x8e, 0xd6, 0x34, - 0xfc, 0x4d, 0xd9, 0x03, 0xb7, 0x5a, 0xf4, 0xaa, 0x03, 0x00, 0x88, 0x6b, - 0x5a, 0xc9, 0xf2, 0xfb, 0x67, 0x72, 0xbc, 0xf7, 0xb9, 0xdc, 0x97, 0xdf, - 0x80, 0x91, 0xfa, 0x30, 0x18, 0x02, 0x89, 0xc7, 0xc9, 0x62, 0x1d, 0xc0, - 0x0b, 0xa6, 0xfe, 0x7e, 0xb9, 0xa9, 0x1f, 0x11, 0x71, 0xe1, 0xd1, 0xfe, - 0x8d, 0x90, 0x2c, 0x09, 0x82, 0x2e, 0x36, 0x79, 0xa5, 0x75, 0x54, 0xfb, - 0xd3, 0x3c, 0xb4, 0x18, 0x2f, 0x4e, 0x3f, 0x37, 0xc4, 0xf8, 0xc5, 0x59, - 0xa3, 0xfd, 0x0c, 0x62, 0x9e, 0xa8, 0x7a, 0x56, 0xc5, 0x97, 0x89, 0x35, - 0xc7, 0xb0, 0x29, 0x87, 0xbf, 0x6a, 0xdc, 0xb1, 0x2f, 0x01, 0xf4, 0x0d, - 0x7c, 0x25, 0x95, 0x39, 0x81, 0xdd, 0x1a, 0x81, 0x36, 0xc0, 0x6b, 0xbf, - 0x6b, 0x4d, 0xea, 0x23, 0xc0, 0x3e, 0x5c, 0x39, 0xe5, 0x6b, 0x59, 0xa0, - 0x50, 0x02, 0x99, 0xdf, 0x4e, 0xe3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x17, 0x88, 0xf8, 0xda, 0x3d, 0x57, 0x83, 0x63, - 0x76, 0xa0, 0x5c, 0x13, 0x1a, 0x00, 0x64, 0x30, 0x19, 0xfd, 0x2e, 0x9c, - 0x64, 0xb6, 0xda, 0x51, 0x7b, 0x55, 0xe8, 0xc4, 0x67, 0x1b, 0xda, 0xfc, - 0x4c, 0xd0, 0x27, 0x58, 0x56, 0xa1, 0x52, 0xd2, 0xb8, 0xd8, 0xd5, 0x94, - 0x69, 0xcf, 0xd0, 0xd5, 0x72, 0xeb, 0x2b, 0x05, 0xf3, 0x12, 0xa6, 0xac, - 0xa6, 0xf7, 0x90, 0x24, 0x1f, 0x22, 0x97, 0x5e, 0x8b, 0x7c, 0x2c, 0x30, - 0x61, 0x11, 0x9b, 0xdf, 0x83, 0x2b, 0x10, 0x09, 0x42, 0x77, 0x2b, 0xd9, - 0x43, 0xb3, 0x27, 0x69, 0x75, 0xf2, 0x2e, 0x72, 0xed, 0x50, 0xea, 0xbf, - 0x7f, 0x47, 0x39, 0x9c, 0xf8, 0x1e, 0xce, 0x6f, 0xdd, 0xe8, 0x40, 0xc5, - 0x14, 0x01, 0x7e, 0xbb, 0x0f, 0x43, 0x2d, 0x36, 0x70, 0x54, 0xc6, 0xbe, - 0x69, 0x24, 0xd1, 0x65, 0x49, 0x77, 0xf0, 0xd2, 0x99, 0xb4, 0x50, 0x8d, - 0x98, 0xcb, 0xbf, 0x7a, 0x7c, 0x65, 0xd3, 0x46, 0xcf, 0x90, 0x69, 0x56, - 0x15, 0xa2, 0xae, 0x11, 0x94, 0x60, 0xf9, 0x45, 0x17, 0x54, 0x6b, 0xbd, - 
0xeb, 0xd8, 0x74, 0x41, 0x5c, 0xf6, 0x49, 0x0a, 0x14, 0xce, 0x43, 0x1f, - 0x67, 0xc3, 0x6c, 0xf4, 0x01, 0xce, 0x3f, 0x85, 0xed, 0x19, 0xa1, 0xf7, - 0x1b, 0xf8, 0x46, 0x45, 0xb4, 0xe9, 0xa7, 0x1f, 0x2a, 0x65, 0x00, 0x2a, - 0xd3, 0x8b, 0x6a, 0x3b, 0xac, 0x78, 0xab, 0xf4, 0xc8, 0x62, 0x76, 0xc8, - 0x24, 0xf8, 0xf8, 0x08, 0xe0, 0x64, 0x00, 0x64, 0x74, 0x9e, 0x55, 0x2e, - 0xf8, 0xc9, 0xc8, 0x58, 0x0e, 0x1f, 0x27, 0x32, 0xfd, 0x30, 0x24, 0x68, - 0xc8, 0xa4, 0x8c, 0x1c, 0xf3, 0xa7, 0x32, 0xae, 0x84, 0x0a, 0x8a, 0x1e, - 0x11, 0xce, 0xb2, 0x02, 0xf1, 0xb3, 0x5f, 0x7d, 0x5e, 0x54, 0x8c, 0xe0, - 0xeb, 0x46, 0x6e, 0x8a, 0x5f, 0x3f, 0x71, 0x47, 0x2a, 0x8a, 0xe6, 0xf0, - 0xb0, 0x04, 0x49, 0x64, 0xb3, 0x7e, 0x16, 0x09, 0x83, 0x5f, 0x12, 0xe0, - 0x85, 0xb7, 0x36, 0xc0, 0x8a, 0xa5, 0xcd, 0xae, 0xc0, 0xb4, 0xa2, 0x62, - 0x9b, 0xfa, 0x64, 0x18, 0x16, 0x8e, 0xb6, 0x50, 0xf2, 0x9b, 0xc4, 0x7d, - 0x0c, 0x4c, 0x8b, 0x58, 0xcf, 0x9b, 0x87, 0x09, 0xb1, 0x37, 0xbb, 0xaf, - 0xa7, 0x72, 0x79, 0x81, 0x09, 0x55, 0xa1, 0x6a, 0x87, 0xb0, 0x7d, 0xc8, - 0xb0, 0xc1, 0xa4, 0xa9, 0xdf, 0xcf, 0x95, 0x77, 0x36, 0x8e, 0x2b, 0xae, - 0xeb, 0x4b, 0xf9, 0x2a, 0x83, 0x6c, 0x53, 0x3c, 0x89, 0xa6, 0x08, 0xae, - 0x00, 0x4e, 0xb8, 0xf6, 0x34, 0x7c, 0xc6, 0x76, 0x87, 0x1a, 0x02, 0xb0, - 0x89, 0xa3, 0x0f, 0x00, 0xc6, 0x7b, 0xeb, 0xf7, 0x95, 0x40, 0xc5, 0x0d, - 0x6f, 0x74, 0xd8, 0x21, 0x2f, 0x9f, 0x24, 0xac, 0x43, 0xdb, 0x3a, 0x39, - 0x6c, 0x34, 0x59, 0x62, 0x66, 0xbc, 0x28, 0x7f, 0x8c, 0x64, 0x62, 0x8c, - 0x28, 0x6c, 0xf5, 0x79, 0x24, 0xb1, 0x00, 0x9c, 0x58, 0x6b, 0x09, 0xef, - 0xb0, 0x73, 0xcd, 0x47, 0xbb, 0x52, 0xfd, 0x26, 0x6a, 0xff, 0xb9, 0xf1, - 0xd5, 0x82, 0x59, 0x01, 0xfa, 0x87, 0x14, 0x24, 0x10, 0xb0, 0xf7, 0xdf, - 0xf9, 0x3f, 0x67, 0x19, 0xbd, 0xc7, 0x85, 0xb0, 0xad, 0x47, 0xa8, 0x4c, - 0x3e, 0xb6, 0x2e, 0x8a, 0xb3, 0xcc, 0x35, 0xa0, 0x48, 0xc7, 0x90, 0x81, - 0xb7, 0x53, 0x1c, 0x38, 0x63, 0xf2, 0x2f, 0xa0, 0x71, 0x82, 0xe2, 0x56, - 0xdb, 0x68, 0xe8, 0x5f, 0xf8, 0x42, 0xf2, 0xf6, 0xb8, 0x10, 0x6b, 0x54, - 
0x21, 0xa0, 0xc1, 0xfe, 0xcb, 0xce, 0x12, 0xa2, 0x49, 0x51, 0x86, 0x53, - 0x56, 0xec, 0x33, 0xb3, 0x72, 0xce, 0xa4, 0x46, 0xe3, 0x37, 0xcb, 0xc0, - 0x95, 0xaa, 0xe2, 0xa3, 0xc5, 0xe9, 0x36, 0x40, 0xfe, 0xf7, 0xe2, 0x5a, - 0x6d, 0x58, 0x39, 0xb2, 0x41, 0x5d, 0xe2, 0x71, 0x72, 0xd0, 0xf0, 0x5c, - 0x16, 0x88, 0x95, 0x30, 0x0a, 0xfb, 0x8d, 0xda, 0x14, 0x80, 0xf4, 0x15, - 0xf2, 0xf6, 0xac, 0xf3, 0xd8, 0x8d, 0x13, 0x24, 0x2c, 0x74, 0x60, 0x6e, - 0x8c, 0xa1, 0x59, 0xcf, 0x74, 0x7c, 0x2d, 0x0b, 0xbb, 0x06, 0x5c, 0x9d, - 0xcd, 0xf3, 0x1e, 0x4a, 0xba, 0x3f, 0x9c, 0x4a, 0xc4, 0xd7, 0xf9, 0xf0, - 0xa5, 0x56, 0x7f, 0xb0, 0xa2, 0x57, 0xd0, 0xc3, 0xaa, 0xa7, 0xd0, 0x49, - 0xe2, 0x28, 0x9b, 0xc4, 0x64, 0x0c, 0xe0, 0x71, 0x9c, 0x05, 0x04, 0x95, - 0x00, 0x1f, 0x7b, 0xa9, 0xb9, 0xb3, 0x2b, 0x8f, 0x0b, 0x45, 0x1e, 0x23, - 0xaa, 0x27, 0x89, 0x4a, 0xb0, 0x7d, 0x03, 0xdf, 0xae, 0xdb, 0xcb, 0xc4, - 0xec, 0x3b, 0x02, 0xe2, 0x85, 0x3a, 0xb7, 0x25, 0xfb, 0xab, 0xca, 0xc1, - 0x33, 0x00, 0x5b, 0xd2, 0xcf, 0xb0, 0x11, 0x1d, 0x51, 0xb5, 0x5b, 0xea, - 0x94, 0xf7, 0xa0, 0x98, 0x33, 0xba, 0x58, 0xfc, 0x12, 0xea, 0xdd, 0x89, - 0xbd, 0x63, 0x03, 0xbe, 0x7e, 0x3b, 0x69, 0xc4, 0x9d, 0x57, 0x0f, 0xd6, - 0xbe, 0xea, 0x5b, 0xd0, 0x97, 0x63, 0x89, 0xb0, 0xa0, 0xc0, 0xd6, 0x39, - 0xc1, 0x69, 0x12, 0x6a, 0xfb, 0xac, 0x74, 0x7f, 0xfb, 0xf4, 0x7f, 0x38, - 0x44, 0x4c, 0x8a, 0xa2, 0x41, 0x15, 0xc0, 0x54, 0xc0, 0xed, 0x14, 0x83, - 0xef, 0xbc, 0x9c, 0xc7, 0xdd, 0x21, 0xd6, 0xf0, 0x9b, 0x7f, 0x09, 0xd5, - 0x96, 0xe5, 0xf7, 0xc5, 0xa9, 0xb3, 0x41, 0xb0, 0x9d, 0xeb, 0x49, 0x68, - 0x9d, 0x2b, 0xea, 0x47, 0x80, 0x3b, 0x54, 0xb8, 0xf4, 0x14, 0x5e, 0xd6, - 0x66, 0x89, 0x04, 0xb3, 0x00, 0xa3, 0xa8, 0x32, 0x62, 0x2e, 0xc3, 0x15, - 0xc6, 0x93, 0x7d, 0x40, 0x32, 0xb1, 0x6b, 0x60, 0xd3, 0x52, 0xdf, 0x09, - 0x8c, 0x80, 0x2b, 0x01, 0xe7, 0x97, 0x8d, 0xbb, 0x14, 0xd6, 0x10, 0x15, - 0x64, 0x00, 0x4a, 0x2c, 0x67, 0xca, 0xd0, 0xa1, 0x37, 0x33, 0x7b, 0xa1, - 0x2a, 0x5b, 0x5b, 0x78, 0xf8, 0x2f, 0xdd, 0x76, 0xab, 0x8a, 0xc3, 0xe3, - 
0x37, 0x00, 0xd1, 0x29, 0xb0, 0x96, 0x1d, 0x18, 0xbe, 0x5d, 0x32, 0x7e, - 0xb7, 0x11, 0xa9, 0x78, 0x72, 0xa2, 0x2d, 0x29, 0x1c, 0x32, 0xa4, 0xff, - 0xc7, 0xce, 0xfe, 0xaf, 0xb7, 0x17, 0x43, 0xe5, 0x2f, 0xae, 0x45, 0xd3, - 0xaf, 0x10, 0xe3, 0xd0, 0x58, 0xb6, 0xee, 0xee, 0x7a, 0xb5, 0x06, 0x70, - 0x26, 0x7e, 0x2d, 0x5b, 0xd5, 0xe1, 0x7b, 0x9a, 0x37, 0x02, 0xfc, 0x1d, - 0x08, 0x4f, 0x1a, 0xf5, 0x44, 0x63, 0xde, 0x4b, 0x14, 0x68, 0x54, 0x0b, - 0x6a, 0x22, 0x4e, 0x02, 0x65, 0xcd, 0xf4, 0x04, 0xec, 0xcc, 0x8a, 0x0b, - 0xe0, 0x59, 0xf8, 0x65, 0x25, 0x63, 0xed, 0x0f, 0xa6, 0xc5, 0x3c, 0xcb, - 0x5d, 0xc5, 0xd8, 0x9f, 0x5a, 0xd3, 0x88, 0x3d, 0xd4, 0x2c, 0xb3, 0x04, - 0xf6, 0x97, 0xc7, 0xe2, 0xfd, 0xb6, 0xf4, 0x7d, 0x0d, 0xb9, 0x75, 0x7e, - 0x9d, 0x81, 0xdc, 0xdf, 0x8e, 0x90, 0x40, 0x0c, 0x7b, 0x45, 0xfe, 0x68, - 0xfd, 0xff, 0x1c, 0xf1, 0x16, 0x09, 0x33, 0x74, 0x27, 0x7b, 0x4d, 0xd9, - 0x9b, 0x48, 0x6d, 0x84, 0xeb, 0x96, 0x8f, 0x4b, 0x82, 0x73, 0xd5, 0x69, - 0x7d, 0x14, 0x45, 0x8c, 0xb8, 0x71, 0x87, 0x70, 0x09, 0x26, 0xfc, 0x89, - 0x6f, 0x0f, 0xb6, 0xc1, 0xd6, 0xe1, 0xbf, 0xdb, 0x85, 0x8f, 0x94, 0xad, - 0x94, 0x01, 0x01, 0xbb, 0x3f, 0xc0, 0xb5, 0xff, 0xf5, 0xbb, 0x4f, 0x50, - 0x09, 0xca, 0x7d, 0x36, 0x47, 0x66, 0x9a, 0x8c, 0xee, 0x84, 0x73, 0x9a, - 0x1f, 0x49, 0x75, 0xb4, 0xab, 0x66, 0xf7, 0x3b, 0xfe, 0x81, 0x67, 0xc9, - 0xd1, 0x16, 0xde, 0x1f, 0xc2, 0x24, 0xed, 0x6a, 0x5a, 0xe7, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0xc5, 0xd7, 0x14, 0x84, - 0xf8, 0xcf, 0x9b, 0xf4, 0xb7, 0x6f, 0x47, 0x90, 0x47, 0x30, 0x80, 0x4b, - 0x9e, 0x32, 0x25, 0xa9, 0xf1, 0x33, 0xb5, 0xde, 0xa1, 0x68, 0xf4, 0xe2, - 0x85, 0x1f, 0x07, 0x2f, 0xcc, 0x00, 0xfc, 0xaa, 0x7c, 0xa6, 0x20, 0x61, - 0x71, 0x7a, 0x48, 0xe5, 0x2e, 0x29, 0xa3, 0xfa, 0x37, 0x9a, 0x95, 0x3f, - 0xaa, 0x68, 0x93, 0xe3, 0x2e, 0xc5, 0xa2, 0x7b, 0x94, 0x5e, 0x60, 0x5f, - 0x10, 0x85, 0xf3, 0x23, 0x2d, 0x42, 0x4c, 0x13, 0x29, 0xc8, 0x8d, 0x78, - 0x6e, 0xd6, 0x8c, 0xe6, 0xfc, 0xb6, 0x2a, 0xa6, 0x3b, 0xf9, 0xab, 0x61, - 
0x7c, 0x08, 0x8a, 0x3b, 0x70, 0xbe, 0x57, 0xaa, 0xda, 0x1f, 0x33, 0x4a, - 0x70, 0x17, 0x25, 0x0d, 0x3f, 0x60, 0x3d, 0xc8, 0x2e, 0xbd, 0x3b, 0x12, - 0x0b, 0x63, 0x5e, 0x3f, 0xf5, 0x6b, 0x1f, 0x0b, 0xd9, 0x33, 0x85, 0x23, - 0x71, 0x24, 0x9a, 0xb3, 0xdf, 0x5c, 0x1f, 0xef, 0x14, 0x33, 0xc8, 0x66, - 0x85, 0xb7, 0xf0, 0x56, 0x68, 0x1d, 0x51, 0x52, 0xaf, 0x80, 0x3c, 0xe2, - 0x59, 0x06, 0xf1, 0xd1, 0x9f, 0xb6, 0xc6, 0x80, 0x4e, 0x06, 0xea, 0x28, - 0xab, 0x17, 0x8f, 0x45, 0x7a, 0xf6, 0xb4, 0x93, 0xb7, 0x43, 0x9e, 0xc6, - 0xd4, 0x29, 0x00, 0x62, 0xab, 0x51, 0x7a, 0x72, 0xe5, 0xc1, 0xd4, 0x10, - 0xcd, 0xd6, 0x17, 0x54, 0xe4, 0x20, 0x84, 0x50, 0xe4, 0xf9, 0x00, 0x13, - 0xfd, 0xa6, 0x9f, 0xef, 0x19, 0xd4, 0x60, 0x2a, 0x42, 0x07, 0xcd, 0xd5, - 0xa1, 0x01, 0x6d, 0x07, 0x01, 0x32, 0x61, 0x3c, 0x65, 0x9a, 0x8f, 0x5d, - 0x33, 0xf3, 0xcb, 0x29, 0x0b, 0x8c, 0xe7, 0x3b, 0x83, 0x44, 0xb1, 0x3a, - 0x4f, 0x8e, 0x09, 0x15, 0x14, 0x69, 0x84, 0xa1, 0xbb, 0x15, 0xfd, 0xea, - 0xde, 0xbe, 0x5b, 0x6a, 0xc0, 0x95, 0x04, 0x46, 0x4d, 0x8a, 0xaa, 0xac, - 0xbc, 0x2f, 0xad, 0x12, 0x15, 0x8a, 0x53, 0x4c, 0x94, 0xb8, 0xca, 0x42, - 0x96, 0x3a, 0xf4, 0x7a, 0x18, 0x9d, 0x5b, 0x24, 0x9a, 0xce, 0xa8, 0x99, - 0xd4, 0x37, 0x32, 0xf6, 0xf2, 0xac, 0xaf, 0x3f, 0xf5, 0x3b, 0xfe, 0xda, - 0x13, 0x9a, 0xab, 0x4f, 0x55, 0xc0, 0x2c, 0x21, 0x2b, 0x65, 0x71, 0x1f, - 0xc5, 0x04, 0x32, 0xc9, 0x94, 0xe5, 0xfa, 0x6f, 0xd8, 0x2a, 0xbc, 0x70, - 0x85, 0x55, 0xdc, 0x62, 0xb7, 0x3a, 0x20, 0x0e, 0xe7, 0x67, 0x3c, 0xfe, - 0xcb, 0x83, 0x6a, 0x15, 0x6e, 0x4a, 0x35, 0x65, 0xea, 0xc1, 0xb9, 0x4d, - 0x35, 0xf9, 0x4b, 0xcf, 0xd8, 0xfd, 0xa5, 0xff, 0xff, 0x67, 0x70, 0x04, - 0xae, 0xa2, 0xa4, 0x12, 0x4b, 0x83, 0x4f, 0xc2, 0x96, 0xf0, 0x21, 0x2b, - 0x14, 0x21, 0x73, 0x42, 0x14, 0x99, 0x07, 0xe5, 0xa9, 0x52, 0x4c, 0xeb, - 0xbe, 0xc3, 0x11, 0x2e, 0x27, 0xda, 0x69, 0x94, 0xd5, 0xf6, 0xc6, 0x77, - 0x0a, 0x00, 0x5d, 0x9a, 0x82, 0xaa, 0x21, 0xfc, 0x86, 0x9b, 0xd0, 0xc4, - 0xc4, 0x1f, 0x53, 0x41, 0x7a, 0x92, 0xab, 0x1c, 0x12, 0xf6, 0xd5, 0x48, - 
0xfb, 0x29, 0x4d, 0xb4, 0xd2, 0x12, 0xee, 0xc5, 0xea, 0x18, 0x33, 0xf1, - 0x4d, 0x0a, 0x10, 0x43, 0xa5, 0x35, 0xb1, 0x63, 0xc4, 0xfb, 0x38, 0x1e, - 0xef, 0xac, 0x3f, 0x97, 0x41, 0xc6, 0x96, 0x3e, 0x60, 0x13, 0xc8, 0xe3, - 0xbe, 0x61, 0xe9, 0xb6, 0x26, 0x16, 0x14, 0xf8, 0x82, 0x0d, 0x6e, 0x75, - 0x2f, 0xd7, 0x9c, 0x3a, 0x4a, 0xda, 0xd8, 0x2b, 0x35, 0xd4, 0x20, 0x32, - 0xd4, 0x4f, 0x0f, 0xe4, 0xdc, 0xd5, 0x0f, 0xfe, 0xa6, 0x81, 0x28, 0xb4, - 0x24, 0x3e, 0xb7, 0x0f, 0xb0, 0xb2, 0x5b, 0x05, 0x76, 0xbb, 0x24, 0x49, - 0x6a, 0x01, 0x68, 0x3f, 0x03, 0x96, 0xbc, 0x0c, 0x77, 0x48, 0x5f, 0xe8, - 0x39, 0xf4, 0xb0, 0x84, 0x42, 0x0e, 0x6a, 0xb9, 0xab, 0xf2, 0x95, 0x97, - 0xa7, 0x5e, 0x29, 0x34, 0x9d, 0x50, 0xc0, 0x4b, 0x40, 0x72, 0xa1, 0x7c, - 0x79, 0x5e, 0x95, 0xbe, 0xd6, 0x17, 0x43, 0x0a, 0xc9, 0x27, 0x25, 0x43, - 0xd7, 0x99, 0xd5, 0x48, 0xd8, 0x98, 0xb5, 0x2b, 0x7f, 0xe3, 0xbd, 0x1d, - 0xc0, 0xd1, 0x04, 0xd5, 0xa4, 0xe1, 0x68, 0xbe, 0x96, 0xf1, 0x2e, 0x5e, - 0x37, 0x8d, 0x39, 0x4e, 0xe4, 0xcc, 0x5e, 0xd7, 0xdd, 0x59, 0x7e, 0xe8, - 0xae, 0x48, 0xb5, 0xec, 0x2c, 0xf7, 0x68, 0x96, 0x00, 0xe5, 0xec, 0x03, - 0x6f, 0x98, 0x3a, 0x9a, 0x4f, 0xd9, 0xf1, 0x2f, 0xfe, 0x76, 0xcf, 0x8f, - 0x0b, 0x3d, 0x8a, 0x14, 0x00, 0x83, 0xcb, 0xca, 0xe3, 0x34, 0x81, 0xb5, - 0x91, 0x64, 0x2b, 0x12, 0x24, 0x86, 0x9c, 0xae, 0x3c, 0x7f, 0x53, 0x22, - 0xd4, 0x94, 0x90, 0x44, 0x6b, 0x35, 0xd2, 0xce, 0x8e, 0x95, 0xe2, 0xbe, - 0x46, 0x50, 0x3f, 0x3d, 0xc3, 0xcd, 0xef, 0x47, 0x99, 0xb5, 0xf2, 0xd4, - 0x6f, 0xf4, 0xfa, 0xa2, 0xfc, 0x1e, 0xe3, 0x99, 0x49, 0xfd, 0x1a, 0x6e, - 0x0d, 0xb5, 0xf1, 0xc8, 0x05, 0x22, 0x29, 0xca, 0x03, 0xb8, 0x15, 0x3b, - 0x01, 0x8a, 0x95, 0x74, 0x48, 0x93, 0x61, 0x35, 0xde, 0xeb, 0xa9, 0xc4, - 0x56, 0xa9, 0xd7, 0xde, 0x4b, 0xe5, 0x4b, 0xa1, 0x42, 0x6a, 0x5f, 0xe3, - 0xb2, 0xc7, 0xda, 0xfb, 0xc7, 0x70, 0x64, 0xe0, 0x68, 0x19, 0xc6, 0x11, - 0x77, 0x2b, 0x5f, 0xba, 0x1d, 0x58, 0x77, 0x98, 0x2c, 0x91, 0xb4, 0xd2, - 0xea, 0x1b, 0xdc, 0xe8, 0xfa, 0x82, 0xf3, 0x6e, 0xac, 0x88, 0x15, 0x16, - 
0x1a, 0x53, 0xb3, 0x01, 0x94, 0x03, 0x47, 0x20, 0xdb, 0x71, 0xcb, 0x71, - 0xe8, 0x62, 0xad, 0x34, 0x2b, 0xa3, 0xa5, 0xe9, 0xa6, 0x82, 0x0e, 0x16, - 0x61, 0xbc, 0x29, 0x6b, 0xb1, 0x60, 0x67, 0x80, 0x9a, 0x9f, 0xc4, 0x82, - 0xf6, 0xb0, 0x7a, 0x16, 0x9c, 0x25, 0x04, 0xeb, 0xfd, 0xe0, 0x18, 0xd3, - 0xfc, 0xeb, 0xe1, 0x3c, 0x2b, 0x29, 0x7b, 0x32, 0x4e, 0xd3, 0x6d, 0xe1, - 0x27, 0xda, 0xc9, 0x14, 0x5c, 0x7f, 0xfa, 0x70, 0x41, 0x8e, 0xb4, 0xa3, - 0xde, 0x36, 0x92, 0x67, 0x97, 0xe2, 0xec, 0x85, 0x8b, 0x76, 0x08, 0x3c, - 0x32, 0x58, 0xd4, 0x7f, 0x6f, 0x91, 0x03, 0xdb, 0x19, 0x3e, 0xc4, 0x8b, - 0x3c, 0xb7, 0x75, 0x90, 0x71, 0x7a, 0x21, 0x9d, 0xa7, 0x77, 0xbf, 0xf5, - 0x92, 0x57, 0x46, 0x07, 0xa7, 0xbb, 0x0c, 0x42, 0xca, 0x4f, 0x5a, 0x27, - 0x45, 0x69, 0xfe, 0x6d, 0x78, 0x43, 0x77, 0xc4, 0xb4, 0x43, 0xff, 0x37, - 0x0d, 0xb7, 0xfa, 0xe9, 0x9e, 0x06, 0x70, 0x53, 0xfd, 0xf6, 0xa0, 0x28, - 0x84, 0x46, 0xcd, 0x61, 0xa2, 0x95, 0xc4, 0x1e, 0x6a, 0x13, 0xa1, 0x7f, - 0xaf, 0xe1, 0x73, 0x85, 0xb0, 0x53, 0x9c, 0x08, 0xb6, 0x1d, 0x4d, 0xb4, - 0x0b, 0xfb, 0x1f, 0x0c, 0x7b, 0x17, 0x06, 0x73, 0xa7, 0x22, 0x1f, 0xb0, - 0xd8, 0x45, 0x6e, 0xe5, 0xde, 0x48, 0xb7, 0x9f, 0x5a, 0xa8, 0xd1, 0xc3, - 0x04, 0xd1, 0x87, 0xec, 0x15, 0x3e, 0xd1, 0xc7, 0x57, 0x01, 0x46, 0x4b, - 0x28, 0xa8, 0x79, 0x5a, 0x7e, 0x0b, 0x56, 0x56, 0x28, 0xda, 0x35, 0xea, - 0x4c, 0x14, 0x81, 0xae, 0xc0, 0x0d, 0x12, 0xfe, 0x2d, 0xb7, 0x95, 0x4d, - 0xea, 0x78, 0xb6, 0x53, 0xcf, 0xac, 0x8a, 0xfc, 0xc9, 0x07, 0x9f, 0x93, - 0xf0, 0x11, 0x86, 0x13, 0xe9, 0xca, 0x3d, 0xce, 0xb1, 0xfd, 0x1a, 0x0a, - 0x8b, 0x11, 0x82, 0x94, 0x6a, 0xae, 0xc5, 0x80, 0x6a, 0x3b, 0xa8, 0x7c, - 0xb4, 0x53, 0x4e, 0xa9, 0x04, 0x1a, 0x4f, 0xb0, 0xb9, 0x95, 0x96, 0xa5, - 0xfd, 0xce, 0xdc, 0x57, 0x00, 0x48, 0x16, 0xe2, 0x40, 0xae, 0x04, 0xf5, - 0x83, 0x60, 0x23, 0xd9, 0x8e, 0x59, 0x56, 0x20, 0x50, 0x38, 0xc4, 0xde, - 0x88, 0x9f, 0x91, 0x06, 0xdb, 0x8f, 0x84, 0xa2, 0xaf, 0x61, 0xdd, 0x48, - 0x03, 0x4f, 0xc4, 0xb8, 0xed, 0x12, 0xd2, 0x74, 0x08, 0xb9, 0x51, 0x63, - 
0xb5, 0xfe, 0x09, 0x7f, 0x7b, 0x8c, 0x5e, 0xd7, 0x27, 0xe5, 0x79, 0xe6, - 0x33, 0x60, 0x54, 0xe1, 0x21, 0xda, 0xca, 0x8b, 0x81, 0xdf, 0xb6, 0xa7, - 0x2e, 0x9d, 0x0f, 0xfc, 0x05, 0x80, 0x67, 0xcb, 0xc5, 0xdf, 0xc7, 0x13, - 0xee, 0xb5, 0x40, 0x8e, 0xa7, 0x0c, 0xcb, 0xf2, 0x45, 0x15, 0x29, 0xb1, - 0xb8, 0x02, 0x23, 0x61, 0x38, 0xf1, 0x16, 0xa1, 0x0c, 0xa1, 0xc9, 0x40, - 0x8c, 0xd0, 0x48, 0x4b, 0xce, 0x9c, 0x1e, 0x53, 0x40, 0x44, 0xf6, 0x17, - 0x16, 0xc6, 0x5c, 0xb0, 0x2a, 0x29, 0x59, 0x87, 0x67, 0x85, 0xa7, 0x81, - 0x84, 0xe9, 0x4f, 0xe5, 0x4e, 0x13, 0x5a, 0x11, 0xa1, 0x24, 0x62, 0xe9, - 0x7a, 0xea, 0x51, 0xaa, 0x45, 0xf3, 0x1d, 0x2a, 0xaf, 0x01, 0x28, 0x35, - 0xda, 0xb4, 0xe7, 0xab, 0xc1, 0xb9, 0x3c, 0x45, 0xa2, 0x0b, 0x5d, 0x40, - 0x09, 0xac, 0x62, 0x16, 0xd3, 0x1f, 0x9f, 0xc7, 0x1a, 0x56, 0xb7, 0x27, - 0xd1, 0x1b, 0xe1, 0xb5, 0x82, 0x9e, 0xe8, 0xd3, 0x5c, 0x0f, 0xe8, 0x87, - 0x61, 0xc6, 0x20, 0xb7, 0x31, 0x3f, 0x0d, 0xb3, 0x0a, 0x5a, 0xce, 0x06, - 0xa5, 0xe9, 0xfd, 0xf3, 0x29, 0x1a, 0xcd, 0x86, 0x0e, 0x31, 0x29, 0xaa, - 0xb7, 0x32, 0xf1, 0x10, 0x4e, 0x92, 0x12, 0x00, 0xc0, 0xac, 0x50, 0x4b, - 0x52, 0x59, 0x51, 0x7c, 0xa8, 0x0c, 0xf7, 0xcb, 0x16, 0x73, 0x7b, 0x90, - 0xa8, 0x57, 0x79, 0xb4, 0x73, 0x53, 0xd7, 0xed, 0xba, 0x46, 0xc5, 0x06, - 0x53, 0x02, 0xc7, 0x58, 0x4c, 0x09, 0x0c, 0xa5, 0x01, 0x13, 0x18, 0x39, - 0x4b, 0x4e, 0xc2, 0x0d, 0xd6, 0xdf, 0xaa, 0x7e, 0x46, 0xba, 0x6e, 0xcc, - 0x25, 0x42, 0xd0, 0xb3, 0x31, 0xdc, 0xdf, 0x7d, 0xf1, 0xc3, 0x73, 0xca, - 0x7a, 0xf6, 0xcb, 0x23, 0x81, 0x8d, 0xbe, 0x0b, 0xf2, 0x79, 0x8d, 0x14, - 0xa4, 0xc8, 0x36, 0x18, 0x49, 0xc8, 0x0d, 0xd7, 0xc9, 0xdd, 0x35, 0xeb, - 0xec, 0x52, 0x56, 0xae, 0xf2, 0xd2, 0x51, 0x91, 0x39, 0xbc, 0xb0, 0x49, - 0xb7, 0xf2, 0x1b, 0x64, 0x83, 0x5a, 0xa6, 0x97, 0xc2, 0x15, 0x95, 0xdc, - 0x11, 0xd2, 0x89, 0xc0, 0x6a, 0xb1, 0x44, 0x43, 0x38, 0xb6, 0x54, 0x0f, - 0xdc, 0xcb, 0xed, 0x26, 0x27, 0xd9, 0x46, 0x56, 0x4e, 0x6a, 0x54, 0x74, - 0x0f, 0x45, 0xfc, 0xb6, 0x93, 0xab, 0x3c, 0xd1, 0x86, 0x51, 0xaf, 0xa9, - 
0x4a, 0xc0, 0x9c, 0x78, 0xc1, 0xb1, 0xc7, 0xf1, 0x9c, 0xd1, 0xd0, 0x32, - 0x4e, 0x4b, 0x02, 0x36, 0x68, 0x38, 0x88, 0x56, 0xc0, 0x2b, 0x12, 0x05, - 0x3b, 0xb9, 0xf6, 0xa2, 0x37, 0xe7, 0xbc, 0x81, 0xf9, 0x75, 0x51, 0x27, - 0x56, 0x0d, 0x55, 0xd1, 0x6a, 0xe0, 0xcf, 0x87, 0x0a, 0x44, 0xc6, 0x57, - 0xe1, 0x1b, 0xc0, 0x2c, 0xcf, 0xab, 0x77, 0xe9, 0x14, 0xf5, 0x34, 0x89, - 0xfb, 0xc9, 0xf2, 0x87, 0x5c, 0x75, 0xba, 0x51, 0x9a, 0x49, 0xe9, 0x23, - 0x23, 0xf4, 0xc9, 0xd1, 0x2f, 0x87, 0xf6, 0x75, 0x38, 0x97, 0x48, 0xb8, - 0x30, 0x46, 0x1d, 0x46, 0x65, 0x03, 0x10, 0xcf, 0xfb, 0x36, 0xf2, 0xb1, - 0xaf, 0x31, 0x02, 0x7b, 0x74, 0xfe, 0x9f, 0x8c, 0x73, 0x04, 0xfd, 0xb5, - 0xae, 0x2e, 0x27, 0x9c, 0xd8, 0x73, 0xbc, 0xc3, 0x4a, 0x76, 0x93, 0x66, - 0xf6, 0xb7, 0x90, 0xc4, 0x42, 0x3d, 0xcd, 0xb5, 0xf1, 0x75, 0xbf, 0xb7, - 0xdd, 0x8e, 0xb7, 0xcd, 0x90, 0x35, 0xf5, 0x95, 0x3d, 0xe4, 0x4e, 0xb0, - 0x7c, 0x5f, 0xad, 0xff, 0x75, 0x38, 0xc4, 0xc7, 0xed, 0xec, 0x70, 0xcc, - 0x9f, 0xf9, 0x77, 0xa1, 0x00, 0x2f, 0xf1, 0xa2, 0xc9, 0x74, 0xdc, 0x18, - 0x14, 0xd0, 0x2f, 0x86, 0x66, 0xa7, 0x5b, 0x39, 0x5c, 0xba, 0x0e, 0x77, - 0x16, 0x04, 0xc3, 0x02, 0x42, 0x3b, 0x66, 0x29, 0xee, 0x65, 0x00, 0xd4, - 0x22, 0x5a, 0x77, 0x74, 0xd4, 0xc3, 0xf3, 0x00, 0xdf, 0x6b, 0xc3, 0x15, - 0x89, 0x0e, 0xb1, 0xbc, 0xac, 0xe8, 0x44, 0x2f, 0x80, 0x34, 0x34, 0x8b, - 0x0c, 0x48, 0x45, 0xc2, 0x6a, 0xa3, 0x67, 0xd7, 0x3d, 0x36, 0xf3, 0x3f, - 0xe5, 0xf0, 0x5b, 0xe8, 0xad, 0x41, 0xd5, 0x82, 0xc1, 0x28, 0xab, 0x77, - 0xe8, 0x7f, 0xb3, 0xf6, 0xd2, 0x0c, 0xe4, 0x03, 0xcf, 0xe4, 0x72, 0xdb, - 0x7b, 0x81, 0xf4, 0xf3, 0x48, 0x74, 0xe1, 0x91, 0xb8, 0xf8, 0x4c, 0x2c, - 0x60, 0x99, 0x3e, 0x1e, 0x4f, 0xaf, 0x12, 0xab, 0x52, 0xef, 0xc7, 0x60, - 0xd2, 0xfe, 0x62, 0x55, 0xc8, 0x18, 0xad, 0x60, 0xa7, 0x5d, 0xde, 0x4d, - 0xfc, 0x6d, 0xe1, 0x10, 0x7c, 0xf9, 0xa2, 0x64, 0x00, 0x16, 0x1f, 0x44, - 0x7c, 0xe2, 0x72, 0x37, 0xd9, 0x92, 0xad, 0xfc, 0x62, 0x53, 0xbe, 0xb6, - 0xe0, 0xc8, 0xe0, 0xa2, 0xef, 0x22, 0x4b, 0x70, 0x3a, 0x4f, 0xc9, 0xed, - 
0x6b, 0xbc, 0x17, 0x0a, 0xcf, 0x6a, 0x2c, 0xd3, 0xd2, 0x6b, 0x02, 0x45, - 0xfa, 0x9e, 0xc2, 0x21, 0x28, 0xfc, 0x07, 0x68, 0xd6, 0xb8, 0x9f, 0x2a, - 0x0b, 0x7a, 0x0e, 0xbc, 0x4e, 0xee, 0x84, 0x38, 0xe4, 0x8e, 0x70, 0xc3, - 0xc4, 0xad, 0x74, 0x87, 0x2d, 0x16, 0x4f, 0xa1, 0xf8, 0x20, 0xf5, 0xde, - 0xa3, 0xc5, 0x0c, 0x3b, 0xde, 0x44, 0x48, 0x0f, 0x3c, 0xdc, 0x7e, 0x10, - 0x8b, 0x87, 0xc4, 0x3b, 0xb0, 0x95, 0xbf, 0x61, 0x1e, 0xad, 0x07, 0x52, - 0xfd, 0x0b, 0x84, 0xa9, 0x46, 0xb0, 0x32, 0xd5, 0x22, 0x80, 0x35, 0x26, - 0x41, 0xf8, 0x11, 0x72, 0xb1, 0x31, 0x6f, 0x5a, 0x75, 0xcc, 0x67, 0xe0, - 0xb2, 0x50, 0x89, 0xb2, 0x66, 0x6e, 0xee, 0xa0, 0x41, 0x8d, 0x00, 0x2a, - 0xa7, 0x9d, 0xa5, 0x11, 0x2b, 0x07, 0x95, 0x3a, 0x55, 0x8c, 0x67, 0xb1, - 0xe5, 0x2d, 0xd4, 0xd1, 0x3e, 0x29, 0xed, 0xa5, 0x59, 0x97, 0x7b, 0xdf, - 0x92, 0x10, 0x0b, 0x04, 0x89, 0x27, 0xa0, 0xa2, 0x93, 0x18, 0x7f, 0x47, - 0x84, 0x1c, 0xc6, 0xd6, 0x8f, 0x73, 0x81, 0xa0, 0xfa, 0xe5, 0x3e, 0xd8, - 0xbf, 0x56, 0x1a, 0x76, 0xf4, 0xc4, 0x0f, 0x7a, 0x29, 0x9d, 0x32, 0x5d, - 0x41, 0xe0, 0x07, 0xb9, 0xd3, 0x3f, 0x7e, 0xff, 0x90, 0x89, 0xce, 0xdc, - 0xf1, 0x1d, 0x54, 0xb6, 0x67, 0x7f, 0x4d, 0x71, 0x9a, 0x4a, 0x5f, 0x80, - 0x0d, 0x5c, 0x77, 0xd5, 0x50, 0x7c, 0x41, 0x56, 0x7e, 0x99, 0x0a, 0xeb, - 0x66, 0x1f, 0xd2, 0x55, 0xc3, 0xc6, 0x6c, 0xc5, 0xfc, 0x34, 0x40, 0x2c, - 0x05, 0x29, 0x05, 0x7c, 0xca, 0xe6, 0x8d, 0xd3, 0xb0, 0xca, 0x84, 0x27, - 0x50, 0x7c, 0x6b, 0x17, 0x1b, 0x22, 0xe4, 0x7f, 0xe6, 0x44, 0x94, 0x06, - 0x4b, 0xb3, 0xb7, 0xbb, 0x98, 0x81, 0x44, 0x0b, 0xf5, 0x66, 0xcb, 0xad, - 0xf2, 0x9a, 0xe1, 0x47, 0xf3, 0x97, 0xa9, 0xb2, 0xc2, 0xca, 0xcd, 0x98, - 0x78, 0x60, 0xdc, 0x6e, 0x87, 0x55, 0x47, 0xf3, 0xae, 0x84, 0xdd, 0x9a, - 0xe9, 0x1a, 0x63, 0x83, 0xea, 0x23, 0x09, 0x67, 0x34, 0x83, 0x00, 0x6e, - 0x5e, 0x58, 0xb8, 0x89, 0x04, 0x08, 0x0a, 0x55, 0x9e, 0x78, 0xc9, 0xff, - 0xb9, 0xb5, 0x2c, 0xdd, 0x3b, 0x0c, 0x58, 0x07, 0x8b, 0xb4, 0x6a, 0xc4, - 0x64, 0xa3, 0x5e, 0x5b, 0xfe, 0x4d, 0xd0, 0x74, 0x01, 0x1b, 0xdf, 0x10, - 
0x45, 0x2b, 0xd6, 0x9e, 0xa9, 0x60, 0x1f, 0xad, 0x46, 0xa1, 0x8c, 0xf8, - 0xf6, 0xa9, 0x8a, 0x27, 0xea, 0x51, 0x37, 0x84, 0xcf, 0xe5, 0xd7, 0x51, - 0xd6, 0x40, 0x39, 0x39, 0x5f, 0xf6, 0x96, 0x33, 0xd9, 0x86, 0x8d, 0x38, - 0xb6, 0x26, 0x04, 0x14, 0x07, 0x46, 0x3e, 0xd0, 0xc5, 0xf6, 0x0d, 0xa0, - 0x47, 0x2b, 0xc8, 0x73, 0x18, 0x6b, 0xd3, 0x0e, 0x18, 0xcc, 0x43, 0x98, - 0xd0, 0xcf, 0x1c, 0xe4, 0x4a, 0x41, 0x6a, 0x56, 0x2d, 0xf0, 0x93, 0x89, - 0x81, 0x6c, 0xce, 0x04, 0x1a, 0x23, 0x05, 0x91, 0x4f, 0x48, 0x44, 0x3a, - 0xaa, 0x03, 0xa5, 0x4a, 0xa9, 0x20, 0x2c, 0xbe, 0x6a, 0x81, 0xe6, 0xa9, - 0xf8, 0xf0, 0x2b, 0x29, 0xa1, 0xe0, 0xc4, 0xce, 0xf5, 0xda, 0x25, 0x70, - 0x49, 0xcc, 0xa0, 0x4b, 0x24, 0x49, 0x4f, 0x11, 0xc4, 0x3b, 0x22, 0x89, - 0x9a, 0xb4, 0xf4, 0xcd, 0xa3, 0xee, 0xb0, 0x76, 0x13, 0xc4, 0xbb, 0xaf, - 0x03, 0x7f, 0x27, 0xf3, 0x38, 0xbc, 0xde, 0x7c, 0x0c, 0x39, 0x14, 0xb7, - 0x14, 0xbb, 0x5c, 0xae, 0x89, 0xf8, 0xf7, 0xd6, 0x00, 0x78, 0xf4, 0xb0, - 0x52, 0x16, 0xf5, 0x54, 0xc5, 0x93, 0xf7, 0x6d, 0x0d, 0xe8, 0x58, 0xe2, - 0xa1, 0xa7, 0xdc, 0x49, 0xdb, 0xc8, 0x79, 0xbc, 0xc3, 0x97, 0x7b, 0x6c, - 0x82, 0x7b, 0xbe, 0xe9, 0x79, 0xac, 0x4a, 0xa4, 0x7c, 0x49, 0x83, 0x58, - 0x3a, 0xe4, 0xf5, 0x68, 0x5c, 0xb7, 0x7f, 0x2d, 0xfe, 0x6b, 0x96, 0xc7, - 0x8b, 0x67, 0xb5, 0xd0, 0xa1, 0x0a, 0x16, 0x62, 0x64, 0x53, 0xea, 0x29, - 0x80, 0x93, 0xf9, 0xd6, 0xa0, 0xc5, 0x1b, 0x3a, 0x1e, 0xab, 0x51, 0x88, - 0xe0, 0x9e, 0xd4, 0xf6, 0xbf, 0x70, 0x2d, 0x29, 0x2e, 0x08, 0xa9, 0x31, - 0x78, 0x0a, 0x15, 0x30, 0x9f, 0x2e, 0xc8, 0x41, 0x65, 0x8e, 0x97, 0x51, - 0x5e, 0x73, 0x46, 0x42, 0x74, 0x84, 0xfd, 0x9b, 0x4a, 0x8a, 0x68, 0x28, - 0x45, 0xd0, 0x5d, 0x65, 0x08, 0xb3, 0xf5, 0x40, 0x8a, 0x29, 0x8e, 0x70, - 0x02, 0x49, 0x6a, 0x01, 0xd6, 0x41, 0x4a, 0xf8, 0x15, 0xa3, 0x70, 0x59, - 0xe9, 0xa2, 0xe2, 0x76, 0x8c, 0x60, 0x33, 0xb3, 0xfa, 0x8b, 0xb4, 0x90, - 0x6f, 0x92, 0xc8, 0x21, 0x59, 0xc0, 0x3a, 0x30, 0x46, 0xeb, 0x49, 0xd8, - 0x85, 0x63, 0x5a, 0x23, 0x87, 0xe1, 0xa7, 0xc0, 0x1a, 0xb0, 0xc7, 0xc4, - 
0x40, 0x4d, 0x11, 0x9c, 0xe3, 0xd4, 0x6b, 0xef, 0x68, 0xc8, 0x2c, 0x31, - 0xcd, 0x3e, 0xee, 0x55, 0x10, 0x67, 0x77, 0x7b, 0x30, 0xc1, 0xd0, 0x23, - 0x6c, 0x65, 0x6f, 0xfb, 0x2e, 0x62, 0x33, 0x42, 0x63, 0xdc, 0xca, 0x86, - 0xf1, 0x0e, 0xb3, 0xb0, 0x69, 0x11, 0x65, 0xe1, 0x6e, 0x6c, 0x03, 0x49, - 0x79, 0xe8, 0xf1, 0x2e, 0x8d, 0x94, 0xc8, 0xa8, 0x98, 0x2d, 0x3f, 0xfe, - 0xbd, 0x2d, 0x75, 0x45, 0xd1, 0x7a, 0x09, 0xf8, 0x90, 0x49, 0xbd, 0x4a, - 0x3b, 0xa4, 0xa3, 0x26, 0xb8, 0x62, 0x66, 0x97, 0xd9, 0xc1, 0xca, 0x12, - 0x49, 0xe1, 0x27, 0x93, 0x4f, 0x60, 0xfa, 0xb3, 0x4f, 0x4c, 0xdb, 0x87, - 0x6c, 0x3b, 0x50, 0x47, 0xe2, 0xd8, 0x5b, 0x13, 0x99, 0xf0, 0x2b, 0xbb, - 0x32, 0x33, 0xfd, 0x7d, 0x15, 0x0f, 0x2c, 0xee, 0x85, 0x83, 0xc0, 0x53, - 0x79, 0x3e, 0x51, 0xfe, 0x7c, 0x06, 0x73, 0x49, 0x49, 0x4f, 0x5a, 0x22, - 0x36, 0x8f, 0x30, 0x8a, 0xef, 0x84, 0xd6, 0x15, 0x26, 0x48, 0xe7, 0x1e, - 0xb1, 0xaa, 0x82, 0xd0, 0xc7, 0x0b, 0x97, 0x7b, 0x6c, 0x2d, 0x49, 0x7e, - 0x6d, 0xe7, 0xa3, 0x05, 0x80, 0xd7, 0x42, 0xa9, 0xc6, 0x66, 0x98, 0x30, - 0xe3, 0x8a, 0x79, 0x86, 0x9c, 0x2b, 0xbc, 0x4a, 0xe6, 0x0d, 0xc5, 0xe5, - 0x1a, 0x92, 0xd9, 0xef, 0x63, 0x52, 0x03, 0x88, 0x36, 0xc5, 0x83, 0x65, - 0xf8, 0xf1, 0x87, 0xce, 0x43, 0xfe, 0x89, 0x58, 0x07, 0x6a, 0xad, 0x85, - 0x37, 0x0f, 0xdf, 0x9e, 0xa5, 0x62, 0xa9, 0xd2, 0x41, 0x3f, 0x7f, 0xb7, - 0xf1, 0xe2, 0x58, 0xb5, 0xda, 0xdf, 0xd1, 0xba, 0x36, 0x2c, 0xe7, 0x43, - 0x31, 0x07, 0xc5, 0xf5, 0x79, 0xc9, 0x31, 0xd7, 0x1d, 0x97, 0x57, 0x9a, - 0x8e, 0x3f, 0xac, 0x00, 0x49, 0x00, 0x2f, 0xad, 0xac, 0xe7, 0x65, 0x7c, - 0xbf, 0xec, 0x85, 0x57, 0xe6, 0xcc, 0x07, 0x34, 0x02, 0x36, 0xa8, 0x6a, - 0x9f, 0x3a, 0x9a, 0x2f, 0x34, 0x93, 0x1f, 0x7d, 0x38, 0x54, 0xe3, 0x54, - 0x54, 0xee, 0x84, 0x55, 0xe1, 0x0d, 0xc1, 0x08, 0x3e, 0x33, 0x9e, 0x2a, - 0xc3, 0x6a, 0x83, 0xc4, 0x75, 0xed, 0xbc, 0x5f, 0xd9, 0x04, 0xd7, 0x77, - 0x91, 0xb1, 0xa0, 0xf2, 0xef, 0x81, 0xb0, 0x8b, 0x53, 0x5f, 0x71, 0xec, - 0xa5, 0x0b, 0xbe, 0xf2, 0x92, 0x7e, 0x0a, 0x34, 0xeb, 0x5d, 0x65, 0xc7, - 
0xa9, 0x44, 0x10, 0xfb, 0xd3, 0xef, 0xe1, 0xbc, 0x06, 0x65, 0x68, 0x22, - 0xfb, 0x43, 0x2c, 0xcf, 0x8e, 0x6a, 0x28, 0xdb, 0x0b, 0xf4, 0xaf, 0x01, - 0x65, 0x97, 0xd6, 0xe5, 0x91, 0x20, 0x13, 0x2c, 0xb1, 0xc2, 0xd3, 0xc3, - 0x76, 0x90, 0xf8, 0xcd, 0x00, 0xde, 0x93, 0xf8, 0x4e, 0xcc, 0xdc, 0xca, - 0x9a, 0xf0, 0xbd, 0x9b, 0xd6, 0x57, 0xb1, 0x13, 0xd9, 0xe0, 0xe1, 0x9e, - 0x21, 0x74, 0xa9, 0x76, 0xc0, 0x0c, 0xad, 0x4f, 0x5d, 0xfe, 0x23, 0x32, - 0x5a, 0x10, 0x75, 0x5b, 0x05, 0xdf, 0xdc, 0x5b, 0x94, 0xcb, 0xe1, 0x9f, - 0x13, 0x51, 0xf5, 0x50, 0x36, 0x3b, 0xf2, 0x90, 0x9c, 0x9a, 0xc8, 0x10, - 0x88, 0xa9, 0xec, 0x22, 0x1e, 0x96, 0x70, 0xe8, 0x9e, 0x69, 0xc1, 0x22, - 0xd9, 0x14, 0x15, 0x2e, 0xbc, 0x03, 0x96, 0x9e, 0x1d, 0x00, 0x10, 0x16, - 0x4f, 0x56, 0xf0, 0x29, 0x47, 0x0a, 0x45, 0x34, 0x27, 0x21, 0x3b, 0x67, - 0x33, 0xf9, 0xdd, 0x29, 0x3a, 0xf2, 0xe4, 0x56, 0x34, 0x46, 0xbe, 0xd8, - 0x42, 0x29, 0x11, 0x7f, 0x30, 0xc1, 0xbe, 0xa5, 0xc8, 0x9d, 0x7b, 0x2e, - 0x4e, 0xcf, 0xba, 0x91, 0xb4, 0xbf, 0x0a, 0x04, 0x00, 0x49, 0x83, 0x6b, - 0x46, 0x5f, 0x3b, 0xfa, 0xf7, 0x40, 0x8d, 0x85, 0x47, 0x14, 0x58, 0xb3, - 0xa5, 0x66, 0x30, 0xfd, 0x4a, 0x80, 0xa4, 0x61, 0x3b, 0x7c, 0xb4, 0xcc, - 0x34, 0x8c, 0xc6, 0xb6, 0x10, 0xa9, 0x76, 0xc9, 0x11, 0xd7, 0x8a, 0x51, - 0x86, 0x17, 0x89, 0x28, 0xab, 0xd5, 0x03, 0x88, 0x74, 0x5b, 0x81, 0xbd, - 0x3a, 0x57, 0xfe, 0x66, 0x25, 0xd0, 0x92, 0x15, 0x84, 0x02, 0x0f, 0x51, - 0xa8, 0x58, 0xcf, 0x77, 0x65, 0x10, 0x61, 0xe8, 0xe6, 0xab, 0xb1, 0xba, - 0x3b, 0x08, 0xd6, 0xba, 0x5f, 0xf5, 0x74, 0xc5, 0x07, 0x60, 0xfd, 0xd3, - 0xc8, 0x52, 0x4e, 0xdb, 0xc3, 0xe3, 0x6d, 0x81, 0x20, 0x51, 0x01, 0x9a, - 0x5e, 0x32, 0x4e, 0x80, 0x5a, 0xcb, 0x83, 0xd7, 0xa4, 0xd9, 0xfb, 0xed, - 0x3d, 0x80, 0xa1, 0x83, 0x81, 0x91, 0xc0, 0x0b, 0xff, 0x67, 0xd8, 0x8b, - 0xd0, 0x12, 0x0b, 0xd4, 0x2b, 0x8e, 0x0d, 0x0f, 0xfc, 0xc7, 0xb3, 0xf1, - 0xe3, 0xf3, 0x5e, 0x0c, 0xb6, 0x6b, 0x9d, 0xdc, 0x22, 0x70, 0x31, 0x54, - 0xe8, 0x41, 0xfe, 0xa1, 0xe1, 0x4f, 0xfa, 0x81, 0xfb, 0xae, 0x72, 0x16, - 
0xb8, 0x87, 0xc9, 0x31, 0x9d, 0x42, 0x47, 0x4a, 0x20, 0xae, 0x63, 0x16, - 0x0d, 0xfa, 0xf1, 0x27, 0x19, 0x47, 0xee, 0x45, 0x84, 0x29, 0x9a, 0xb6, - 0x42, 0xef, 0xbd, 0x15, 0xa8, 0x34, 0x33, 0x38, 0x9c, 0x9d, 0xbb, 0x5c, - 0x03, 0xf3, 0xcf, 0xcf, 0x6d, 0x2e, 0xd5, 0x88, 0xf8, 0xdd, 0xfc, 0xc0, - 0x4a, 0xdb, 0x69, 0xd9, 0x62, 0x89, 0x24, 0x46, 0xee, 0xa4, 0xb9, 0x95, - 0xe6, 0xaf, 0x7d, 0x53, 0xec, 0x41, 0xae, 0x70, 0xfe, 0x4f, 0x31, 0xe3, - 0xa2, 0x59, 0x2c, 0xa1, 0x53, 0x8b, 0xb6, 0x3b, 0x39, 0xc1, 0xa4, 0xa7, - 0x9e, 0xaa, 0x00, 0x60, 0x9a, 0x5f, 0x56, 0x51, 0xf3, 0x7b, 0x28, 0x84, - 0x36, 0x1a, 0xc1, 0x2d, 0xc8, 0xed, 0xf8, 0x48, 0x48, 0x1d, 0x39, 0x4d, - 0x3d, 0xce, 0x30, 0x90, 0x29, 0x33, 0x6f, 0x9a, 0xce, 0x58, 0xe7, 0x88, - 0xac, 0x59, 0xce, 0x85, 0x5a, 0x52, 0x2b, 0x6c, 0xb7, 0xe9, 0x2e, 0xa9, - 0xd9, 0x9a, 0xea, 0x1c, 0x47, 0xb2, 0x59, 0xff, 0x73, 0x76, 0x21, 0x40, - 0xe1, 0xde, 0x32, 0xb8, 0x73, 0x3d, 0xa5, 0x44, 0x66, 0x79, 0xa1, 0xfe, - 0xaf, 0xf6, 0x8a, 0x97, 0x09, 0x5c, 0x8b, 0x64, 0x38, 0x9f, 0xe1, 0x59, - 0x38, 0x18, 0xe9, 0xc0, 0xd6, 0xa2, 0xac, 0x74, 0xa9, 0xfd, 0x4a, 0x0d, - 0xf6, 0x47, 0x00, 0x2b, 0x09, 0x46, 0x38, 0x1c, 0xa4, 0x9f, 0x63, 0x20, - 0x18, 0x75, 0x5a, 0xb8, 0xc4, 0xbc, 0xd6, 0x6b, 0xc8, 0x14, 0x72, 0x03, - 0xe4, 0x05, 0xd4, 0x4e, 0x66, 0x20, 0x42, 0xa2, 0x8f, 0x96, 0xe7, 0xaf, - 0xd3, 0xfb, 0xa8, 0x88, 0x9b, 0xe3, 0xaa, 0xcd, 0xab, 0xce, 0x8f, 0x07, - 0x6d, 0xef, 0x98, 0xce, 0xdb, 0x42, 0x5b, 0xf4, 0x61, 0x57, 0x62, 0x27, - 0x8a, 0x53, 0x5e, 0xf8, 0x3e, 0xf6, 0x7f, 0xde, 0x5e, 0x3b, 0x1b, 0x13, - 0x2e, 0x30, 0x46, 0x4b, 0x6b, 0xb7, 0xbb, 0x33, 0x31, 0xc0, 0xfa, 0x40, - 0xab, 0x68, 0x72, 0xe3, 0x92, 0x30, 0x47, 0xd6, 0x30, 0x60, 0x42, 0x5b, - 0x88, 0x8d, 0xa6, 0x56, 0xe4, 0xac, 0x33, 0x2e, 0xca, 0x05, 0x1f, 0x60, - 0xaf, 0xde, 0x7f, 0xa9, 0xda, 0x3f, 0xa8, 0x21, 0xf6, 0xfc, 0x98, 0x7d, - 0xc4, 0x1e, 0xb0, 0xa9, 0x56, 0x2d, 0x8d, 0xea, 0x03, 0x51, 0x48, 0xac, - 0xe8, 0x22, 0xc7, 0x8b, 0xef, 0x91, 0x0e, 0xcf, 0x0c, 0xe9, 0x38, 0x43, - 
0x99, 0xa8, 0x98, 0x4f, 0xfa, 0xe3, 0x03, 0xa6, 0x4f, 0xd4, 0x0d, 0x98, - 0x5b, 0x50, 0x28, 0xd7, 0xe7, 0x46, 0xd7, 0xad, 0x43, 0xb8, 0x56, 0x2a, - 0x2f, 0x7c, 0x39, 0x67, 0xf4, 0x62, 0x0e, 0xc0, 0xa8, 0x87, 0xb5, 0x81, - 0xe2, 0x13, 0x9f, 0xe4, 0xdd, 0x72, 0xf2, 0x07, 0xca, 0xac, 0x6d, 0xb2, - 0x96, 0x53, 0x5a, 0x8f, 0x66, 0x3c, 0xb4, 0xc1, 0x4f, 0x9a, 0x82, 0x55, - 0xcf, 0x0e, 0x27, 0x5f, 0xc7, 0xd2, 0x28, 0x27, 0x7f, 0x22, 0x6e, 0xa5, - 0xe7, 0x32, 0x56, 0x51, 0x18, 0xe0, 0x85, 0x6d, 0x1f, 0xfc, 0x25, 0x08, - 0x18, 0x60, 0x57, 0xfc, 0x66, 0x94, 0x2c, 0x4c, 0xbe, 0x00, 0xab, 0x9e, - 0x73, 0x9b, 0x06, 0xd3, 0xb5, 0x24, 0xa8, 0x8f, 0xb1, 0x33, 0x99, 0x4c, - 0xb4, 0x13, 0x07, 0xcd, 0x04, 0xdd, 0x77, 0xdc, 0xee, 0x96, 0x02, 0x59, - 0xe8, 0x22, 0x07, 0x16, 0x2e, 0x41, 0xc9, 0xc4, 0x59, 0x70, 0x37, 0x0f, - 0x14, 0xc9, 0xcf, 0x90, 0x57, 0xc2, 0x0d, 0xa3, 0xd7, 0x66, 0xb6, 0x7d, - 0x10, 0xd4, 0xfc, 0x18, 0x66, 0xad, 0xea, 0x5e, 0x64, 0x6c, 0x12, 0x66, - 0x3d, 0x96, 0xa5, 0xa8, 0x9c, 0x49, 0x5c, 0xd4, 0x8d, 0x1c, 0xc3, 0x38, - 0xfe, 0x53, 0xc2, 0x71, 0xd1, 0xc6, 0x41, 0xe2, 0xb9, 0x17, 0x74, 0x6e, - 0xcc, 0xf8, 0x72, 0x28, 0x38, 0x4e, 0x54, 0x9b, 0x0e, 0xa3, 0x3a, 0x43, - 0x5c, 0xd5, 0x83, 0x06, 0xbb, 0x46, 0x16, 0x6e, 0xe3, 0x8a, 0xd5, 0x1e, - 0x7f, 0x88, 0x62, 0xac, 0x35, 0x89, 0xfb, 0xbe, 0x96, 0x1d, 0x87, 0x37, - 0xb7, 0x91, 0x63, 0xae, 0x77, 0x7b, 0x66, 0x60, 0xc1, 0x3e, 0x80, 0x56, - 0xb1, 0xc8, 0x0d, 0x16, 0xde, 0x38, 0x82, 0x66, 0x99, 0x2b, 0x35, 0xd8, - 0xb4, 0x5b, 0x4b, 0x3e, 0x93, 0x96, 0x59, 0xf8, 0x96, 0x7e, 0x7b, 0x27, - 0xf4, 0x62, 0xb7, 0xda, 0x89, 0xa7, 0x34, 0x47, 0xed, 0xb3, 0x42, 0x20, - 0xeb, 0xcd, 0xf6, 0xa3, 0x9f, 0xf7, 0x48, 0x91, 0x17, 0xd2, 0x21, 0xed, - 0x5a, 0x22, 0x39, 0xc9, 0x76, 0x95, 0x36, 0xd9, 0x97, 0x0f, 0x19, 0xce, - 0xd3, 0xbc, 0x74, 0x7d, 0x53, 0x37, 0x3b, 0x4a, 0x97, 0xb7, 0xf8, 0x7e, - 0xdd, 0x4c, 0x5f, 0xae, 0x5c, 0x0b, 0xab, 0x4c, 0x34, 0xa1, 0x7e, 0x34, - 0x35, 0xf4, 0xfc, 0x92, 0xab, 0x2e, 0x6a, 0x15, 0xce, 0x84, 0xae, 0x70, - 
0xae, 0x85, 0x21, 0xe6, 0x41, 0x13, 0x31, 0xe0, 0x8f, 0xab, 0x82, 0xe3, - 0x09, 0xaf, 0xa4, 0x7c, 0xb4, 0xb9, 0xb7, 0xc0, 0x67, 0x08, 0xc9, 0x9d, - 0xcd, 0x0b, 0x3c, 0xa0, 0x0c, 0xde, 0x49, 0x2f, 0x40, 0x19, 0x95, 0x64, - 0xb9, 0x7c, 0x2a, 0x72, 0xdd, 0xa2, 0x92, 0x0a, 0x21, 0xeb, 0x8c, 0xc3, - 0x6d, 0x52, 0xe7, 0x05, 0x50, 0x01, 0x55, 0x19, 0x2f, 0xbd, 0x1b, 0x72, - 0x73, 0xfe, 0x82, 0x9f, 0xbf, 0xa0, 0xfe, 0x19, 0x7c, 0x42, 0x6d, 0x76, - 0x32, 0x47, 0x36, 0x15, 0x2e, 0xde, 0xe8, 0xe6, 0xca, 0x07, 0xa3, 0x6b, - 0x40, 0x99, 0x96, 0xcd, 0x19, 0xea, 0x7e, 0xc9, 0x87, 0x9d, 0x3d, 0xa0, - 0x82, 0x88, 0xe7, 0xe4, 0x34, 0x9f, 0xa5, 0x27, 0xdf, 0xae, 0x03, 0x37, - 0xa8, 0x35, 0x64, 0x02, 0x09, 0x09, 0x9e, 0xec, 0x38, 0x0a, 0xff, 0x79, - 0x8c, 0x9a, 0x87, 0x66, 0xcd, 0xe4, 0xf4, 0x9d, 0xa9, 0x07, 0x96, 0x36, - 0xae, 0x2e, 0x4e, 0xc5, 0xe9, 0x86, 0xb2, 0x8e, 0x71, 0x5d, 0xe8, 0xee, - 0x84, 0xf3, 0x30, 0x2a, 0x58, 0x1a, 0x80, 0xb8, 0xaa, 0xb8, 0x1d, 0xc4, - 0xae, 0x59, 0x91, 0xf3, 0x16, 0x9b, 0xa3, 0x8a, 0xa3, 0x26, 0xb2, 0x0a, - 0xe5, 0x58, 0xb7, 0x96, 0x87, 0xfb, 0x00, 0xe4, 0x50, 0x7c, 0xb1, 0x77, - 0x3a, 0x18, 0xc2, 0xe3, 0xc1, 0x12, 0xa6, 0x0d, 0x06, 0xeb, 0x80, 0x6c, - 0x5a, 0xee, 0x34, 0xcc, 0x1c, 0x87, 0x35, 0x46, 0x1d, 0x05, 0x83, 0xd8, - 0x91, 0x22, 0xaa, 0xf6, 0xad, 0x87, 0xab, 0x76, 0x18, 0x79, 0xe2, 0x09, - 0xc3, 0xa3, 0x15, 0x67, 0x3a, 0x7c, 0x0f, 0xa0, 0x4c, 0x7b, 0xfc, 0xfc, - 0xdd, 0x5c, 0xe4, 0x86, 0x58, 0x13, 0xb8, 0x97, 0xae, 0x8c, 0x75, 0xc8, - 0x02, 0x1e, 0x33, 0x45, 0xa9, 0x54, 0x09, 0x15, 0x53, 0x4f, 0x28, 0x47, - 0x4d, 0x5f, 0xd0, 0xc7, 0x09, 0xbd, 0x93, 0xb0, 0x08, 0x79, 0x05, 0xbc, - 0xbc, 0xaf, 0x2c, 0xbd, 0xbb, 0x21, 0xd1, 0x60, 0xb8, 0x81, 0x4c, 0x6c, - 0x5e, 0x45, 0x39, 0xa3, 0x31, 0x54, 0xb7, 0x82, 0xef, 0x86, 0xe4, 0x5e, - 0xca, 0xd6, 0xb8, 0x31, 0xa2, 0x4c, 0x84, 0x5b, 0xac, 0xe5, 0x29, 0xbf, - 0xbf, 0x89, 0xb4, 0x4c, 0xd3, 0x69, 0x66, 0x50, 0xeb, 0xda, 0x7d, 0x00, - 0xbb, 0x45, 0x0f, 0xe1, 0xd1, 0x30, 0x1a, 0xc6, 0x94, 0x66, 0xdc, 0x01, - 
0x75, 0xce, 0xf8, 0xfc, 0xd9, 0xce, 0xcf, 0x1f, 0x9e, 0x5a, 0x55, 0xa4, - 0x3e, 0xe6, 0x51, 0xc7, 0x74, 0x40, 0x82, 0x09, 0xea, 0xa0, 0xf5, 0xb2, - 0x70, 0x9f, 0x0e, 0xfb, 0x46, 0x8a, 0x69, 0xbf, 0x07, 0x92, 0xdc, 0x74, - 0x03, 0x70, 0xc6, 0x44, 0x81, 0x66, 0x40, 0xc7, 0xf5, 0xb8, 0xf0, 0x45, - 0x0f, 0xca, 0xd8, 0xb0, 0x9e, 0x48, 0x94, 0xff, 0x85, 0xcb, 0x7b, 0xec, - 0x67, 0x5d, 0xfe, 0xe9, 0x13, 0xd1, 0x67, 0x95, 0xd9, 0x35, 0x9e, 0x8a, - 0x53, 0x4d, 0x6b, 0x9d, 0x42, 0x53, 0xb1, 0x6b, 0x51, 0x1e, 0x35, 0x40, - 0x81, 0x92, 0x91, 0x5f, 0x1f, 0x8e, 0xbe, 0x37, 0xd3, 0x85, 0xab, 0x85, - 0x37, 0x1c, 0x0f, 0xae, 0xd9, 0xf7, 0xa2, 0x75, 0x3d, 0xd9, 0xd7, 0x2a, - 0x80, 0xb0, 0x4c, 0x14, 0x04, 0x40, 0xc5, 0xba, 0x0e, 0xbe, 0xab, 0xcc, - 0x38, 0x35, 0x62, 0x6c, 0xa5, 0xce, 0x49, 0x15, 0x2a, 0x10, 0xb5, 0x6a, - 0xd2, 0x3b, 0xd2, 0x6a, 0xad, 0x2e, 0x34, 0x46, 0x8b, 0x78, 0x57, 0x6e, - 0xc4, 0xde, 0x65, 0x68, 0x05, 0x8f, 0xd6, 0x6e, 0x34, 0xb9, 0xaa, 0x80, - 0x77, 0xff, 0x6c, 0x1a, 0x37, 0x87, 0xdd, 0x33, 0x13, 0x33, 0xa7, 0xa9, - 0x3a, 0x90, 0x32, 0x7b, 0x9b, 0x21, 0x31, 0xc8, 0xf5, 0x4c, 0xa6, 0x73, - 0x42, 0x79, 0x46, 0x14, 0x1b, 0xef, 0xf4, 0x78, 0xd9, 0x7e, 0x6f, 0x31, - 0xaa, 0x59, 0x97, 0x34, 0xe5, 0xe6, 0x67, 0xf3, 0x86, 0xf5, 0x61, 0xe7, - 0x51, 0x6d, 0xce, 0xb3, 0xdc, 0x86, 0xc7, 0x55, 0x43, 0xfa, 0x38, 0x78, - 0xb0, 0x8d, 0x03, 0x9c, 0xe4, 0x6c, 0xca, 0x73, 0x94, 0xa1, 0x0c, 0xb8, - 0x11, 0xda, 0x0c, 0x0b, 0x18, 0x1b, 0xd0, 0x99, 0xe7, 0xa9, 0x0d, 0xc3, - 0x36, 0xd7, 0x8c, 0x16, 0xad, 0x16, 0x1f, 0xb2, 0x3c, 0x07, 0x32, 0x11, - 0x6c, 0xd2, 0x8f, 0x33, 0x37, 0x5c, 0x3e, 0x4f, 0x7a, 0x76, 0xf7, 0x85, - 0xcc, 0x68, 0x1a, 0xf9, 0x26, 0x74, 0x42, 0xc9, 0xea, 0x21, 0x7e, 0x74, - 0x3c, 0x4f, 0xde, 0xfb, 0xd7, 0x83, 0x62, 0x12, 0xc7, 0x4f, 0xfc, 0x47, - 0x18, 0x9d, 0xc5, 0xf5, 0xe9, 0xd7, 0xaa, 0x76, 0x20, 0x99, 0x79, 0xae, - 0x9b, 0x7a, 0xde, 0x8b, 0x95, 0xc2, 0xa5, 0xa3, 0x6a, 0x30, 0x9b, 0x99, - 0x63, 0x34, 0x7c, 0xd1, 0x53, 0xa1, 0x6c, 0xd6, 0xed, 0x7d, 0x8c, 0xba, - 
0xc8, 0x21, 0xf3, 0xe1, 0x31, 0x55, 0x3d, 0x88, 0x87, 0x04, 0xc7, 0xc9, - 0x65, 0x0c, 0x53, 0x1e, 0xd4, 0xd9, 0xaa, 0xda, 0xc2, 0x14, 0x88, 0xf2, - 0x07, 0x2c, 0x12, 0x4d, 0x79, 0x54, 0xaa, 0xd9, 0x47, 0x95, 0xf9, 0x7e, - 0x26, 0x89, 0x4b, 0x63, 0x7e, 0x44, 0x06, 0x0e, 0xe2, 0x8d, 0x9a, 0x0a, - 0xc3, 0xee, 0x55, 0x13, 0x55, 0x04, 0xcc, 0xb5, 0x2e, 0xa0, 0x0d, 0xec, - 0x76, 0x84, 0xc1, 0x1e, 0xdd, 0xe6, 0xfa, 0x54, 0x6e, 0x38, 0x30, 0x6f, - 0xcc, 0xa4, 0x8d, 0x76, 0x1e, 0xa3, 0x8e, 0x2c, 0x5e, 0x37, 0xeb, 0x0b, - 0xf4, 0xb5, 0x80, 0xde, 0x58, 0x13, 0x5a, 0x52, 0xdc, 0x65, 0x99, 0x1a, - 0x1b, 0x75, 0x0c, 0xbd, 0x83, 0xe8, 0x90, 0x8e, 0xa9, 0xbf, 0x42, 0x22, - 0xe1, 0x3a, 0x31, 0x4e, 0x54, 0xad, 0xd4, 0x6f, 0x80, 0xb4, 0xb5, 0x82, - 0x05, 0x20, 0xd7, 0x38, 0xd7, 0xeb, 0x25, 0x33, 0xe9, 0x4b, 0xc3, 0x5e, - 0xd1, 0x11, 0xb0, 0xd9, 0x8e, 0x90, 0x48, 0x2a, 0xe3, 0xa0, 0x60, 0x16, - 0x70, 0xe3, 0xd1, 0x45, 0x11, 0x64, 0x91, 0x69, 0x87, 0x1c, 0xbb, 0x91, - 0xc4, 0x43, 0x12, 0x62, 0x99, 0x69, 0xe5, 0x96, 0x01, 0x15, 0xdb, 0xdf, - 0x05, 0x55, 0x34, 0xbb, 0xd6, 0x76, 0x89, 0xcd, 0xb5, 0x4f, 0x2e, 0xa7, - 0x6e, 0x15, 0xc9, 0xc0, 0x8e, 0xa8, 0x63, 0x79, 0x12, 0xfb, 0x7e, 0x69, - 0x8f, 0x52, 0x5e, 0xe7, 0x76, 0x16, 0x28, 0x76, 0xca, 0xcb, 0xd8, 0x0e, - 0x4a, 0x93, 0x9d, 0x16, 0x68, 0x98, 0xf8, 0xc3, 0x39, 0xb2, 0x2d, 0xea, - 0xba, 0x72, 0x16, 0x33, 0xb7, 0xec, 0x61, 0x9e, 0x94, 0x32, 0x01, 0x22, - 0xde, 0x66, 0xfd, 0x68, 0xfa, 0xcf, 0xf2, 0x52, 0x4f, 0x02, 0xe8, 0x25, - 0xd3, 0xa3, 0x5b, 0x29, 0xae, 0xe9, 0x62, 0xfa, 0xd6, 0x1a, 0x50, 0x80, - 0x95, 0x96, 0xdf, 0x00, 0xfc, 0x23, 0xf1, 0x95, 0xef, 0xbb, 0xf5, 0x23, - 0x9d, 0x6b, 0xd6, 0xed, 0xb4, 0xe2, 0x4a, 0xf6, 0xb8, 0x20, 0x83, 0x6b, - 0x45, 0x92, 0x29, 0x5a, 0x02, 0xe9, 0xf7, 0x8e, 0x5c, 0x02, 0xde, 0xb4, - 0x9a, 0xdf, 0x18, 0x10, 0x17, 0x7f, 0xd8, 0x2e, 0x17, 0xc0, 0xf0, 0x6b, - 0x3b, 0x88, 0x09, 0x58, 0xf2, 0x18, 0x22, 0x09, 0x80, 0x4a, 0xe0, 0x51, - 0x6f, 0x7a, 0x70, 0x09, 0x1f, 0xe5, 0xfa, 0xa9, 0x4d, 0x24, 0x1f, 0x18, - 
0x1c, 0x74, 0xcd, 0x87, 0x04, 0xfd, 0x85, 0x33, 0x4c, 0x28, 0xbd, 0xa3, - 0x66, 0x6c, 0x99, 0x7e, 0x50, 0x5e, 0xb5, 0x22, 0x33, 0x92, 0xd4, 0xd8, - 0x82, 0x4e, 0x38, 0xbe, 0xcb, 0x3d, 0x5f, 0x19, 0xd1, 0x0f, 0x8b, 0xa1, - 0x78, 0x08, 0x1c, 0x10, 0x0b, 0x77, 0xa7, 0x39, 0x2e, 0x91, 0x83, 0xee, - 0x1d, 0x36, 0xd8, 0x77, 0x87, 0x8a, 0x38, 0x45, 0x3c, 0xbd, 0xb9, 0x88, - 0xbb, 0x1b, 0x20, 0xd1, 0x95, 0xb9, 0x8f, 0x03, 0x46, 0xfa, 0xab, 0x70, - 0x68, 0x26, 0xd9, 0xb1, 0x25, 0x52, 0x5a, 0x77, 0x2d, 0x92, 0xc2, 0x1d, - 0xb6, 0x6e, 0xec, 0x67, 0xef, 0x34, 0xe2, 0x64, 0xb3, 0xa0, 0xae, 0x0c, - 0xd9, 0x36, 0xa1, 0xc7, 0xd8, 0xbf, 0x7a, 0x43, 0xbf, 0xc0, 0xc6, 0x90, - 0x60, 0x6a, 0x23, 0xc0, 0x6a, 0x5d, 0x62, 0x18, 0xac, 0xc1, 0x20, 0x35, - 0x17, 0xba, 0x4e, 0x54, 0xb7, 0xec, 0xd4, 0xad, 0x99, 0x94, 0xa4, 0xda, - 0x57, 0xe7, 0x46, 0xed, 0x47, 0xd1, 0xb4, 0xa2, 0x3e, 0x0f, 0x4a, 0xb6, - 0xa6, 0x68, 0x3e, 0x94, 0xb9, 0x18, 0x30, 0xe0, 0x75, 0x08, 0xe8, 0xf3, - 0x21, 0x79, 0x26, 0x68, 0x6a, 0x65, 0xb6, 0xbe, 0x03, 0x98, 0x8f, 0x04, - 0xad, 0x1e, 0xb0, 0x54, 0xd2, 0x28, 0xdd, 0x4a, 0xe9, 0xf3, 0xa0, 0x06, - 0xbf, 0x0b, 0x2a, 0xee, 0xf8, 0x03, 0x7e, 0x1d, 0x37, 0xc1, 0x32, 0xd1, - 0x41, 0xf4, 0x9b, 0xc5, 0x02, 0x10, 0x6f, 0x55, 0x5a, 0xec, 0x5b, 0xe7, - 0x61, 0x05, 0x17, 0xf0, 0xf8, 0xc6, 0x89, 0xe8, 0xad, 0x32, 0x57, 0x14, - 0xe5, 0xf8, 0xf5, 0x88, 0xd9, 0x73, 0x17, 0x10, 0xa7, 0xc3, 0xf8, 0x78, - 0x0b, 0x66, 0xab, 0x63, 0x4f, 0x96, 0x5d, 0xdf, 0x36, 0x83, 0xc4, 0x6f, - 0x20, 0xbd, 0xcb, 0x4c, 0xd2, 0xfa, 0x35, 0x87, 0xd8, 0xb6, 0xbb, 0xcc, - 0xb6, 0xd2, 0x85, 0x03, 0x6a, 0xea, 0xbb, 0x6d, 0x2f, 0xa2, 0x06, 0xc0, - 0xd6, 0x68, 0xd9, 0x7f, 0xd6, 0xa2, 0x3b, 0x08, 0x6a, 0x98, 0x26, 0x6d, - 0x9a, 0x2b, 0x68, 0x51, 0x78, 0xde, 0xa6, 0x96, 0x50, 0x7b, 0xfc, 0x03, - 0x43, 0xf8, 0x21, 0x01, 0x9d, 0xe2, 0x89, 0x65, 0x47, 0xae, 0x9c, 0x45, - 0x5e, 0xa5, 0xce, 0x97, 0xb3, 0xe6, 0xf6, 0xd4, 0x5a, 0xe8, 0x6b, 0x87, - 0xd6, 0xdf, 0xfb, 0x1f, 0xaf, 0xfb, 0xaf, 0x19, 0xa5, 0xfd, 0xba, 0xe0, - 
0x22, 0x2f, 0x91, 0x97, 0xdf, 0xae, 0xe9, 0x39, 0xb1, 0xe4, 0xd3, 0x10, - 0xcb, 0xb3, 0x03, 0xb5, 0x0b, 0xf0, 0xd9, 0x70, 0x1e, 0x9c, 0x63, 0x6f, - 0x3a, 0xcf, 0x3c, 0x1b, 0x86, 0xa3, 0xad, 0x1a, 0xe7, 0x4c, 0x09, 0xd0, - 0x80, 0xf6, 0x8b, 0x72, 0x96, 0x53, 0x7e, 0x66, 0xfb, 0x7c, 0x7c, 0x8a, - 0xb0, 0x60, 0xa6, 0x4c, 0x20, 0xc4, 0x63, 0x69, 0x6a, 0xc3, 0x53, 0xf8, - 0x9a, 0x28, 0x30, 0x9d, 0x6f, 0x0e, 0x1b, 0xb2, 0x2c, 0xe6, 0x94, 0x9f, - 0xfc, 0xc0, 0x8d, 0x71, 0xbe, 0x37, 0xa6, 0xc9, 0xbd, 0x3c, 0x4a, 0xf3, - 0xc4, 0xb3, 0x88, 0x4c, 0x45, 0x26, 0x4e, 0x2f, 0x83, 0x16, 0x70, 0xb6, - 0xc7, 0xb2, 0x36, 0xf0, 0x0c, 0x67, 0xd2, 0x0a, 0xd3, 0xd9, 0x7c, 0x35, - 0x29, 0xac, 0xd4, 0x9c, 0x6d, 0xfc, 0xec, 0x58, 0x92, 0xf0, 0xba, 0x32, - 0x00, 0xae, 0xb1, 0xeb, 0x4d, 0x8c, 0x1a, 0x20, 0xe7, 0x5c, 0xfc, 0x9a, - 0x4d, 0x51, 0x24, 0x7b, 0x52, 0xeb, 0x13, 0x3d, 0xb4, 0xab, 0xda, 0xb3, - 0x74, 0x39, 0xd2, 0xf8, 0x2d, 0xef, 0x9b, 0x0f, 0xae, 0xf5, 0x3c, 0x99, - 0x34, 0xbe, 0x15, 0x5c, 0x9f, 0x5d, 0xae, 0xf4, 0x72, 0xc2, 0xac, 0x06, - 0xbe, 0xad, 0xe4, 0x68, 0xea, 0xd5, 0xa1, 0xdc, 0xdb, 0xf4, 0x61, 0x51, - 0xf5, 0x1a, 0x62, 0x15, 0xfd, 0x00, 0x51, 0x35, 0x53, 0x6c, 0x39, 0x3e, - 0xdb, 0x60, 0x0a, 0x52, 0xc1, 0x52, 0x3c, 0xd7, 0xab, 0x73, 0xea, 0x1e, - 0x38, 0x38, 0x65, 0x35, 0x35, 0x2b, 0x28, 0x04, 0x5c, 0x82, 0xea, 0x4a, - 0x9e, 0x96, 0x72, 0xa4, 0x8e, 0x42, 0xfd, 0x55, 0xa8, 0x66, 0x7a, 0x40, - 0xc9, 0xf2, 0xc2, 0x1e, 0x5d, 0x09, 0x90, 0x32, 0x18, 0xdb, 0x11, 0x4c, - 0x6c, 0x9c, 0x27, 0x62, 0x0a, 0xe6, 0xc1, 0xdf, 0xf2, 0x6a, 0x8c, 0x26, - 0xb4, 0xfb, 0xda, 0xa9, 0x08, 0x10, 0x3a, 0xf0, 0xe1, 0x64, 0xe5, 0x03, - 0x81, 0x7d, 0x15, 0x74, 0xa1, 0x8d, 0x10, 0xc8, 0xbb, 0x6a, 0x7c, 0x60, - 0xa1, 0x09, 0x35, 0x19, 0x2d, 0x70, 0xb5, 0x36, 0xc8, 0x8b, 0x66, 0x5f, - 0xe0, 0xe7, 0xea, 0x70, 0x2f, 0x5d, 0x3f, 0xae, 0x5e, 0x25, 0x84, 0xdd, - 0x9b, 0x69, 0x44, 0x37, 0x7c, 0x6b, 0x9e, 0x81, 0x18, 0x36, 0x4b, 0xff, - 0x86, 0x44, 0x2a, 0x39, 0x66, 0x7f, 0x71, 0x43, 0xe7, 0x65, 0xfe, 0xfd, - 
0x34, 0xb9, 0xd9, 0x5a, 0x00, 0xd1, 0x41, 0x43, 0xc7, 0xbc, 0x65, 0x68, - 0xb7, 0x73, 0xff, 0x19, 0xd3, 0xed, 0x15, 0xa4, 0x67, 0xa1, 0x53, 0x0e, - 0xa6, 0xfb, 0x25, 0xce, 0x9d, 0x5b, 0x73, 0x08, 0xf3, 0x3b, 0x69, 0xe4, - 0x94, 0x9b, 0x94, 0x03, 0xb3, 0x8a, 0x2e, 0x07, 0x0c, 0xef, 0x18, 0x4c, - 0x2b, 0x1c, 0x83, 0x9f, 0x25, 0x20, 0x29, 0x72, 0x11, 0xa0, 0xaa, 0xed, - 0x0c, 0xf9, 0xce, 0x94, 0x0d, 0x7a, 0xb6, 0xb3, 0xa4, 0x57, 0xd6, 0x61, - 0xca, 0x1a, 0x0e, 0x89, 0x6d, 0x99, 0x4d, 0x06, 0xcd, 0x83, 0x7e, 0x09, - 0x14, 0x5b, 0xe7, 0x4c, 0x72, 0xa8, 0x98, 0xc8, 0x27, 0xf3, 0x70, 0x89, - 0x87, 0x11, 0xbb, 0x98, 0x82, 0x77, 0x9d, 0xaa, 0x95, 0x8c, 0xc1, 0xf8, - 0x39, 0x27, 0xd5, 0x64, 0x59, 0x6a, 0x8c, 0xbe, 0xe2, 0xe1, 0xd1, 0x6b, - 0xe3, 0xaf, 0x30, 0x6f, 0xf4, 0x9e, 0x35, 0x0b, 0x10, 0x24, 0x77, 0xd8, - 0xa4, 0x30, 0x2e, 0xf7, 0x97, 0xfd, 0xef, 0x1e, 0x9e, 0xf2, 0xbd, 0xf2, - 0x41, 0x73, 0x19, 0xe6, 0x7b, 0x7f, 0x74, 0x11, 0x91, 0x38, 0xc5, 0xac, - 0xd5, 0xb0, 0x48, 0xc4, 0xe9, 0x41, 0xd4, 0x50, 0x76, 0x13, 0xbf, 0xec, - 0xe8, 0x3a, 0xa8, 0x84, 0x42, 0x98, 0x12, 0x64, 0x95, 0x85, 0x79, 0x29, - 0xea, 0x3a, 0xf9, 0xa4, 0x5c, 0x9c, 0x35, 0x01, 0x68, 0x71, 0xb9, 0x5b, - 0xbe, 0xaa, 0x76, 0x9e, 0x63, 0x1c, 0xc1, 0x83, 0x94, 0xc6, 0x89, 0x2b, - 0x1d, 0x00, 0x43, 0x74, 0x00, 0x41, 0x93, 0x58, 0x52, 0xf9, 0x13, 0xfe, - 0x9f, 0x7a, 0xb7, 0x3d, 0x6b, 0x70, 0x4e, 0x4f, 0x8f, 0xf4, 0x9c, 0xe4, - 0x97, 0x62, 0xaf, 0x69, 0x45, 0xec, 0xf4, 0x53, 0x71, 0xdc, 0xc7, 0x8d, - 0x6f, 0xb2, 0x9d, 0xec, 0x43, 0xdd, 0xc0, 0xe5, 0xd1, 0x6c, 0x1a, 0x82, - 0x19, 0xf6, 0x18, 0xd3, 0x59, 0x0e, 0x07, 0x81, 0x5a, 0x23, 0x10, 0x8b, - 0xaa, 0x0b, 0x99, 0xc8, 0x34, 0xc2, 0xd0, 0xa9, 0x69, 0x7f, 0x54, 0xe3, - 0xc4, 0xa0, 0xe7, 0x4b, 0x31, 0x90, 0xe7, 0x3b, 0x45, 0x9b, 0x7f, 0xae, - 0xd2, 0xab, 0x22, 0xb9, 0xfc, 0x07, 0x39, 0x4b, 0x45, 0x83, 0x8d, 0x41, - 0x7a, 0x52, 0xb2, 0xae, 0x71, 0x78, 0x17, 0x63, 0xfa, 0xbe, 0x59, 0xca, - 0xf0, 0xfd, 0x68, 0xe5, 0xc4, 0x9a, 0x74, 0x3d, 0xec, 0xd4, 0x8b, 0xa1, - 
0x2c, 0x31, 0x4d, 0x73, 0xfd, 0x5c, 0x1e, 0xeb, 0x5f, 0xf6, 0x42, 0x0d, - 0x79, 0x5f, 0x64, 0x10, 0xae, 0xb2, 0xf6, 0x9e, 0xa8, 0xab, 0xa5, 0x2b, - 0x9a, 0xcf, 0x25, 0xfa, 0xa2, 0xb3, 0xdc, 0x30, 0x3d, 0x08, 0x4e, 0xbb, - 0x7b, 0x0c, 0x28, 0x34, 0x9d, 0xda, 0xc4, 0x94, 0xa4, 0xf4, 0x1e, 0x78, - 0x8b, 0xa9, 0xd3, 0xa7, 0x1c, 0x2a, 0x27, 0x14, 0xa0, 0x44, 0x1a, 0x9a, - 0x87, 0x72, 0xa5, 0x6d, 0x69, 0x46, 0xe5, 0xc1, 0x4f, 0x29, 0x87, 0xc0, - 0xa7, 0xa8, 0x96, 0xde, 0xa9, 0x63, 0x08, 0xd8, 0x4a, 0xa1, 0x25, 0x43, - 0x76, 0x41, 0xf7, 0x9f, 0x17, 0xe3, 0xe1, 0x4b, 0xc6, 0x2b, 0x79, 0xea, - 0xd5, 0xa7, 0x72, 0x16, 0x0a, 0x8c, 0xcd, 0x49, 0x70, 0x75, 0xd4, 0x59, - 0x4a, 0x19, 0x7b, 0x31, 0x02, 0x7a, 0x3a, 0x20, 0x15, 0x62, 0x7e, 0x4e, - 0x6f, 0xac, 0xd0, 0xd1, 0x29, 0xbd, 0x2d, 0xa1, 0xc6, 0x3e, 0xa6, 0x1a, - 0x26, 0x18, 0x96, 0x98, 0x12, 0x56, 0x37, 0xbf, 0xb4, 0x91, 0x57, 0xe8, - 0xda, 0x61, 0x7c, 0x2f, 0x3e, 0xd4, 0x51, 0xfe, 0xe8, 0x5b, 0x00, 0x30, - 0x08, 0xf6, 0x4e, 0x69, 0xa8, 0x1a, 0x2b, 0x82, 0x41, 0x85, 0xa9, 0xd9, - 0x3c, 0xc8, 0x02, 0x91, 0x99, 0xd4, 0xa2, 0xfd, 0x9d, 0x1b, 0x08, 0xfc, - 0x41, 0x3e, 0x10, 0x6b, 0x80, 0x74, 0x3d, 0x72, 0x61, 0x97, 0xdd, 0x96, - 0xec, 0xf4, 0xd6, 0x6d, 0x68, 0x02, 0x6e, 0xbb, 0x55, 0x9d, 0x6f, 0x11, - 0xde, 0xd1, 0xad, 0x6d, 0x42, 0x96, 0x2c, 0x42, 0x1e, 0xa9, 0x19, 0x42, - 0x22, 0x38, 0x38, 0x18, 0x3c, 0x4b, 0xc1, 0x9c, 0x0f, 0xe1, 0x34, 0x61, - 0x06, 0x77, 0x54, 0x04, 0xe0, 0x87, 0x94, 0x5c, 0xc9, 0xa1, 0x35, 0x55, - 0x3d, 0x4a, 0xf2, 0x4f, 0x05, 0x11, 0x98, 0x6f, 0x3c, 0x85, 0x84, 0xe6, - 0xf8, 0x71, 0x8a, 0xdf, 0xe9, 0x9a, 0xe3, 0x70, 0xd6, 0x36, 0xd6, 0xc8, - 0x66, 0x3e, 0xba, 0x7c, 0x0a, 0x23, 0x0a, 0xd0, 0xb6, 0x66, 0x68, 0xa8, - 0xdf, 0x37, 0x17, 0xfb, 0xdd, 0x9c, 0x8b, 0xc7, 0x8e, 0xc4, 0x4f, 0x40, - 0x08, 0x23, 0x58, 0x15, 0xa2, 0xba, 0xef, 0xdf, 0x67, 0xcd, 0x1f, 0xb6, - 0xc4, 0xea, 0xce, 0x81, 0x38, 0x58, 0x92, 0x57, 0xcf, 0x83, 0x47, 0x29, - 0x9f, 0xde, 0x9b, 0xde, 0x01, 0xfe, 0x68, 0x91, 0x67, 0x06, 0x9d, 0x31, - 
0xd0, 0xb9, 0xc3, 0xbb, 0xc3, 0x6b, 0xa0, 0x04, 0x1e, 0x34, 0xd5, 0x38, - 0xd4, 0xac, 0x70, 0xae, 0xab, 0xb2, 0xbd, 0x4b, 0xa0, 0xad, 0x2b, 0x82, - 0xaf, 0x8c, 0x90, 0x4d, 0xd3, 0xca, 0x71, 0x35, 0x75, 0x89, 0xe5, 0x42, - 0x91, 0x46, 0x8d, 0x18, 0x04, 0x7a, 0xb9, 0xaa, 0x3b, 0xe7, 0x1e, 0x8c, - 0x4e, 0xf9, 0x6e, 0x74, 0xaa, 0x2e, 0x36, 0x86, 0xfb, 0xef, 0x9c, 0xd7, - 0xba, 0x5e, 0x2e, 0x3c, 0x40, 0xce, 0x8b, 0x2b, 0x94, 0x55, 0xf2, 0xd4, - 0x7d, 0xbf, 0x8c, 0x8a, 0xa8, 0x59, 0x84, 0x6f, 0x32, 0x95, 0xc5, 0xcc, - 0xad, 0xee, 0x30, 0x23, 0x7c, 0x54, 0xea, 0x60, 0xb8, 0x88, 0x12, 0x45, - 0x03, 0xbc, 0xe3, 0x92, 0x9f, 0xa8, 0x5b, 0x07, 0x97, 0x53, 0x0d, 0xe1, - 0xe3, 0x3d, 0xdf, 0xf2, 0x2a, 0x12, 0xee, 0xdf, 0x73, 0x8d, 0x41, 0xf4, - 0xe4, 0x2c, 0xb4, 0xd4, 0x9e, 0xfe, 0xf2, 0xe6, 0xa0, 0x9e, 0x2a, 0x3a, - 0x36, 0x26, 0x7e, 0xd9, 0xe1, 0x22, 0xee, 0x0b, 0x5b, 0x48, 0xd2, 0xa9, - 0x55, 0xab, 0x50, 0x7c, 0xf6, 0xc8, 0x56, 0x31, 0xbb, 0x51, 0xe9, 0x31, - 0x4d, 0xaa, 0x13, 0x3a, 0x99, 0x9f, 0x8c, 0x59, 0x6a, 0xc9, 0xf1, 0x0a, - 0x89, 0xcc, 0x39, 0x98, 0xbd, 0xc3, 0x93, 0x97, 0x28, 0xe5, 0x73, 0x94, - 0xf2, 0x0a, 0x7a, 0x09, 0x38, 0x0b, 0xab, 0xd8, 0x49, 0x98, 0x14, 0x34, - 0x32, 0x9d, 0xef, 0x9d, 0x47, 0xdb, 0x82, 0xb9, 0x84, 0xd6, 0xd7, 0x9f, - 0xf7, 0xdf, 0x79, 0x5b, 0xe8, 0x92, 0x44, 0x31, 0x5d, 0x42, 0x80, 0x90, - 0x8d, 0x36, 0xa2, 0x39, 0x02, 0x64, 0x21, 0xa2, 0xb8, 0xfc, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xc8, 0xeb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0xd8, 0x03, 0x00, 0x00, 0xdc, 0x03, 0x00, 0x00, - 0xe0, 0x03, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x04, 0x03, 0x00, 0x00, 0xac, 0x02, 0x00, 0x00, - 0x74, 0x02, 0x00, 0x00, 0x2c, 0x02, 0x00, 0x00, 0xf4, 0x01, 0x00, 0x00, - 0xac, 0x01, 0x00, 0x00, 0x74, 0x01, 0x00, 0x00, 0x2c, 0x01, 0x00, 0x00, - 0xe4, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x9e, 0xfc, 0xff, 0xff, - 
0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x5e, 0xfd, 0xff, 0xff, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x96, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x88, 0xfd, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x31, 0x00, 0x00, 0x00, 0xca, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x78, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x00, - 0x0e, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, - 0xbc, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x52, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 
0x21, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x96, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x88, 0xfe, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x78, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x0e, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x42, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xf0, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x11, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x86, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x78, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xba, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 
0x1c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x68, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x14, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, - 0x34, 0x10, 0x00, 0x00, 0xd4, 0x0f, 0x00, 0x00, 0x7c, 0x0f, 0x00, 0x00, - 0x38, 0x0f, 0x00, 0x00, 0xdc, 0x0e, 0x00, 0x00, 0x74, 0x0e, 0x00, 0x00, - 0x2c, 0x0e, 0x00, 0x00, 0xe8, 0x0d, 0x00, 0x00, 0x90, 0x0d, 0x00, 0x00, - 0x48, 0x0d, 0x00, 0x00, 0x04, 0x0d, 0x00, 0x00, 0xc0, 0x0c, 0x00, 0x00, - 
0x64, 0x0c, 0x00, 0x00, 0x0c, 0x0c, 0x00, 0x00, 0xc4, 0x0b, 0x00, 0x00, - 0x80, 0x0b, 0x00, 0x00, 0x28, 0x0b, 0x00, 0x00, 0xe0, 0x0a, 0x00, 0x00, - 0x9c, 0x0a, 0x00, 0x00, 0x58, 0x0a, 0x00, 0x00, 0xfc, 0x09, 0x00, 0x00, - 0xa4, 0x09, 0x00, 0x00, 0x5c, 0x09, 0x00, 0x00, 0x18, 0x09, 0x00, 0x00, - 0xc0, 0x08, 0x00, 0x00, 0x78, 0x08, 0x00, 0x00, 0x34, 0x08, 0x00, 0x00, - 0xf0, 0x07, 0x00, 0x00, 0x94, 0x07, 0x00, 0x00, 0x3c, 0x07, 0x00, 0x00, - 0xf4, 0x06, 0x00, 0x00, 0xb0, 0x06, 0x00, 0x00, 0x58, 0x06, 0x00, 0x00, - 0x10, 0x06, 0x00, 0x00, 0xcc, 0x05, 0x00, 0x00, 0x88, 0x05, 0x00, 0x00, - 0x2c, 0x05, 0x00, 0x00, 0xd4, 0x04, 0x00, 0x00, 0x8c, 0x04, 0x00, 0x00, - 0x48, 0x04, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x08, 0x03, 0x00, 0x00, 0xc4, 0x02, 0x00, 0x00, - 0x80, 0x02, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0xcc, 0x01, 0x00, 0x00, - 0x84, 0x01, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0xfc, 0xf0, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x38, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0xf2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf1, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0a, 0xd7, 0x23, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x32, 0xf1, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x40, 0x00, 0x00, 0x00, - 0x24, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 
0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x37, 0x01, 0x00, 0x00, 0x00, - 0xc2, 0xff, 0x7f, 0x3f, 0x01, 0x00, 0x00, 0x00, 0xd2, 0x6f, 0x75, 0x36, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x8a, 0xf1, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x06, 0x16, 0x49, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x87, 0x19, 0xb1, 0x40, 0x01, 0x00, 0x00, 0x00, 0x58, 0x80, 0xdf, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x3a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xf2, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x5d, 0xd1, 0xce, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x7a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xf2, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x23, 0x20, 0xb6, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x62, 0xf2, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 
0x01, 0x00, 0x00, 0x00, 0xa2, 0x5a, 0x91, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x47, 0xc9, 0x90, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x00, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xac, 0xf2, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x81, 0xb7, 0xf1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9e, 0xb5, 0x71, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x20, 0x70, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x6a, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x5c, 0xf3, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x97, 0x35, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xaa, 0xf3, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0x9c, 0xf3, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0xf5, 0x1f, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xea, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf3, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xc7, 0xea, 0x1a, 0x3c, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xd2, 0xf3, 0xff, 0xff, - 
0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0xb2, 0x78, 0x3f, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x39, 0xb9, 0x3e, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70, 0xf5, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x89, 0x25, 0xf2, 0x39, 0x01, 0x00, 0x00, 0x00, - 0xde, 0xdc, 0x1d, 0x41, 0x01, 0x00, 0x00, 0x00, 0xa5, 0x23, 0x72, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0xda, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xf4, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x42, 0xe0, 0x90, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x1a, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x1a, 0x2a, 0x19, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x5a, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 
0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xe9, 0x36, 0xdd, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x42, 0xf5, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0xdd, 0x43, 0x7e, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x99, 0x45, 0x7d, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xe0, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x8c, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x5c, 0xfd, 0xa9, 0x39, 0x01, 0x00, 0x00, 0x00, 0x1e, 0xaa, 0x87, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x08, 0xfc, 0x29, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x4a, 0xf6, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x55, 0xf7, 0x52, 0x35, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x8a, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x7c, 0xf6, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 
0xd0, 0xda, 0x1e, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xca, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0xbc, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x8e, 0x0b, 0xa8, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xb2, 0xf6, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0xa4, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x12, 0x1c, 0x6e, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0xdd, 0x4a, 0x00, 0x41, 0x01, 0x00, 0x00, 0x00, 0x31, 0xc6, 0xd9, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x62, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x54, 0xf7, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x9d, 0x16, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xa2, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xf7, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x34, 0xab, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x8a, 0xf7, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 
0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x2e, 0x36, 0xe1, 0x3c, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0x54, 0xe0, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x28, 0xf9, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xd4, 0xf7, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xe1, 0xd0, 0xa2, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9b, 0xcf, 0x22, 0x41, 0x01, 0x00, 0x00, 0x00, - 0xea, 0x23, 0x12, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x92, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x84, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x99, 0xd3, 0xf7, 0x34, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf8, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0xc4, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0xd5, 0xc2, 0x3a, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x12, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0x04, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x01, 0x00, 0x00, 0x00, 0x8f, 0x84, 0xa2, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xfa, 0xf8, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xec, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x64, 0xeb, 0x8e, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x3b, 0xf3, 0x17, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xb7, 0xc5, 0x04, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xaa, 0xf9, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x9c, 0xf9, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x92, 0xa8, 0x98, 0x39, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xea, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x58, 0x76, 0xb9, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf9, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x43, 0xb8, 0x52, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x8b, 0xe5, 0x51, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70, 0xfb, 0xff, 0xff, - 
0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xa1, 0xf0, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0xa0, 0x70, 0x41, 0x01, 0x00, 0x00, 0x00, 0x87, 0x08, 0x65, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, - 0xda, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xfa, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xcc, 0x98, 0x41, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xed, 0xf5, 0xcd, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x5a, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x9d, 0xca, 0xd4, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x42, 0xfb, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x58, 0xce, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x49, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x06, 0x52, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xf2, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0xe4, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x9b, 0x9c, 0xe1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x32, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x24, 0xfc, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0xb6, 0xc3, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfc, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x0c, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x94, 0x8d, 0x93, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x06, 0xfa, 0x92, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xb8, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x64, 0xfc, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x7a, 0xf6, 0x5f, 0x3a, 0x01, 0x00, 0x00, 0x00, 0xba, 0xf4, 0xdf, 0x41, - 
0x01, 0x00, 0x00, 0x00, 0xf4, 0x7c, 0xcf, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x22, 0xfd, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x14, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x46, 0x2f, 0xc4, 0x35, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x54, 0xfd, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x8f, 0x3f, 0xe0, 0x3a, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0xa2, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x25, 0xd7, 0xa9, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x8a, 0xfd, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xc4, 0xf4, 0x39, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xf4, 0x1f, 0xe3, 0x41, 0x01, 0x00, 0x00, 0x00, 0xaa, 0x55, 0x8f, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x3a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xfe, 0xff, 0xff, - 
0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x4b, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x7a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xfe, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xd7, 0xdf, 0xc3, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfe, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x68, 0xa8, 0x04, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xc0, 0x23, 0x04, 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0xbc, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x3b, 0xda, 0x75, 0x3b, 0x01, 0x00, 0x00, 0x00, 0x4f, 0xd8, 0xf5, 0x42, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x2a, 0x61, 0xc2, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x7a, 0xff, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x6c, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 
0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xcf, 0x37, 0x69, 0x37, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xba, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0xac, 0xff, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x14, 0xd8, 0x72, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x30, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd4, 0x42, 0x16, 0x3c, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x10, 0x00, 0x0c, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x4c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x41, 0x5b, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x66, 0x66, 0x5a, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, - 0x0f, 0x00, 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, - 
0xa4, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, - 0x5c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x0b, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x96, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x72, - 0x9e, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, 0xa6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xae, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xb6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xbe, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x1b, 0xc6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xd6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xde, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xfa, 0xff, 0xff, 0xff, - 0x00, 0x1b, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x09, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x1b}; - -const unsigned int g_keyword_scrambled_model_data_length = 34520; diff --git a/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h b/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h deleted file mode 100644 index ce34426..0000000 --- a/src/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ -#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ - -extern const unsigned char g_keyword_scrambled_model_data[]; -extern const unsigned int g_keyword_scrambled_model_data_length; - -#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ diff --git a/src/tensorflow/lite/micro/benchmarks/micro_benchmark.h b/src/tensorflow/lite/micro/benchmarks/micro_benchmark.h index 83b5cbb..2775e26 100644 --- a/src/tensorflow/lite/micro/benchmarks/micro_benchmark.h +++ b/src/tensorflow/lite/micro/benchmarks/micro_benchmark.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,75 +18,48 @@ limitations under the License. 
#include -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_op_resolver.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" #include "tensorflow/lite/micro/micro_time.h" +#include "tensorflow/lite/micro/recording_micro_interpreter.h" -namespace micro_benchmark { -extern tflite::ErrorReporter* reporter; -} // namespace micro_benchmark - -#define TF_LITE_MICRO_BENCHMARKS_BEGIN \ - namespace micro_benchmark { \ - tflite::ErrorReporter* reporter; \ - } \ - \ - int main(int argc, char** argv) { \ - tflite::MicroErrorReporter error_reporter; \ - micro_benchmark::reporter = &error_reporter; \ - int32_t start_ticks; \ - int32_t duration_ticks; \ - int32_t duration_ms; - -#define TF_LITE_MICRO_BENCHMARKS_END \ - return 0; \ - } - -#define TF_LITE_MICRO_BENCHMARK(func) \ - if (tflite::ticks_per_second() == 0) { \ - TF_LITE_REPORT_ERROR(micro_benchmark::reporter, \ - "no timer implementation found"); \ - return 0; \ - } \ - start_ticks = tflite::GetCurrentTimeTicks(); \ - func; \ - duration_ticks = tflite::GetCurrentTimeTicks() - start_ticks; \ - if (duration_ticks > INT_MAX / 1000) { \ - duration_ms = duration_ticks / (tflite::ticks_per_second() / 1000); \ - } else { \ - duration_ms = (duration_ticks * 1000) / tflite::ticks_per_second(); \ - } \ - micro_benchmark::reporter->Report("%s took %d ticks (%d ms)", #func, \ - duration_ticks, duration_ms); +namespace tflite { template class MicroBenchmarkRunner { public: - // The lifetimes of model, op_resolver and tensor_arena must exceed that of - // the created MicroBenchmarkRunner object. + // The lifetimes of model, op_resolver, tensor_arena, profiler must exceed + // that of the created MicroBenchmarkRunner object. 
MicroBenchmarkRunner(const uint8_t* model, const tflite::MicroOpResolver* op_resolver, - uint8_t* tensor_arena, int tensor_arena_size) - : model_(tflite::GetModel(model)), - reporter_(µ_reporter_), - interpreter_(model_, *op_resolver, tensor_arena, tensor_arena_size, - reporter_) { + uint8_t* tensor_arena, int tensor_arena_size, + MicroProfilerInterface* profiler, + int num_resource_variables = 0) + : allocator_( + RecordingMicroAllocator::Create(tensor_arena, tensor_arena_size)), + interpreter_( + GetModel(model), *op_resolver, allocator_, + MicroResourceVariables::Create(allocator_, num_resource_variables), + profiler) { interpreter_.AllocateTensors(); } void RunSingleIteration() { // Run the model on this input and make sure it succeeds. TfLiteStatus invoke_status = interpreter_.Invoke(); - if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(reporter_, "Invoke failed."); + if (invoke_status == kTfLiteError) { + MicroPrintf("Invoke failed."); } } - void SetRandomInput(const int random_seed) { + int NumInputs() { return interpreter_.inputs().size(); } + + void SetRandomInput(const int random_seed, int input_index = 0) { // The pseudo-random number generator is initialized to a constant seed std::srand(random_seed); - TfLiteTensor* input = interpreter_.input(0); + TfLiteTensor* input = interpreter_.input(input_index); // Pre-populate input tensor with random values. 
int input_length = input->bytes / sizeof(inputT); @@ -99,8 +72,8 @@ class MicroBenchmarkRunner { } } - void SetInput(const inputT* custom_input) { - TfLiteTensor* input = interpreter_.input(0); + void SetInput(const inputT* custom_input, int input_index = 0) { + TfLiteTensor* input = interpreter_.input(input_index); inputT* input_buffer = tflite::GetTensorData(input); int input_length = input->bytes / sizeof(inputT); for (int i = 0; i < input_length; i++) { @@ -108,11 +81,15 @@ class MicroBenchmarkRunner { } } + void PrintAllocations() const { + interpreter_.GetMicroAllocator().PrintAllocations(); + } + private: - const tflite::Model* model_; - tflite::MicroErrorReporter micro_reporter_; - tflite::ErrorReporter* reporter_; - tflite::MicroInterpreter interpreter_; + tflite::RecordingMicroAllocator* allocator_; + tflite::RecordingMicroInterpreter interpreter_; }; -#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_MICRO_BENCHMARK_H_ +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_MICRO_BENCHMARK_H_ \ No newline at end of file diff --git a/src/tensorflow/lite/micro/compatibility.h b/src/tensorflow/lite/micro/compatibility.h index 49acb28..983de7d 100644 --- a/src/tensorflow/lite/micro/compatibility.h +++ b/src/tensorflow/lite/micro/compatibility.h @@ -22,7 +22,7 @@ limitations under the License. // nothing to avoid linking in ::delete(). // This macro needs to be included in all subclasses of a virtual base class in // the private section. -#ifdef TF_LITE_STATIC_MEMORY +#ifdef ARDUINO #define TF_LITE_REMOVE_VIRTUAL_DELETE \ void operator delete(void* p) {} #else diff --git a/src/tensorflow/lite/micro/compression.h b/src/tensorflow/lite/micro/compression.h new file mode 100644 index 0000000..f944a8a --- /dev/null +++ b/src/tensorflow/lite/micro/compression.h @@ -0,0 +1,68 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_ + +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +// +// Compressed tensors +// + +static constexpr const char* kCompressionMetadataString = + "COMPRESSION_METADATA"; + +enum class CompressionScheme : uint8_t { + kBinQuant, +}; + +struct LookupTableData { + static constexpr size_t kMaxBitWidth = 7; + static constexpr size_t kMaxValueTableChannelStride = 128; + + const void* value_table; // Pointer into FlatBuffer Values. + uint8_t value_table_channel_stride; // elements per channel + uint8_t compressed_bit_width : 3; // 1 to 7 bits + bool is_per_channel_quantized : 1; // tensor is per-channel quantized + bool use_alternate_axis : 1; // shape default channel: + // 0 = first, 1 = last + uint8_t reserved : 3; +}; + +union CompressionData { + LookupTableData* lut_data; +}; + +struct CompressionTensorData { + CompressionScheme scheme; + CompressionData data; +}; + +struct CompressedTensorList { + // Sparsely populated array with the same number of elements as there are + // tensors in the Subgraph. An alternative would include a tensor index in + // the struct for each and walk the list on look up. This could be slow. 
+ const CompressionTensorData** tensors; +}; + +} // namespace tflite + +#endif // USE_TFLM_COMPRESSION +#endif // TENSORFLOW_LITE_MICRO_MICRO_COMPRESSION_H_ diff --git a/src/tensorflow/lite/micro/debug_log.cpp b/src/tensorflow/lite/micro/debug_log.cpp new file mode 100644 index 0000000..f1babc1 --- /dev/null +++ b/src/tensorflow/lite/micro/debug_log.cpp @@ -0,0 +1,21 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// This file is empty to ensure that a specialized implementation of +// debug_log.h is used (instead of the default implementation from +// tensorflow/lite/micro/debug_log.cc). +// +// The actual target-specific implementation of debug_log.h is in +// system_setup.cc since that allows us to consolidate all the target-specific +// specializations into one source file. diff --git a/src/tensorflow/lite/micro/debug_log.h b/src/tensorflow/lite/micro/debug_log.h index c2840d0..6e2e69e 100644 --- a/src/tensorflow/lite/micro/debug_log.h +++ b/src/tensorflow/lite/micro/debug_log.h @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,25 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ #define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ +#ifdef __cplusplus +#include +#include +#else +#include +#include +#endif // __cplusplus + #ifdef __cplusplus extern "C" { #endif // __cplusplus -// This function should be implemented by each target platform, and provide a +// These functions should be implemented by each target platform, and provide a // way for strings to be output to some text stream. For more information, see -// tensorflow/lite/micro/debug_log.cc. -void DebugLog(const char* s); +// the tensorflow/lite/micro/debug_log.cc file. These functions should support +// standard C/C++ stdio style formatting operations. +void DebugLog(const char* format, va_list args); +int DebugVsnprintf(char* buffer, size_t buf_size, const char* format, + va_list vlist); #ifdef __cplusplus } // extern "C" diff --git a/src/tensorflow/lite/micro/fake_micro_context.cpp b/src/tensorflow/lite/micro/fake_micro_context.cpp new file mode 100644 index 0000000..8874798 --- /dev/null +++ b/src/tensorflow/lite/micro/fake_micro_context.cpp @@ -0,0 +1,184 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/fake_micro_context.h" + +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +FakeMicroContext::FakeMicroContext( + TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator, + MicroGraph* micro_graph +#ifdef USE_TFLM_COMPRESSION + , + const CompressedTensorList* compressed_tensors +#endif // USE_TFLM_COMPRESSION + ) + : graph_(*micro_graph), + tensors_(tensors), + allocator_(allocator) +#ifdef USE_TFLM_COMPRESSION + , + compressed_tensors_(compressed_tensors) +#endif // USE_TFLM_COMPRESSION +{ +} + +TfLiteTensor* FakeMicroContext::AllocateTempTfLiteTensor(int tensor_index) { + allocated_temp_count_++; + return &tensors_[tensor_index]; +} + +void FakeMicroContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + allocated_temp_count_--; +} + +bool FakeMicroContext::IsAllTempTfLiteTensorDeallocated() { + return !allocated_temp_count_; +} + +uint8_t* FakeMicroContext::AllocateTempBuffer(size_t size, size_t alignment) { + allocated_temp_count_++; + return allocator_->AllocateTemp(size, alignment); +} + +void FakeMicroContext::DeallocateTempBuffer(uint8_t* buffer) { + allocated_temp_count_--; + allocator_->DeallocateTemp(buffer); +} + +TfLiteEvalTensor* FakeMicroContext::GetEvalTensor(int tensor_index) { + TfLiteEvalTensor* eval_tensor = + reinterpret_cast(allocator_->AllocateTemp( + sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); + TFLITE_DCHECK(eval_tensor != nullptr); + + // In unit tests, the TfLiteTensor pointer contains the source of truth for + // buffers and values: + eval_tensor->data = tensors_[tensor_index].data; + eval_tensor->dims = tensors_[tensor_index].dims; + 
eval_tensor->type = tensors_[tensor_index].type; + return eval_tensor; +} + +void* FakeMicroContext::AllocatePersistentBuffer(size_t bytes) { + // FakeMicroContext use SingleArenaBufferAllocator, which does not + // automatically apply the buffer alignment like MicroAllocator. The buffer + // alignment is potentially wasteful but allows the fake_micro_context to work + // correctly with optimized kernels. + return allocator_->AllocatePersistentBuffer(bytes, + MicroArenaBufferAlignment()); +} + +TfLiteStatus FakeMicroContext::RequestScratchBufferInArena(size_t bytes, + int* buffer_index) { + TFLITE_DCHECK(buffer_index != nullptr); + + if (scratch_buffer_count_ == kNumScratchBuffers_) { + MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).", + kNumScratchBuffers_); + return kTfLiteError; + } + + // For tests, we allocate scratch buffers from the tail and keep them around + // for the lifetime of model. This means that the arena size in the tests will + // be more than what we would have if the scratch buffers could share memory. + scratch_buffers_[scratch_buffer_count_] = + allocator_->AllocatePersistentBuffer(bytes, MicroArenaBufferAlignment()); + TFLITE_DCHECK(scratch_buffers_[scratch_buffer_count_] != nullptr); + + *buffer_index = scratch_buffer_count_++; + return kTfLiteOk; +} + +void* FakeMicroContext::GetScratchBuffer(int buffer_index) { + TFLITE_DCHECK(scratch_buffer_count_ <= kNumScratchBuffers_); + if (buffer_index >= scratch_buffer_count_) { + return nullptr; + } + return scratch_buffers_[buffer_index]; +} + +TfLiteStatus FakeMicroContext::set_external_context( + void* external_context_payload) { + return kTfLiteError; +} + +void* FakeMicroContext::external_context() { return nullptr; } + +MicroGraph& FakeMicroContext::graph() { return graph_; } + +#ifdef USE_TFLM_COMPRESSION + +// Available during Prepare & Eval. Returns false if tensor is not +// compressed. 
+bool FakeMicroContext::IsTensorCompressed(const TfLiteNode* node, + int tensor_idx) { + if (compressed_tensors_ != nullptr && tensor_idx < node->inputs->size) { + int index = node->inputs->data[tensor_idx]; + if (index >= 0 && compressed_tensors_->tensors[index] != nullptr) { + return true; + } + } + + return false; +} + +// Only available during Prepare. The kernel is responsible for storing the +// scratch buffer handle. +int FakeMicroContext::AllocateDecompressionScratchBuffer(const TfLiteNode* node, + int tensor_idx) { + if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) { + return -1; + } + int index = node->inputs->data[tensor_idx]; + if (index < 0 || compressed_tensors_->tensors[index] == nullptr) { + return -1; + } + TfLiteTensor* tensor = &tensors_[index]; + int scratch_index = -1; + TfLiteStatus result = + RequestScratchBufferInArena(tensor->bytes, &scratch_index); + if (result != kTfLiteOk) { + return -1; + } + + return scratch_index; +} + +// Available during Prepare & Eval. Returns nullptr if tensor is not +// compressed. +const CompressionTensorData* FakeMicroContext::GetTensorCompressionData( + const TfLiteNode* node, int tensor_idx) { + if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) { + return nullptr; + } + + int index = node->inputs->data[tensor_idx]; + if (index < 0) { + return nullptr; + } + + return compressed_tensors_->tensors[index]; +} + +#endif // USE_TFLM_COMPRESSION + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/fake_micro_context.h b/src/tensorflow/lite/micro/fake_micro_context.h new file mode 100644 index 0000000..7cf9c68 --- /dev/null +++ b/src/tensorflow/lite/micro/fake_micro_context.h @@ -0,0 +1,102 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ +#define TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ + +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_graph.h" + +namespace tflite { +// A fake of MicroContext for kernel util tests. +// TODO(b/272759060): FakeMicroContext currently inherits from MicroContext. +// Which allow tests to use functions from MicroContext that weren't added to +// FakeMicroContext in tests. This should be looked into further. + +class FakeMicroContext : public MicroContext { + public: + ~FakeMicroContext() = default; + + FakeMicroContext(TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator, + MicroGraph* micro_graph +#ifdef USE_TFLM_COMPRESSION + , + const CompressedTensorList* compressed_tensors = nullptr +#endif // USE_TFLM_COMPRESSION + ); + + void* AllocatePersistentBuffer(size_t bytes) override; + TfLiteStatus RequestScratchBufferInArena(size_t bytes, + int* buffer_index) override; + void* GetScratchBuffer(int buffer_index) override; + + TfLiteTensor* AllocateTempTfLiteTensor(int tensor_index) override; + void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) override; + bool IsAllTempTfLiteTensorDeallocated(); + + uint8_t* AllocateTempBuffer(size_t size, size_t alignment) override; + void DeallocateTempBuffer(uint8_t* buffer) override; + + TfLiteEvalTensor* GetEvalTensor(int tensor_index) override; + + TfLiteStatus set_external_context(void* external_context_payload) override; + void* external_context() 
override; + MicroGraph& graph() override; + +#ifdef USE_TFLM_COMPRESSION + + // Available during Prepare & Eval. Returns false if tensor is not + // compressed. + bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) override; + + // Only available during Prepare. The kernel is responsible for storing the + // scratch buffer handle. + int AllocateDecompressionScratchBuffer(const TfLiteNode* node, + int tensor_idx) override; + + // Available during Prepare & Eval. Returns nullptr if tensor is not + // compressed. + const CompressionTensorData* GetTensorCompressionData( + const TfLiteNode* node, int tensor_idx) override; + +#endif // USE_TFLM_COMPRESSION + + private: + static constexpr int kNumScratchBuffers_ = 12; + + MicroGraph& graph_; + int scratch_buffer_count_ = 0; + uint8_t* scratch_buffers_[kNumScratchBuffers_]; + + TfLiteTensor* tensors_; + int allocated_temp_count_ = 0; + + SingleArenaBufferAllocator* allocator_; + +#ifdef USE_TFLM_COMPRESSION + + // + // Compression + // + const CompressedTensorList* compressed_tensors_; + +#endif // USE_TFLM_COMPRESSION + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ diff --git a/src/tensorflow/lite/micro/flatbuffer_utils.cpp b/src/tensorflow/lite/micro/flatbuffer_utils.cpp new file mode 100644 index 0000000..9996172 --- /dev/null +++ b/src/tensorflow/lite/micro/flatbuffer_utils.cpp @@ -0,0 +1,84 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/flatbuffer_utils.h" + +namespace tflite { + +FlexbufferWrapper::FlexbufferWrapper(const uint8_t* buffer, size_t size) + : flexbuffers::Vector(flexbuffers::GetRoot(buffer, size).AsVector()) {} + +int64_t FlexbufferWrapper::ElementAsInt64(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadInt64(elem, byte_width_); +} + +uint64_t FlexbufferWrapper::ElementAsUInt64(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadUInt64(elem, byte_width_); +} + +int32_t FlexbufferWrapper::ElementAsInt32(size_t i) const { + return static_cast(ElementAsInt64(i)); +} + +bool FlexbufferWrapper::ElementAsBool(size_t i) const { + return static_cast(ElementAsUInt64(i)); +} + +double FlexbufferWrapper::ElementAsDouble(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadDouble(elem, byte_width_); +} + +float FlexbufferWrapper::ElementAsFloat(size_t i) const { + return static_cast(FlexbufferWrapper::ElementAsDouble(i)); +} + +// TODO(b/192589496): Ops must always be there. Remove this function when fixed +uint32_t NumSubgraphOperators(const SubGraph* subgraph) { + if (subgraph->operators() != nullptr) { + return subgraph->operators()->size(); + } else { + return 0; + } +} +// TODO(b/192589496): Ops must always be there. 
Remove this function when fixed +uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + return NumSubgraphOperators(subgraph); +} + +TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array) { + // On little-endian machines, TfLiteIntArray happens to have the same memory + // layout as flatbuffers:Vector, so we can reinterpret_cast the + // flatbuffer vector and avoid a copy and malloc. + // TODO(b/188459715): audit this usage of const_cast. + return const_cast( + reinterpret_cast(flatbuffer_array)); +} + +TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array) { + // On little-endian machines, TfLiteFloatArray happens to have the same memory + // layout as flatbuffers:Vector, so we can reinterpret_cast the + // flatbuffer vector and avoid a copy and malloc. + // TODO(b/188459715): audit this usage of const_cast. + return const_cast( + reinterpret_cast(flatbuffer_array)); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/flatbuffer_utils.h b/src/tensorflow/lite/micro/flatbuffer_utils.h new file mode 100644 index 0000000..e3f4210 --- /dev/null +++ b/src/tensorflow/lite/micro/flatbuffer_utils.h @@ -0,0 +1,65 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ +#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "third_party/flatbuffers/include/flatbuffers/flexbuffers.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +// Kernels use flexbuffers::Map to pack their init parameters in a tflite file, +// with the parameter names as map keys and the parameter values as the +// corresponding map values. +// Accessing the map values using the flexbuffers:Map class is inline heavy, +// which can cause the code size to bloat beyond what's reasonable for a micro +// application. Use this class instead, when possible. +// FlexbufferWrapper takes advantage of the following properties of +// flexbuffers::Map: +// 1. It can be viewed as a flexbuffers::Vector of the values. +// 2. The values in the vector are ordered alphabetically by their keys. +// 3. All integer and Boolean values are stored as 64-bit numbers. +// 4. All floating point values are stored as double precision numbers. +// The properties are mentioned in the flexbuffers docs, but we rely on +// a unit test to catch design changes. 
+class FlexbufferWrapper : public flexbuffers::Vector { + public: + // Construct with a serialized flexbuffer 'buffer' of 'size' bytes + explicit FlexbufferWrapper(const uint8_t* buffer, size_t size); + int64_t ElementAsInt64(size_t i) const; + uint64_t ElementAsUInt64(size_t i) const; + int32_t ElementAsInt32(size_t i) const; + bool ElementAsBool(size_t i) const; + double ElementAsDouble(size_t i) const; + float ElementAsFloat(size_t i) const; +}; + +// Return the number of operators in a subgraph tflite +uint32_t NumSubgraphOperators(const SubGraph* subgraph); +uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx); + +// Converts a flatbuffer array to a TfLiteArray. +// TODO(b/188459715): These function convert a const input to a non-const via a +// const_cast. It is unclear exactly why this is required. +TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array); +TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array); + +} // namespace tflite + +#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ diff --git a/src/tensorflow/lite/micro/hexdump.cpp b/src/tensorflow/lite/micro/hexdump.cpp new file mode 100644 index 0000000..fd0f6f7 --- /dev/null +++ b/src/tensorflow/lite/micro/hexdump.cpp @@ -0,0 +1,103 @@ +// Copyright 2024 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "tensorflow/lite/micro/hexdump.h" + +#include +#include + +#include "tensorflow/lite/micro/debug_log.h" +#include "tensorflow/lite/micro/static_vector.h" + +namespace { + +tflite::Span output(const tflite::Span& buf, const char* format, + ...) { + // Writes formatted output, printf-style, to either a buffer or DebugLog. + // Writes to DebugLog if the buffer data pointer is null. Does not exceed + // the size of the buffer. Returns the unused remainder of the buffer, or a + // buffer with a null data pointer in the case of printing to DebugLog. + + tflite::Span result{nullptr, 0}; + + va_list args; + va_start(args, format); + + if (buf.data() == nullptr) { + DebugLog(format, args); + result = {nullptr, 0}; + } else { + size_t len = DebugVsnprintf(buf.data(), buf.size(), format, args); + // Returns the number of characters that would have been written if + // there were enough room, so cap it at the size of the buffer in order to + // know how much was actually written. + size_t consumed = std::min(len, buf.size()); + result = {buf.data() + consumed, buf.size() - consumed}; + } + + va_end(args); + return result; +} + +} // end anonymous namespace + +tflite::Span tflite::hexdump(const tflite::Span region, + const tflite::Span out) { + tflite::Span buffer{out}; + std::size_t byte_nr = 0; + constexpr int per_line = 16; + const int lines = (region.size() + per_line - 1) / per_line; // round up + + for (int line = 0; line < lines; ++line) { + tflite::StaticVector ascii; + + // print address + buffer = output(buffer, "%08X:", line); + + for (int pos = 0; pos < per_line; ++pos) { + if (byte_nr < region.size()) { + // print byte + int as_int = static_cast(region[byte_nr++]); + buffer = output(buffer, " %02X", as_int); + + // buffer an ascii printable value + char c{'.'}; + if (std::isprint(as_int)) { + c = static_cast(as_int); + } + ascii.push_back(c); + } else { + buffer = output(buffer, " "); + } + + // print extra space in middle of the line + if (pos == 
per_line / 2 - 1) { + buffer = output(buffer, " "); + } + } + + // print the ascii value + buffer = output(buffer, " "); + for (const auto& c : ascii) { + buffer = output(buffer, "%c", c); + } + buffer = output(buffer, "%c", '\n'); + } + + return {out.data(), out.size() - buffer.size()}; +} + +void tflite::hexdump(const tflite::Span region) { + hexdump(region, {nullptr, 0}); +} diff --git a/src/tensorflow/lite/micro/hexdump.h b/src/tensorflow/lite/micro/hexdump.h new file mode 100644 index 0000000..0bdfcc4 --- /dev/null +++ b/src/tensorflow/lite/micro/hexdump.h @@ -0,0 +1,35 @@ +// Copyright 2024 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef TENSORFLOW_LITE_MICRO_HEXDUMP_H_ +#define TENSORFLOW_LITE_MICRO_HEXDUMP_H_ + +#include + +#include "tensorflow/lite/micro/span.h" + +namespace tflite { + +// Displays the contents of a memory region, formatted in hexadecimal and ASCII +// in a style matching Python's hexdump module, using DebugLog(). +void hexdump(Span region); + +// Writes the contents of a memory region, formatted in hexadecimal and ASCII +// in a style matching Python's hexdump module, to a buffer. Returns the portion +// of the buffer written. 
+Span hexdump(Span region, Span buffer); + +} // end namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_HEXDUMP_H_ diff --git a/src/tensorflow/lite/micro/hexdump_test.cpp b/src/tensorflow/lite/micro/hexdump_test.cpp new file mode 100644 index 0000000..89d3a04 --- /dev/null +++ b/src/tensorflow/lite/micro/hexdump_test.cpp @@ -0,0 +1,58 @@ +// Copyright 2024 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tensorflow/lite/micro/hexdump.h" + +#include + +#include "tensorflow/lite/micro/span.h" +#include "tensorflow/lite/micro/testing/micro_test.h" + +constexpr tflite::Span input{ + "This is an input string for testing."}; + +const tflite::Span region{ + reinterpret_cast(input.data()), input.size()}; + +// clang-format off +constexpr tflite::Span expected{ + "00000000: 54 68 69 73 20 69 73 20 61 6E 20 69 6E 70 75 74 This is an input\n" + "00000001: 20 73 74 72 69 6E 67 20 66 6F 72 20 74 65 73 74 string for test\n" + "00000002: 69 6E 67 2E 00 ing..\n"}; +// clang-format on + +// String literals have null terminators, but don't expect a null terminator +// in the hexdump output. +constexpr tflite::Span expected_no_null{expected.data(), + expected.size() - 1}; + +TF_LITE_MICRO_TESTS_BEGIN + +TF_LITE_MICRO_TEST(TestOutputToBuffer) { + // Allocate a buffer with an arbitrary amount of extra room so the test has + // the possibility of failing if hexdump mishandles the extra space. 
+ std::array buffer; + + tflite::Span output = tflite::hexdump(region, buffer); + TF_LITE_MICRO_EXPECT(output == expected_no_null); +} + +TF_LITE_MICRO_TEST(TestOutputToDebugLog) { + // There's no easy way to verify DebugLog output; however, test it anyhow to + // catch an outright crash, and so the output appears in the log should + // someone wish to examine it. + tflite::hexdump(region); +} + +TF_LITE_MICRO_TESTS_END diff --git a/src/tensorflow/lite/micro/kernels/activations.cpp b/src/tensorflow/lite/micro/kernels/activations.cpp index a92d5c7..6772e47 100644 --- a/src/tensorflow/lite/micro/kernels/activations.cpp +++ b/src/tensorflow/lite/micro/kernels/activations.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/lite/micro/kernels/activations.h" + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" @@ -22,144 +24,25 @@ limitations under the License. 
#include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { namespace { -struct ReluOpData { - ReluParams params; -}; - -struct Relu6OpData { - int8_t six_int8; - int8_t zero_int8; - uint8_t six_uint8; - uint8_t zero_uint8; -}; - -} // namespace - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -template -inline void ReluQuantized(const ReluOpData& data, - const RuntimeShape& input_shape, - const RuntimeShape& output_shape, const T* input_data, - T* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const int32_t val = static_cast(input_data[i]); - int32_t clamped = - data.params.output_offset + - MultiplyByQuantizedMultiplier(val - data.params.input_offset, - data.params.output_multiplier, - data.params.output_shift); - clamped = std::max(data.params.quantized_activation_min, clamped); - clamped = std::min(data.params.quantized_activation_max, clamped); - output_data[i] = static_cast(clamped); - } -} - -template -inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, - ReluOpData* data) { - float act_min = 0.0; - float act_max = std::numeric_limits::infinity(); - double real_multiplier = - static_cast(input->params.scale / output->params.scale); - - const RuntimeShape input_shape = GetTensorShape(input); - const RuntimeShape output_shape = GetTensorShape(output); - - QuantizeMultiplier(real_multiplier, &data->params.output_multiplier, - &data->params.output_shift); - - data->params.quantized_activation_min = std::max( - static_cast(std::numeric_limits::min()), - output->params.zero_point + - static_cast(roundf(act_min / output->params.scale))); - data->params.quantized_activation_max = 
- act_max == std::numeric_limits::infinity() - ? static_cast(std::numeric_limits::max()) - : std::min(static_cast(std::numeric_limits::max()), - output->params.zero_point + - static_cast( - roundf(act_max / output->params.scale))); - data->params.input_offset = input->params.zero_point; - data->params.output_offset = output->params.zero_point; -} - -inline void ReluFloat(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float lower = 0.0f; - const float clamped = val < lower ? lower : val; - output_data[i] = clamped; - } -} - -inline void Relu6Float(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float upper = 6.0f; - const float lower = 0.0f; - const float clamped = val > upper ? upper : val < lower ? lower : val; - output_data[i] = clamped; - } -} - -template -inline void Relu6Quantized(Q lower, Q upper, const RuntimeShape& input_shape, - const Q* input_data, - const RuntimeShape& output_shape, Q* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const Q val = input_data[i]; - const Q clamped = val > upper ? upper : val < lower ? 
lower : val; - output_data[i] = clamped; - } -} - void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(ReluOpData)); } -TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - ReluOpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - if (input->type == kTfLiteInt8) { - CalculateReluOpData(input, output, data); - } else if (input->type == kTfLiteUInt8) { - CalculateReluOpData(input, output, data); - } - - return kTfLiteOk; -} - TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); const ReluOpData& data = *(static_cast(node->user_data)); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kActivationsInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor); switch (input->type) { case kTfLiteFloat32: { @@ -171,22 +54,15 @@ TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } case kTfLiteInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), + tflite::ReluQuantized(data, tflite::micro::GetTensorShape(input), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(input), tflite::micro::GetTensorData(output)); return kTfLiteOk; } - case kTfLiteUInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return 
kTfLiteOk; - } default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); + MicroPrintf("Only float32 is supported currently, got %s", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } @@ -197,34 +73,15 @@ void* Relu6Init(TfLiteContext* context, const char* buffer, size_t length) { return context->AllocatePersistentBuffer(context, sizeof(Relu6OpData)); } -TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - Relu6OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - - if (input->type == kTfLiteInt8) { - data->six_int8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_int8 = input->params.zero_point; - } else if (input->type == kTfLiteUInt8) { - data->six_uint8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_uint8 = input->params.zero_point; - } - - return kTfLiteOk; -} - TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); + const Relu6OpData& data = *(static_cast(node->user_data)); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kActivationsInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor); switch (input->type) { case kTfLiteFloat32: { @@ -236,53 +93,37 @@ TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } case kTfLiteInt8: { - Relu6Quantized(data.zero_int8, data.six_int8, + Relu6Quantized(data.zero, data.six, tflite::micro::GetTensorShape(input), tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), 
tflite::micro::GetTensorData(output)); return kTfLiteOk; } - case kTfLiteUInt8: { - Relu6Quantized(data.zero_uint8, data.six_uint8, + case kTfLiteInt16: { + Relu6Quantized(data.zero, data.six, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); return kTfLiteOk; } default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); + MicroPrintf("Only float32 is supported currently, got %s", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } } -} // namespace activations +} // namespace -TfLiteRegistration Register_RELU() { - return {/*init=*/activations::ReluInit, - /*free=*/nullptr, - /*prepare=*/activations::ReluPrepare, - /*invoke=*/activations::ReluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_RELU() { + return tflite::micro::RegisterOp(ReluInit, ReluPrepare, ReluEval); } -TfLiteRegistration Register_RELU6() { - return {/*init=*/activations::Relu6Init, - /*free=*/nullptr, - /*prepare=*/activations::Relu6Prepare, - /*invoke=*/activations::Relu6Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_RELU6() { + return tflite::micro::RegisterOp(Relu6Init, Relu6Prepare, Relu6Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/activations.h b/src/tensorflow/lite/micro/kernels/activations.h new file mode 100644 index 0000000..eaf93c2 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/activations.h @@ -0,0 +1,71 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +extern const int kActivationsInputTensor; +extern const int kActivationsOutputTensor; + +struct ReluOpData { + ReluParams params; +}; + +struct Relu6OpData { + int32_t six; + int32_t zero; +}; + +void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const int8_t* input_data, + int8_t* output_data); + +template +void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, + ReluOpData* data); + +void ReluFloat(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data); + +void Relu6Float(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data); + +template +void Relu6Quantized(T lower, T upper, const RuntimeShape& input_shape, + const T* input_data, const RuntimeShape& output_shape, + T* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const T val = input_data[i]; + const T clamped = val > upper ? upper : val < lower ? 
lower : val; + output_data[i] = clamped; + } +} + +TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ diff --git a/src/tensorflow/lite/micro/kernels/activations_common.cpp b/src/tensorflow/lite/micro/kernels/activations_common.cpp new file mode 100644 index 0000000..9006244 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/activations_common.cpp @@ -0,0 +1,154 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/activations.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +const int kActivationsInputTensor = 0; +const int kActivationsOutputTensor = 0; + +void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const int8_t* input_data, + int8_t* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const int32_t val = static_cast(input_data[i]); + int32_t clamped = + data.params.output_offset + + MultiplyByQuantizedMultiplier(val - data.params.input_offset, + data.params.output_multiplier, + data.params.output_shift); + clamped = std::max(data.params.quantized_activation_min, clamped); + clamped = std::min(data.params.quantized_activation_max, clamped); + output_data[i] = static_cast(clamped); + } +} + +template +void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, + ReluOpData* data) { + float act_min = 0.0; + float act_max = std::numeric_limits::infinity(); + double real_multiplier = + static_cast(input->params.scale / output->params.scale); + + const RuntimeShape input_shape = GetTensorShape(input); + const RuntimeShape output_shape = GetTensorShape(output); + + QuantizeMultiplier(real_multiplier, &data->params.output_multiplier, + &data->params.output_shift); + + 
data->params.quantized_activation_min = std::max( + static_cast(std::numeric_limits::min()), + output->params.zero_point + + static_cast(roundf(act_min / output->params.scale))); + data->params.quantized_activation_max = + act_max == std::numeric_limits::infinity() + ? static_cast(std::numeric_limits::max()) + : std::min(static_cast(std::numeric_limits::max()), + output->params.zero_point + + static_cast( + roundf(act_max / output->params.scale))); + data->params.input_offset = input->params.zero_point; + data->params.output_offset = output->params.zero_point; +} + +void ReluFloat(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + const float lower = 0.0f; + const float clamped = val < lower ? lower : val; + output_data[i] = clamped; + } +} + +void Relu6Float(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + const float upper = 6.0f; + const float lower = 0.0f; + const float clamped = val > upper ? upper : val < lower ? 
lower : val; + output_data[i] = clamped; + } +} + +TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + ReluOpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kActivationsInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kActivationsOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + if (input->type == kTfLiteInt8) { + CalculateReluOpData(input, output, data); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + + Relu6OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kActivationsInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + + if (input->type == kTfLiteInt8) { + data->zero = input->params.zero_point; + data->six = FloatToQuantizedType(6.0f, input->params.scale, + input->params.zero_point); + TF_LITE_ENSURE(context, data->six >= INT8_MIN && data->six <= INT8_MAX); + } else if (input->type == kTfLiteInt16) { + data->zero = input->params.zero_point; + data->six = FloatToQuantizedType(6.0f, input->params.scale, + input->params.zero_point); + TF_LITE_ENSURE(context, data->six >= INT16_MIN && data->six <= INT16_MAX); + } + + micro_context->DeallocateTempTfLiteTensor(input); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/add.h b/src/tensorflow/lite/micro/kernels/add.h new file mode 100644 index 0000000..267c13c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/add.h @@ -0,0 +1,78 @@ +/* Copyright 2022 
The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +extern const int kAddInputTensor1; +extern const int kAddInputTensor2; +extern const int kAddOutputTensor; + +struct OpDataAdd { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; +}; + +TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataAdd* data); + +TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node); + +// Generic must define 
registration function. +TFLMRegistration Register_ADD(); + +#if defined(ARDUINO) +TFLMRegistration Register_ADD_INT8(); + +TFLMRegistration Register_ADD_INT16(); +#else +// Fallback registration +inline TFLMRegistration Register_ADD_INT8() { return Register_ADD(); } + +inline TFLMRegistration Register_ADD_INT16() { return Register_ADD(); } +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ diff --git a/src/tensorflow/lite/micro/kernels/add_common.cpp b/src/tensorflow/lite/micro/kernels/add_common.cpp new file mode 100644 index 0000000..cc94509 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/add_common.cpp @@ -0,0 +1,116 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/add.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/add.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" + +namespace tflite { + +const int kAddInputTensor1 = 0; +const int kAddInputTensor2 = 1; +const int kAddOutputTensor = 0; + +TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataAdd* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); + + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = (output->type == kTfLiteInt16) ? 
15 : 20; + const double twice_max_input_scale = + 2 * static_cast( + std::max(input1->params.scale, input2->params.scale)); + const double real_input1_multiplier = + static_cast(input1->params.scale) / twice_max_input_scale; + const double real_input2_multiplier = + static_cast(input2->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } else if (output->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + return kTfLiteOk; +} + +TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kAddInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kAddInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kAddOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + OpDataAdd* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataAdd(context, params, input1, input2, output, 
data)); + + if (output->type == kTfLiteInt32) { + // Only support int32 unquantized add for now. + TF_LITE_ENSURE_EQ(context, input1->quantization.type, + kTfLiteNoQuantization); + TF_LITE_ENSURE_EQ(context, input2->quantization.type, + kTfLiteNoQuantization); + } + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/add_n.cpp b/src/tensorflow/lite/micro/kernels/add_n.cpp new file mode 100644 index 0000000..765d5d6 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/add_n.cpp @@ -0,0 +1,215 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/add_n.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor0 = 0; +constexpr int kOutputTensor = 0; + +constexpr int kAddNIntegerShift = 20; + +// only used with INT8 tensors +struct OpData { + int32_t output_activation_min; + int32_t output_activation_max; + int32_t input_offset; + int32_t output_offset; + int32_t input_multiplier; + int32_t output_multiplier; + int input_shift; + int output_shift; + int left_shift; + int scratch_index; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + int num_inputs = NumInputs(node); + TF_LITE_ENSURE(context, num_inputs >= 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input_tensor_first = + micro_context->AllocateTempInputTensor(node, kInputTensor0); + TF_LITE_ENSURE(context, input_tensor_first != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Check that all tensors have the same shape and type. 
+ TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type); + for (int i = kInputTensor0 + 1; i < num_inputs; ++i) { + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input)); + TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type); + + // Check that all INT8 input tensors have the same zero-point and scale. + if (input_tensor_first->type == kTfLiteInt8) { + TF_LITE_ENSURE(context, input_tensor_first->params.zero_point == + input->params.zero_point); + TF_LITE_ENSURE(context, + input_tensor_first->params.scale == input->params.scale); + } + + micro_context->DeallocateTempTfLiteTensor(input); + } + + if (output->type == kTfLiteFloat32) { + // Allocate scratch buffer space for pointer to each tensor's data + // and store the scratch buffer index in the node's user_data + int scratch_index; + size_t scratch_size = sizeof(float*) * num_inputs; + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, scratch_size, &scratch_index)); + node->user_data = + reinterpret_castuser_data)>(scratch_index); + } else if (output->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* data = static_cast(node->user_data); + + // Allocate scratch buffer space for pointer to each tensor's data + // and store the scratch buffer index in OpData + size_t scratch_size = sizeof(int8_t*) * num_inputs; + TF_LITE_ENSURE_OK( + context, context->RequestScratchBufferInArena(context, scratch_size, + &data->scratch_index)); + + // 8bit -> 8bit general quantized path, with general rescalings + data->input_offset = -input_tensor_first->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = kAddNIntegerShift; + const double twice_max_input_scale = + 2 * static_cast(input_tensor_first->params.scale); + const double 
real_input_multiplier = + static_cast(input_tensor_first->params.scale) / + twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input_multiplier, &data->input_multiplier, &data->input_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, kTfLiteActNone, output, &data->output_activation_min, + &data->output_activation_max)); + } else { + MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input_tensor_first); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +inline const T** CopyInputsToScratchBuffer(TfLiteContext* context, + TfLiteNode* node, + const int scratch_index) { + int num_inputs = NumInputs(node); + void* scratch_buffer = context->GetScratchBuffer(context, scratch_index); + const T** all_inputs = static_cast(scratch_buffer); + for (int i = 0; i < num_inputs; i++) { + const TfLiteEvalTensor* next_input = + tflite::micro::GetEvalInput(context, node, kInputTensor0 + i); + all_inputs[i] = tflite::micro::GetTensorData(next_input); + } + + return all_inputs; +} + +template +void EvalAddN(TfLiteContext* context, TfLiteNode* node, + TfLiteEvalTensor* output) { + int num_inputs = NumInputs(node); + + int scratch_index = + static_cast(reinterpret_cast(node->user_data)); + const T** all_inputs = + CopyInputsToScratchBuffer(context, node, scratch_index); + + reference_ops::AddN(tflite::micro::GetTensorShape(output), num_inputs, + all_inputs, tflite::micro::GetTensorData(output)); +} + +template +void 
EvalAddNQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteEvalTensor* output) { + int num_inputs = NumInputs(node); + + OpData* data = static_cast(node->user_data); + const T** all_inputs = + CopyInputsToScratchBuffer(context, node, data->scratch_index); + + ArithmeticParams params; + params.left_shift = data->left_shift; + params.input1_offset = data->input_offset; + params.input1_multiplier = data->input_multiplier; + params.input1_shift = data->input_shift; + params.output_offset = data->output_offset; + params.output_multiplier = data->output_multiplier; + params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + ¶ms); + + reference_ops::AddN(params, tflite::micro::GetTensorShape(output), num_inputs, + all_inputs, tflite::micro::GetTensorData(output)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + if (output->type == kTfLiteFloat32) { + EvalAddN(context, node, output); + } else if (output->type == kTfLiteInt8) { + EvalAddNQuantized(context, node, output); + } else { + MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_ADD_N() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/arg_min_max.cpp b/src/tensorflow/lite/micro/kernels/arg_min_max.cpp index 12ac001..2ba058c 100644 --- a/src/tensorflow/lite/micro/kernels/arg_min_max.cpp +++ b/src/tensorflow/lite/micro/kernels/arg_min_max.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -17,15 +17,15 @@ limitations under the License. #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/comparisons.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/micro_utils.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace arg_min_max { + +namespace { constexpr int kInputTensor = 0; constexpr int kAxis = 1; @@ -36,12 +36,17 @@ inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, const T1* input1_data, const T3* input2_data, const RuntimeShape& output_shape, T2* output_data, bool is_arg_max) { + // Use Greater/Less from comparisons.h (formerly from kernels/micro_utils.h + // which was deprecated). Same as gtl::Greater but used here to reduce + // dependencies and binary size for micro environment. 
if (is_arg_max) { reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Greater()); + output_shape, output_data, + reference_ops::GreaterFn); } else { reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Less()); + output_shape, output_data, + reference_ops::LessFn); } } @@ -66,28 +71,24 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; - case kTfLiteUInt8: - TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); - break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; default: - TF_LITE_KERNEL_LOG(context, - "Only float32, uint8_t and int8_t are " - "supported currently, got %s.", - TfLiteTypeGetName(input->type)); + MicroPrintf( + "Only float32, uint8_t and int8_t are " + "supported currently, got %s.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, - "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Only int32_t are supported currently, got %s.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(axis->type)); + MicroPrintf("Only int32_t are supported currently, got %s.", + TfLiteTypeGetName(axis->type)); return kTfLiteError; } @@ -104,30 +105,14 @@ TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, true); } -} // namespace arg_min_max - -TfLiteRegistration Register_ARG_MAX() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_ARG_MAX() { + return tflite::micro::RegisterOp(nullptr, nullptr, 
ArgMaxEval); } -TfLiteRegistration Register_ARG_MIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_ARG_MIN() { + return tflite::micro::RegisterOp(nullptr, nullptr, ArgMinEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/assign_variable.cpp b/src/tensorflow/lite/micro/kernels/assign_variable.cpp new file mode 100644 index 0000000..9374279 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/assign_variable.cpp @@ -0,0 +1,154 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" +#include "tensorflow/lite/micro/micro_utils.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +constexpr int kInputVariableId = 0; +constexpr int kInputValue = 1; + +#ifdef USE_TFLM_COMPRESSION + +struct OpData { + // scratch buffer for compressed input tensor + int scratch_index; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +#endif // USE_TFLM_COMPRESSION + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); + + // This must be a TfLiteEvalTensor despite this being in Prepare, because + // CreateTensor allocates a temp tensor from the flatbuffer, which does not + // contain the correct ID generated within the VAR_HANDLE op. EvalTensors are + // all allocated during StartModelAllocation which happens before + // init/prepare, and VAR_HANDLE Prepare() references its own op_data in the + // TfLiteEvalTensor, so reading the ID here is valid. 
+ const TfLiteEvalTensor* input_resource_id_tensor = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + TF_LITE_ENSURE(context, (input_resource_id_tensor->type == kTfLiteResource || + input_resource_id_tensor->type == kTfLiteInt32)); + TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor->dims), 1); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + TfLiteTensor* input_value = + micro_context->AllocateTempInputTensor(node, kInputValue); + TFLITE_DCHECK(input_value != nullptr); + + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + // If the data field of this tensor is nullptr, we assume that this is a case + // of using resource variables in another subgraph, and the resource_id + // will be valid during Eval time. In case it wasn't valid, this will + // still be caught during Invoke. More info in b/277231654. + if (input_resource_id_tensor->data.i32 != nullptr) { + TF_LITE_ENSURE_OK(context, + resources->Allocate(input_resource_id_tensor->data.i32[0], + context, input_value)); + } + +#ifdef USE_TFLM_COMPRESSION + + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = static_cast(node->user_data); + // Compression scratch buffers. + // These will only be allocated if the tensor is compressed. 
+ data->scratch_index = + micro_context->AllocateDecompressionScratchBuffer(node, kInputValue); + +#endif // USE_TFLM_COMPRESSION + + micro_context->DeallocateTempTfLiteTensor(input_value); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input_id = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_id != nullptr); + + const TfLiteEvalTensor* input_value = + tflite::micro::GetEvalInput(context, node, kInputValue); + TFLITE_DCHECK(input_value != nullptr); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "ASSIGN_VARIABLE requires resource variables. Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + +#ifdef USE_TFLM_COMPRESSION + OpData* data = static_cast(node->user_data); + const CompressionTensorData* comp_td = + micro_context->GetTensorCompressionData(node, kInputValue); + const void* buffer = tflite::micro::GetTensorData( + micro_context, input_value, comp_td, data->scratch_index); +#else // USE_TFLM_COMPRESSION + const void* buffer = tflite::micro::GetTensorData(input_value); +#endif // USE_TFLM_COMPRESSION + + TF_LITE_ENSURE_OK(context, + resources->Assign(input_id->data.i32[0], + EvalTensorBytes(input_value), buffer)); + return kTfLiteOk; +} + +} // namespace. 
+ +#ifdef USE_TFLM_COMPRESSION + +TFLMRegistration Register_ASSIGN_VARIABLE() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); + +#else // USE_TFLM_COMPRESSION + +TFLMRegistration Register_ASSIGN_VARIABLE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); + +#endif // USE_TFLM_COMPRESSION +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/batch_matmul.h b/src/tensorflow/lite/micro/kernels/batch_matmul.h new file mode 100644 index 0000000..d5d0bfc --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/batch_matmul.h @@ -0,0 +1,176 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_BATCH_MATMUL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_BATCH_MATMUL_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/reference/transpose.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +extern constexpr int kBatchMatmulInputLhsTensor = 0; +extern constexpr int kBatchMatmulInputRhsTensor = 1; +extern constexpr int kBatchMatmulOutputTensor = 0; + +struct QuantizationOpDataBatchMatmul { + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; // exponent + + // The range of the fused activation layer. For example for kNone and + // int8_t these would be -128 and 127. 
+ int32_t output_activation_min; + int32_t output_activation_max; + + int32_t lhs_zero_point; + int32_t rhs_zero_point; + int32_t output_zero_point; +}; + +struct OpDataBatchMatmul { + QuantizationOpDataBatchMatmul* quantization; + + // Transpose tensors and state + TfLiteEvalTensor* lhs_transposed_tensor; + TfLiteEvalTensor* rhs_transposed_tensor; + bool rhs_is_transposed; + bool lhs_is_constant_tensor; + bool rhs_is_constant_tensor; +}; + +TfLiteStatus ReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node, + const RuntimeShape& extended_lhs_shape, + const RuntimeShape& extended_rhs_shape, + bool adj_x, bool adj_y, int output_rank, + TfLiteTensor* output) { + int64_t orig_size = NumElements(output); + + // make sure the new output dims rank does not exceed the original rank + TF_LITE_ENSURE(context, output_rank <= NumDimensions(output)); + + // make sure output tensor dims are not in the FlatBuffer + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + // Fill in any broadcast dimensions. + for (int i = 0; i < output_rank - 2; ++i) { + const int lhs_dim = extended_lhs_shape.Dims(i); + const int rhs_dim = extended_rhs_shape.Dims(i); + int broadcast_dim = lhs_dim; + if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { + broadcast_dim = rhs_dim; + } + output->dims->data[i] = broadcast_dim; + } + // Fill in the matmul dimensions. + int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; + int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1; + + output->dims->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); + output->dims->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); + output->dims->size = output_rank; + + // Check that output tensor has not been resized + // since TFLM doesn't support tensor resizing. 
+ TF_LITE_ENSURE_EQ(context, orig_size, NumElements(output)); + + return kTfLiteOk; +} + +template +void TransposeRowsColumnsImpl(const TfLiteEvalTensor& tensor_in, + TfLiteEvalTensor* tensor_out) { + const T* input = tflite::micro::GetTensorData(&tensor_in); + T* output = tflite::micro::GetTensorData(tensor_out); + RuntimeShape transposed_shape(tflite::micro::GetTensorShape(&tensor_in)); + RuntimeShape shape(transposed_shape); + TransposeParams params; + const int rank = shape.DimensionsCount(); + params.perm_count = rank; + for (int i = 0; i < rank - 2; ++i) { + params.perm[i] = i; + } + // Transpose the last two dimensions. + params.perm[rank - 2] = rank - 1; + params.perm[rank - 1] = rank - 2; + transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); + transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); + reference_ops::Transpose(params, shape, input, transposed_shape, output); +} + +TfLiteStatus TransposeRowsColumns(const TfLiteEvalTensor& tensor_in, + TfLiteEvalTensor* tensor_out) { + if (tensor_in.type == kTfLiteFloat32) { + TransposeRowsColumnsImpl(tensor_in, tensor_out); + return kTfLiteOk; + } else if (tensor_in.type == kTfLiteInt8) { + TransposeRowsColumnsImpl(tensor_in, tensor_out); + return kTfLiteOk; + } else if (tensor_in.type == kTfLiteInt16) { + TransposeRowsColumnsImpl(tensor_in, tensor_out); + return kTfLiteOk; + } else { + MicroPrintf( + "BATCH_MATMUL can only transpose tensors with FLOAT32, INT8, INT16 " + "type."); + } + return kTfLiteError; +} + +RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { + RuntimeShape swapped_shape(shape); + const int32_t dims = shape.DimensionsCount(); + swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); + swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); + return swapped_shape; +} + +TFLMRegistration Register_BATCH_MATMUL(); + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 matrix multiplication and uses the latency optimized +// 
implementations. +TFLMRegistration Register_BATCH_MATMUL_INT8(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16 matrix multiplication and uses the latency optimized +// implementations. +TFLMRegistration Register_BATCH_MATMUL_INT16(); + +#else +inline TFLMRegistration Register_BATCH_MATMUL_INT8() { + return Register_BATCH_MATMUL(); +} + +inline TFLMRegistration Register_BATCH_MATMUL_INT16() { + return Register_BATCH_MATMUL(); +} +#endif // defined(ARDUINO) + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_BATCH_MATMUL_H_ diff --git a/src/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp b/src/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp new file mode 100644 index 0000000..31a1c28 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp @@ -0,0 +1,113 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBlockShapeTensor = 1; +constexpr int kCropsTensor = 2; +constexpr int kOutputTensor = 0; + +// Currently, only 3D NHC and 4D NHWC input/output op_context are supported. +// In case of 3D input, it will be extended to 3D NHWC by adding W=1. +// The 4D array need to have exactly 2 spatial dimensions. +// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND. +const int kInputOutputMinDimensionNum = 3; +const int kInputOutputMaxDimensionNum = 4; + +TfLiteStatus BatchToSpaceNDPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr && output != nullptr); + + TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return 
kTfLiteOk; +} + +TfLiteStatus BatchToSpaceNDEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* block_shape = + tflite::micro::GetEvalInput(context, node, kBlockShapeTensor); + const TfLiteEvalTensor* crops = + tflite::micro::GetEvalInput(context, node, kCropsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + reference_ops::BatchToSpaceND( + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::BatchToSpaceND( + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace. + +TFLMRegistration Register_BATCH_TO_SPACE_ND() { + return tflite::micro::RegisterOp(nullptr, BatchToSpaceNDPrepare, + BatchToSpaceNDEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/broadcast_args.cpp b/src/tensorflow/lite/micro/kernels/broadcast_args.cpp new file mode 100644 index 0000000..283410a --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/broadcast_args.cpp @@ -0,0 +1,91 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/broadcast_args.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { +namespace { +constexpr int kShape1Tensor = 0; +constexpr int kShape2Tensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus BroadcastArgsPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, NumInputs(node) == 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* shape1 = + micro_context->AllocateTempInputTensor(node, kShape1Tensor); + TfLiteTensor* shape2 = + micro_context->AllocateTempInputTensor(node, kShape2Tensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE(context, + shape1->type == kTfLiteInt32 || shape1->type == kTfLiteInt64); + TF_LITE_ENSURE_EQ(context, shape1->type, shape2->type); + TF_LITE_ENSURE_EQ(context, shape1->type, output->type); + + // Ensures the shapes are 1D tensor. 
+ TF_LITE_ENSURE_EQ(context, NumDimensions(shape1), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(shape2), 1); + + // Ensure the shape of the output tensor is compatible + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + micro_context->DeallocateTempTfLiteTensor(shape1); + micro_context->DeallocateTempTfLiteTensor(shape2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus BroadcastArgsEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* shape1 = + micro::GetEvalInput(context, node, kShape1Tensor); + const TfLiteEvalTensor* shape2 = + micro::GetEvalInput(context, node, kShape2Tensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteInt32) { + reference_ops::BroadcastArgs( + micro::GetTensorShape(shape1), micro::GetTensorData(shape1), + micro::GetTensorShape(shape2), micro::GetTensorData(shape2), + micro::GetTensorShape(output), micro::GetTensorData(output)); + } else { + reference_ops::BroadcastArgs( + micro::GetTensorShape(shape1), micro::GetTensorData(shape1), + micro::GetTensorShape(shape2), micro::GetTensorData(shape2), + micro::GetTensorShape(output), micro::GetTensorData(output)); + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_BROADCAST_ARGS() { + return tflite::micro::RegisterOp(nullptr, BroadcastArgsPrepare, + BroadcastArgsEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/broadcast_to.cpp b/src/tensorflow/lite/micro/kernels/broadcast_to.cpp new file mode 100644 index 0000000..61deaa3 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/broadcast_to.cpp @@ -0,0 +1,123 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/broadcast_to.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { + +namespace { +constexpr int kInputTensor = 0; +constexpr int kShapeTensor = 1; +constexpr int kOutputTensor = 0; +// Support a maximum of 5 dimensions in TFLM. +constexpr int kMaxDims = 5; + +TfLiteStatus ValidateOutputTensor(TfLiteContext* context, TfLiteTensor* input, + TfLiteTensor* shape, TfLiteTensor* output) { + // Ensures the shape is 1D tensor. + TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1); + + // Ensure output dims is not less than input dims. + int input_num_dims = NumDimensions(input); + int output_num_dims = NumDimensions(output); + int shape_num_dims = SizeOfDimension(shape, 0); + TF_LITE_ENSURE_MSG(context, output_num_dims == shape_num_dims, + "Output must match with the expected shape dimension."); + TF_LITE_ENSURE_MSG(context, input_num_dims <= output_num_dims, + "Output shape must be broadcastable from input shape."); + TF_LITE_ENSURE_MSG(context, output_num_dims <= kMaxDims, + "BroadcastTo only supports 1-5D tensor."); + + // Check if output shape is broadcastable from input shape. 
+ auto get_shape_data = [shape](int i) -> int32_t { + if (shape->type == kTfLiteInt32) { + return GetTensorData(shape)[i]; + } else { + return GetTensorData(shape)[i]; + } + }; + + int extending_dims = output_num_dims - input_num_dims; + for (int idx = 0; idx < input_num_dims; ++idx) { + TF_LITE_ENSURE_MSG( + context, + (SizeOfDimension(input, idx) == 1 || + SizeOfDimension(input, idx) == get_shape_data(extending_dims + idx)), + "Output shape must be broadcastable from input shape."); + } + + // Validating the shape of the output tensor. + tflite::RuntimeShape output_shape = tflite::GetTensorShape(output); + for (int idx = 0; idx < output_num_dims; ++idx) { + TF_LITE_ENSURE(context, output_shape.Dims(idx) == get_shape_data(idx)); + } + return kTfLiteOk; +} + +TfLiteStatus BroadcastToPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, NumInputs(node) == 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* shape = + micro_context->AllocateTempInputTensor(node, kShapeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_MSG(context, (NumDimensions(input) <= kMaxDims), + "BroadcastTo only supports 1-5D tensor."); + + TF_LITE_ENSURE(context, + shape->type == kTfLiteInt32 || shape->type == kTfLiteInt64); + TF_LITE_ENSURE_EQ(context, input->type, output->type); + + // Does not support String type due to its variable size. This limitation is + // the same as TFLite. 
+ TF_LITE_ENSURE(context, input->type != kTfLiteString); + + TF_LITE_ENSURE_STATUS(ValidateOutputTensor(context, input, shape, output)); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(shape); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus BroadcastToEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + // BroadcastTo op support up to 5 dims, different from 8 dims in TFLite. + reference_ops::BroadcastTo( + micro::GetTensorShape(input), input->data.raw, + micro::GetTensorShape(output), output->data.raw, input->type); + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_BROADCAST_TO() { + return tflite::micro::RegisterOp(nullptr, BroadcastToPrepare, + BroadcastToEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/call_once.cpp b/src/tensorflow/lite/micro/kernels/call_once.cpp new file mode 100644 index 0000000..65857ef --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/call_once.cpp @@ -0,0 +1,88 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +struct OpData { + int init_subgraph_index; + bool has_run; +}; + +void* CallOnceInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus CallOncePrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + op_data->init_subgraph_index = params->init_subgraph_index; + op_data->has_run = false; + + TF_LITE_ENSURE(context, NumInputs(node) == 0); + TF_LITE_ENSURE(context, NumOutputs(node) == 0); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE(context, + op_data->init_subgraph_index < graph_info.NumSubgraphs()); + + return kTfLiteOk; +} + +TfLiteStatus CallOnceEval(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + + // Call once only runs one time then is a no-op for every subsequent call. 
+ if (op_data->has_run) { + return kTfLiteOk; + } + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE_OK(context, + graph_info.InvokeSubgraph(op_data->init_subgraph_index)); + + op_data->has_run = true; + + return kTfLiteOk; +} + +} // namespace. + +TFLMRegistration Register_CALL_ONCE() { + return tflite::micro::RegisterOp(CallOnceInit, CallOncePrepare, CallOnceEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cast.cpp b/src/tensorflow/lite/micro/kernels/cast.cpp new file mode 100644 index 0000000..0b450d6 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cast.cpp @@ -0,0 +1,120 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus CastPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +template +void copyCast(const FromT* in, ToT* out, int num_elements) { + std::transform(in, in + num_elements, out, + [](FromT a) { return static_cast(a); }); +} + +template +TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in, + TfLiteEvalTensor* out, int num_elements) { + switch (out->type) { + case kTfLiteInt8: + copyCast(in, out->data.int8, num_elements); + break; + case kTfLiteInt16: + copyCast(in, out->data.i16, num_elements); + break; + case kTfLiteInt32: + copyCast(in, out->data.i32, num_elements); + break; + case kTfLiteUInt32: + copyCast(in, out->data.u32, num_elements); + break; + case kTfLiteFloat32: + copyCast(in, tflite::micro::GetTensorData(out), num_elements); + break; + default: + // Unsupported type. 
+ MicroPrintf("Output type %s (%d) not supported.", + TfLiteTypeGetName(out->type), out->type); + } + return kTfLiteOk; +} + +TfLiteStatus CastEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int num_elements = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + + switch (input->type) { + case kTfLiteInt8: + return copyToTensor(context, input->data.int8, output, num_elements); + case kTfLiteInt16: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + case kTfLiteInt32: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + case kTfLiteUInt32: + return copyToTensor(context, + tflite::micro::GetTensorData(input), output, + num_elements); + case kTfLiteFloat32: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + case kTfLiteBool: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + default: + // Unsupported type. + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + } + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_CAST() { + return tflite::micro::RegisterOp(nullptr, CastPrepare, CastEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/ceil.cpp b/src/tensorflow/lite/micro/kernels/ceil.cpp index f929ce6..36139f9 100644 --- a/src/tensorflow/lite/micro/kernels/ceil.cpp +++ b/src/tensorflow/lite/micro/kernels/ceil.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,17 +21,20 @@ limitations under the License. #include "tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace ceil { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); +TfLiteStatus CeilPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); @@ -42,10 +45,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < output->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus CeilEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); TfLiteEvalTensor* output = @@ -58,19 +63,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } -} // namespace ceil - -TfLiteRegistration Register_CEIL() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ceil::Prepare, - /*invoke=*/ceil::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + +} // namespace + +TFLMRegistration Register_CEIL() { + return tflite::micro::RegisterOp(nullptr, 
CeilPrepare, CeilEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/circular_buffer.cpp b/src/tensorflow/lite/micro/kernels/circular_buffer.cpp index 5ce8dbe..3e90104 100644 --- a/src/tensorflow/lite/micro/kernels/circular_buffer.cpp +++ b/src/tensorflow/lite/micro/kernels/circular_buffer.cpp @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/lite/micro/kernels/circular_buffer.h" + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" @@ -20,7 +22,9 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" /* * The circular buffer custom operator is used to implement strided streaming @@ -44,77 +48,22 @@ limitations under the License. * - Input and output quantization params must be identical. */ namespace tflite { -namespace ops { -namespace micro { -namespace circular_buffer { - -namespace { - -// The CircularBuffer op has one input and one output tensor. -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -// TODO(b/149795762): Add this to TfLiteStatus enum. -constexpr int kTfLiteAbort = -9; - -// These fields control the stride period of a strided streaming model. This op -// returns kTfLiteAbort until cycles_until_run-- is zero. At this time, -// cycles_until_run is reset to cycles_max. 
-struct OpData { - int cycles_until_run; - int cycles_max; -}; -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* CircularBufferInit(TfLiteContext* context, const char* buffer, + size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + OpDataCircularBuffer* op_data = static_cast( + context->AllocatePersistentBuffer(context, sizeof(OpDataCircularBuffer))); - TFLITE_DCHECK(node->user_data != nullptr); - OpData* op_data = static_cast(node->user_data); - - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); - TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); - TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); - TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - // The circular buffer custom operator currently only supports int8. - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - - // The last circular buffer layer simply accumulates outputs, and does not run - // periodically. - // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. - static int cb_prepare_count = 0; - cb_prepare_count++; - // These checks specifically work for the only two streaming models supported - // on TFLM. They use the shape of the output tensor along with the layer - // number to determine if the circular buffer period should be 1 or 2. 
- - // These models are outlined int the following documents: - // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing - // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing - if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || - (cb_prepare_count == 5 && output->dims->data[2] == 2 && - output->dims->data[3] == 96)) { - op_data->cycles_max = 1; - cb_prepare_count = 0; + if (buffer != nullptr && length > 0) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + tflite::FlexbufferWrapper wrapper(buffer_t, length); + op_data->cycles_max = wrapper.ElementAsInt32(kCircularBufferCyclesMaxIndex); } else { - op_data->cycles_max = 2; + op_data->cycles_max = 0; } - op_data->cycles_until_run = op_data->cycles_max; - node->user_data = op_data; - return kTfLiteOk; + return op_data; } // Shifts buffer over by the output depth, and write new input to end of buffer. @@ -125,14 +74,15 @@ void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) { memcpy(&output[(num_slots - 1) * depth], input, depth); } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kCircularBufferInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kCircularBufferOutputTensor); TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = reinterpret_cast(node->user_data); + OpDataCircularBuffer* data = + reinterpret_cast(node->user_data); int num_slots = output->dims->data[1]; int depth = output->dims->data[2] * output->dims->data[3]; @@ -141,7 +91,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { EvalInt8(tflite::micro::GetTensorData(input), num_slots, 
depth, tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), input->type); return kTfLiteError; } @@ -149,8 +99,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { if (--data->cycles_until_run != 0) { // Signal the interpreter to end current run if the delay before op invoke // has not been reached. - // TODO(b/149795762): Add kTfLiteAbort to TfLiteStatus enum. - return static_cast(kTfLiteAbort); + return kTfLiteAbort; } data->cycles_until_run = data->cycles_max; @@ -158,20 +107,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } -} // namespace circular_buffer - -TfLiteRegistration* Register_CIRCULAR_BUFFER() { - static TfLiteRegistration r = {/*init=*/circular_buffer::Init, - /*free=*/nullptr, - /*prepare=*/circular_buffer::Prepare, - /*invoke=*/circular_buffer::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration* Register_CIRCULAR_BUFFER() { + static TFLMRegistration r = tflite::micro::RegisterOp( + CircularBufferInit, CircularBufferPrepare, CircularBufferEval); return &r; } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/circular_buffer.h b/src/tensorflow/lite/micro/kernels/circular_buffer.h new file mode 100644 index 0000000..c0a55b8 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/circular_buffer.h @@ -0,0 +1,45 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +// The CircularBuffer op has one input and one output tensor. +extern const int kCircularBufferInputTensor; +extern const int kCircularBufferOutputTensor; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +extern const int kCircularBufferCyclesMaxIndex; // 'cycles_max' + +// These fields control the stride period of a strided streaming model. This op +// returns kTfLiteAbort until cycles_until_run-- is zero. At this time, +// cycles_until_run is reset to cycles_max. +struct OpDataCircularBuffer { + int cycles_until_run; + int cycles_max; +}; + +TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ diff --git a/src/tensorflow/lite/micro/kernels/circular_buffer_common.cpp b/src/tensorflow/lite/micro/kernels/circular_buffer_common.cpp new file mode 100644 index 0000000..bf45c06 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/circular_buffer_common.cpp @@ -0,0 +1,94 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/kernels/circular_buffer.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +// The CircularBuffer op has one input and one output tensor. +const int kCircularBufferInputTensor = 0; +const int kCircularBufferOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. 
+const int kCircularBufferCyclesMaxIndex = 0; // 'cycles_max' + +TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kCircularBufferInputTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kCircularBufferOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataCircularBuffer* op_data = + static_cast(node->user_data); + + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); + TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); + TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); + TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + // The circular buffer custom operator currently only supports int8. + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); + + if (op_data->cycles_max <= 0) { + // The last circular buffer layer simply accumulates outputs, and does not + // run periodically. + // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. + static int cb_prepare_count = 0; + cb_prepare_count++; + // These checks specifically work for the only two streaming models + // supported on TFLM. They use the shape of the output tensor along with the + // layer number to determine if the circular buffer period should be 1 or 2. 
+ + // These models are outlined int the following documents: + // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing + // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing + if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || + output->dims->data[1] == 25 || + (cb_prepare_count == 5 && output->dims->data[2] == 2 && + output->dims->data[3] == 96)) { + op_data->cycles_max = 1; + cb_prepare_count = 0; + } else { + op_data->cycles_max = 2; + } + } + op_data->cycles_until_run = op_data->cycles_max; + node->user_data = op_data; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h b/src/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h new file mode 100644 index 0000000..2fbf4fe --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h @@ -0,0 +1,22 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H +#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H + +extern const int g_gen_data_size_circular_buffer_config; +extern const unsigned char g_gen_data_circular_buffer_config[]; + +#endif diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/add.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/add.cpp deleted file mode 100644 index 2816e11..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/add.cpp +++ /dev/null @@ -1,246 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/add.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace add { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = 
output->params.zero_point; - data->left_shift = 20; - const double twice_max_input_scale = - 2 * static_cast( - std::max(input1->params.scale, input2->params.scale)); - const double real_input1_multiplier = - static_cast(input1->params.scale) / twice_max_input_scale; - const double real_input2_multiplier = - static_cast(input2->params.scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } - - return kTfLiteOk; -} - -void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, - const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - tflite::ArithmeticParams op_params; - SetActivationParams(output_activation_min, output_activation_max, &op_params); -#define TF_LITE_ADD(opname) \ - reference_ops::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - if (data->requires_broadcast) { - TF_LITE_ADD(BroadcastAdd4DSlow); - } else { - TF_LITE_ADD(Add); - } -#undef TF_LITE_ADD -} - -TfLiteStatus EvalAddQuantized(TfLiteContext* 
context, TfLiteNode* node, - TfLiteAddParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); -#define TF_LITE_ADD(type, opname, dtype) \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - TF_LITE_ADD(reference_integer_ops, BroadcastAdd4DSlow, int8_t); - } else { - arm_elementwise_add_s8( - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorData(input2), - op_params.input1_offset, op_params.input1_multiplier, - op_params.input1_shift, op_params.input2_offset, - op_params.input2_multiplier, op_params.input2_shift, - op_params.left_shift, tflite::micro::GetTensorData(output), - op_params.output_offset, op_params.output_multiplier, - op_params.output_shift, op_params.quantized_activation_min, - op_params.quantized_activation_max, - 
MatchingElementsSize(tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorShape(output))); - } - } else { - if (need_broadcast) { - TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow, uint8_t); - } else { - TF_LITE_ADD(reference_ops, Add, uint8_t); - } - } -#undef TF_LITE_ADD - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - if (output->type == kTfLiteFloat32) { - EvalAdd(context, node, params, data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s 
(%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace add - -TfLiteRegistration Register_ADD() { - return {/*init=*/add::Init, - /*free=*/nullptr, - /*prepare=*/add::Prepare, - /*invoke=*/add::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/conv.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/conv.cpp deleted file mode 100644 index f74a862..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/conv.cpp +++ /dev/null @@ -1,460 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/conv.h" - -#include "CMSIS/NN/Include/arm_nn_types.h" -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - - // Index to buffer for optimizations if applicable. 
- int buffer_idx; -}; - -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. - auto padding = params->padding; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
- if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - int num_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), num_channels)); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - int32_t buf_size = 0; - const auto params = static_cast(node->builtin_data); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - RuntimeShape input_shape = GetTensorShape(input); - RuntimeShape output_shape = GetTensorShape(output); - - // Initialize cmsis-nn input dimensions - cmsis_nn_dims input_dims; - input_dims.n = MatchingDim(input_shape, 0, output_shape, 0); - input_dims.h = input->dims->data[1]; - input_dims.w = input->dims->data[2]; - input_dims.c = input_shape.Dims(3); - - // Initialize cmsis-nn filter dimensions - cmsis_nn_dims filter_dims; - filter_dims.n = output_shape.Dims(3); - filter_dims.h = filter->dims->data[1]; - 
filter_dims.w = filter->dims->data[2]; - filter_dims.c = input_dims.c; - - // Initialize cmsis-nn output dimensions - cmsis_nn_dims output_dims; - output_dims.n = input_dims.n; - output_dims.h = output->dims->data[1]; - output_dims.w = output->dims->data[2]; - output_dims.c = output_shape.Dims(3); - - // Dynamically allocate per-channel quantization parameters. - // TODO(#42883): This allocation is done even for non-int8 cases to get around - // a bug in kernel_utils.cc which incorrectly uses per_channel_output_shift in - // non-int8 cases. Protect this section with a if (input->type == kTfLiteInt8) - // when the issue is fixed. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_dims.w, input_dims.h, filter_dims.w, - filter_dims.h, output_dims.w, output_dims.h, input->type, data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - if (input->type == kTfLiteInt8) { - // Initialize cmsis-nn convolution parameters - cmsis_nn_conv_params conv_params; - conv_params.input_offset = -input->params.zero_point; - conv_params.output_offset = output->params.zero_point; - conv_params.stride.h = params->stride_height; - conv_params.stride.w = params->stride_width; - conv_params.dilation.h = params->dilation_height_factor; - conv_params.dilation.w = params->dilation_width_factor; - conv_params.padding.h = data->padding.height; - conv_params.padding.w = data->padding.width; - conv_params.activation.min = data->output_activation_min; - conv_params.activation.max = data->output_activation_max; - - buf_size = 
arm_convolve_wrapper_s8_get_buffer_size( - &conv_params, &input_dims, &filter_dims, &output_dims); - } - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* im2col, - TfLiteEvalTensor* hwcn_weights, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - 
tflite::micro::GetTensorData(im2col), nullptr); - return kTfLiteOk; -} - -TfLiteStatus EvalQuantizedPerChannel( - TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output, TfLiteEvalTensor* im2col) { - cmsis_nn_conv_params conv_params; - conv_params.dilation.h = params->dilation_height_factor; - conv_params.dilation.w = params->dilation_width_factor; - // TODO(#43557) Remove checks for dilation and call to reference - // implementation when dilation is supported in the optimized implementation - // by CMSIS-NN. - if (conv_params.dilation.h == 1 && conv_params.dilation.w == 1) { - // Initialize cmsis-nn convolution parameters - conv_params.input_offset = -data.input_zero_point; - conv_params.output_offset = data.output_zero_point; - conv_params.stride.h = params->stride_height; - conv_params.stride.w = params->stride_width; - conv_params.padding.h = data.padding.height; - conv_params.padding.w = data.padding.width; - conv_params.activation.min = data.output_activation_min; - conv_params.activation.max = data.output_activation_max; - - // Initialize cmsis-nn per channel quantization parameters - cmsis_nn_per_channel_quant_params quant_params; - quant_params.multiplier = - const_cast(data.per_channel_output_multiplier); - quant_params.shift = const_cast(data.per_channel_output_shift); - - RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); - - // Consistency check. 
- TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (tflite::micro::GetTensorData(bias)) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - // Initialize cmsis-nn dimensions - // Input - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = input_depth; - - // Filter - cmsis_nn_dims filter_dims; - filter_dims.n = output_depth; - filter_dims.h = filter_shape.Dims(1); - filter_dims.w = filter_shape.Dims(2); - filter_dims.c = input_depth; - - // Bias - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - // Output - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = output_depth; - - // Initialize cmsis-nn context - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - // Note: ctx.size is currently not used in cmsis-nn. 
- // The buffer should be allocated in the Prepare function through - // arm_convolve_wrapper_s8_get_buffer_size - } - - // arm_convolve_wrapper_s8 dispatches the optimized kernel accordingly with - // the parameters passed - TFLITE_DCHECK_EQ( - arm_convolve_wrapper_s8( - &ctx, &conv_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), &bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } else { - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::ConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - return kTfLiteOk; -} - -TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col, - TfLiteEvalTensor* hwcn_weights, - TfLiteEvalTensor* output) { - 
float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col)); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? 
tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); - break; - case kTfLiteInt8: - return EvalQuantizedPerChannel(context, node, params, data, input, filter, - bias, output, nullptr); - break; - case kTfLiteUInt8: - return EvalQuantized(context, node, params, data, input, filter, bias, - nullptr, nullptr, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cpp deleted file mode 100644 index 7715dbe..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cpp +++ /dev/null @@ -1,475 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Depthwise conv is quantized along dimension 3: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kDepthwiseConvQuantizedDimension = 3; - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. 
- int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - // Index to buffer for optimizations if applicable. - int buffer_idx; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, int width, - int height, int filter_width, int filter_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - int unused_output_height, unused_output_width; - // Set buffer index to a reset value - data->buffer_idx = -1; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, 1, 1, height, width, - filter_height, filter_width, params->padding, &unused_output_height, - &unused_output_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
- if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - return tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), num_channels); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = - reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - const TfLiteType data_type = input->type; - int width = SizeOfDimension(input, 2); - int height = SizeOfDimension(input, 1); - int filter_width = SizeOfDimension(filter, 2); - int filter_height = SizeOfDimension(filter, 1); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- const auto* affine_quantization = - reinterpret_cast( - filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - TF_LITE_ENSURE( - context, affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - // Allocate memory for per-channel quantization parameters - const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - data->per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, - filter_width, filter_height, data_type, - data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - if (input->type == kTfLiteInt8) { - RuntimeShape input_shape = GetTensorShape(input); - RuntimeShape output_shape = GetTensorShape(output); - RuntimeShape filter_shape = GetTensorShape(filter); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(output_shape, 3, filter_shape, 3); - TFLITE_DCHECK_EQ(batch_size, 1); /* Only batch = 1 is supported */ - - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = height; - input_dims.w = width; - input_dims.c = input_shape.Dims(3); - - cmsis_nn_dims filter_dims; - 
filter_dims.n = 1; - filter_dims.h = filter_height; - filter_dims.w = filter_width; - filter_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = output_depth; - - cmsis_nn_dw_conv_params dw_conv_params; - dw_conv_params.padding.h = data->padding.height; - dw_conv_params.padding.w = data->padding.width; - - const int32_t buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size( - &dw_conv_params, &input_dims, &filter_dims, &output_dims); - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData* data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. 
- op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data->padding.width; - op_params.padding_values.height = data->padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, OpData* data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - cmsis_nn_dw_conv_params dw_conv_params; - dw_conv_params.dilation.h = params->dilation_height_factor; - dw_conv_params.dilation.w = params->dilation_width_factor; - // Call to reference implementation can be removed when dilation is supported - // in the optimized implementations. - if (1 == dw_conv_params.dilation.h && 1 == dw_conv_params.dilation.w) { - dw_conv_params.input_offset = -data->input_zero_point; - dw_conv_params.output_offset = data->output_zero_point; - dw_conv_params.stride.h = params->stride_height; - dw_conv_params.stride.w = params->stride_width; - dw_conv_params.padding.h = data->padding.height; - dw_conv_params.padding.w = data->padding.width; - // TODO(b/130439627): Use calculated value for clamping. 
- dw_conv_params.activation.min = std::numeric_limits::min(); - dw_conv_params.activation.max = std::numeric_limits::max(); - dw_conv_params.ch_mult = params->depth_multiplier; - - cmsis_nn_per_channel_quant_params quant_params; - quant_params.multiplier = data->per_channel_output_multiplier; - quant_params.shift = data->per_channel_output_shift; - - RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); - - TFLITE_DCHECK_LE(dw_conv_params.activation.min, - dw_conv_params.activation.max); - - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - - if (tflite::micro::GetTensorData(bias)) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = input_shape.Dims(3); - - cmsis_nn_dims filter_dims; - filter_dims.n = filter_shape.Dims(0); - filter_dims.h = filter_shape.Dims(1); - filter_dims.w = filter_shape.Dims(2); - filter_dims.c = output_depth; - - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = output_depth; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - /* 'size' is unused */ - ctx.size = 0; - - if (data->buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data->buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_depthwise_conv_wrapper_s8( - &ctx, &dw_conv_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), 
&bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } else { - DepthwiseParams op_params; - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data->padding.width; - op_params.padding_values.height = data->padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.input_offset = -data->input_zero_point; - op_params.weights_offset = 0; - op_params.output_offset = data->output_zero_point; - // TODO(b/130439627): Use calculated value for clamping. - op_params.quantized_activation_min = std::numeric_limits::min(); - op_params.quantized_activation_max = std::numeric_limits::max(); - - reference_integer_ops::DepthwiseConvPerChannel( - op_params, data->per_channel_output_multiplier, - data->per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData* data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data->input_zero_point; - const int32_t filter_offset = -data->filter_zero_point; - const int32_t output_offset = data->output_zero_point; - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. 
- op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data->padding.width; - op_params.padding_values.height = data->padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.quantized_activation_min = data->output_activation_min; - op_params.quantized_activation_max = data->output_activation_max; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data->output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data->output_shift; - - if (1 == op_params.dilation_width_factor && - 1 == op_params.dilation_height_factor) { - RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - arm_depthwise_conv_u8_basic_ver1( - tflite::micro::GetTensorData(input), input_width, input_height, - input_depth, tflite::micro::GetTensorData(filter), - filter_width, filter_height, op_params.depth_multiplier, - op_params.padding_values.width, op_params.padding_values.height, - op_params.stride_width, op_params.stride_height, - op_params.dilation_width_factor, op_params.dilation_height_factor, - tflite::micro::GetTensorData(bias), op_params.input_offset, 
- op_params.weights_offset, op_params.output_offset, - tflite::micro::GetTensorData(output), output_width, - output_height, op_params.quantized_activation_min, - op_params.quantized_activation_max, op_params.output_shift, - op_params.output_multiplier); - } else { - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - OpData& data = *(static_cast(node->user_data)); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - - // TODO(aselle): Consider whether float conv and quantized conv should be - // separate ops to avoid dispatch overhead here. - switch (input->type) { // Already know in/out types are same. 
- case kTfLiteFloat32: - EvalFloat(context, node, params, &data, input, filter, bias, output); - break; - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, &data, input, filter, bias, - output); - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, &data, input, filter, bias, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTHWISE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cpp deleted file mode 100644 index 11a0f0b..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cpp +++ /dev/null @@ -1,392 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/fully_connected.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -struct OpData { - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - // The index of the temporary tensor where the quantized inputs are cached. - int input_quantized_index; - // Index to buffer for optimizations if applicable. - int buffer_idx; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; -}; - -constexpr int kInputTensor = 0; -constexpr int kWeightsTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// TODO(b/169801227): This global struct is needed for the linker to drop unused -// code (for example, by using Register_FULLY_CONNECTED_INT8 instead of -// Register_FULLY_CONNECTED). 
-TfLiteRegistration fully_connected_registration; - -TfLiteStatus CalculateOpData(TfLiteContext* context, - TfLiteFusedActivation activation, - TfLiteType data_type, const TfLiteTensor* input, - const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* output, - OpData* data) { - TfLiteStatus status = kTfLiteOk; - // Set buffer index to a reset value - data->buffer_idx = -1; - if (data_type != kTfLiteFloat32) { - double real_multiplier = 0.0; - TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( - context, input, filter, bias, output, &real_multiplier)); - int exponent; - QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); - data->output_shift = -exponent; - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, activation, output, &data->output_activation_min, - &data->output_activation_max)); - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - } - return status; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - 
TF_LITE_ENSURE_STATUS(CalculateOpData(context, params->activation, - input->type, input, filter, bias, - output, data)); - - if (input->type == kTfLiteInt8 && nullptr != GetTensorData(bias)) { - RuntimeShape filter_shape = GetTensorShape(filter); - RuntimeShape output_shape = GetTensorShape(output); - - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - const int filter_dim_count = filter_shape.DimensionsCount(); - cmsis_nn_dims filter_dims; - filter_dims.n = filter_shape.Dims(filter_dim_count - 1); - filter_dims.h = 1; - filter_dims.w = 1; - filter_dims.c = output_shape.Dims(1); - - const int32_t buf_size = - arm_fully_connected_s8_get_buffer_size(&filter_dims); - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - // The 'if' condition can be removed when null handling of bias is added to - // arm_fully_connected_s8 - if (nullptr != tflite::micro::GetTensorData(bias)) { - const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - const int batches = output_shape.Dims(0); - const int output_depth = output_shape.Dims(1); - const RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - const RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - - cmsis_nn_fc_params fc_params; - fc_params.input_offset = -data.input_zero_point; - fc_params.output_offset = data.output_zero_point; - fc_params.filter_offset = -data.filter_zero_point; - fc_params.activation.min = 
data.output_activation_min; - fc_params.activation.max = data.output_activation_max; - - cmsis_nn_per_tensor_quant_params quant_params; - quant_params.multiplier = data.output_multiplier; - // TODO(b/138810107): Figure out whether output shift should be inverted - quant_params.shift = -data.output_shift; - - cmsis_nn_dims input_dims; - input_dims.n = batches; - input_dims.h = 1; - input_dims.w = 1; - input_dims.c = accum_depth; - - cmsis_nn_dims filter_dims; - filter_dims.n = accum_depth; - filter_dims.h = 1; - filter_dims.w = 1; - filter_dims.c = output_depth; - - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batches; - output_dims.h = 1; - output_dims.w = 1; - output_dims.c = output_depth; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TF_LITE_ENSURE_EQ( - context, - arm_fully_connected_s8( - &ctx, &fc_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), &bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } else { - tflite::FullyConnectedParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = -data.filter_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - // TODO(b/138810107): Figure out whether output shift should be inverted - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - 
tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::FullyConnectedParams op_params; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - -#define TF_LITE_FULLY_CONNECTED(output_data_type) \ - reference_ops::FullyConnected( \ - op_params, tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorShape(filter), \ - tflite::micro::GetTensorData(filter), \ - tflite::micro::GetTensorShape(bias), \ - tflite::micro::GetTensorData(bias), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - switch (output->type) { - case kTfLiteUInt8: - TF_LITE_FULLY_CONNECTED(uint8_t); - break; - case kTfLiteInt16: - TF_LITE_FULLY_CONNECTED(int16_t); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteFusedActivation 
activation, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(activation, &output_activation_min, - &output_activation_max); - tflite::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - tflite::reference_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. 
- switch (input->type) { - case kTfLiteFloat32: - return EvalFloat(context, node, params->activation, input, filter, bias, - output); - case kTfLiteInt8: - return EvalQuantizedInt8(context, node, data, input, filter, bias, - output); - - case kTfLiteUInt8: - return EvalQuantized(context, node, data, input, filter, bias, output); - - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -// Note that the current function names are not ideal at all (this EvalInt8 -// function internally calls EvalQuantizedInt8, and there is similar name -// aliasing in the Eval function too). We will be attempting to have a more -// descriptive naming convention but holding off on that for now, since the -// renaming might be coupled with reducing code duplication and some additional -// refactoring. -TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. 
- if (input->type != kTfLiteInt8) { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - return EvalQuantizedInt8(context, node, data, input, filter, bias, output); -} - -} // namespace - -TfLiteRegistration Register_FULLY_CONNECTED() { - fully_connected_registration.init = Init; - fully_connected_registration.free = nullptr; - fully_connected_registration.prepare = Prepare; - fully_connected_registration.invoke = Eval; - fully_connected_registration.profiling_string = nullptr; - fully_connected_registration.builtin_code = 0; - fully_connected_registration.custom_name = nullptr; - fully_connected_registration.version = 0; - return fully_connected_registration; -} - -TfLiteRegistration Register_FULLY_CONNECTED_INT8() { - fully_connected_registration.init = Init; - fully_connected_registration.free = nullptr; - fully_connected_registration.prepare = Prepare; - fully_connected_registration.invoke = EvalInt8; - fully_connected_registration.profiling_string = nullptr; - fully_connected_registration.builtin_code = 0; - fully_connected_registration.custom_name = nullptr; - fully_connected_registration.version = 0; - return fully_connected_registration; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/mul.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/mul.cpp deleted file mode 100644 index 2068650..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/mul.cpp +++ /dev/null @@ -1,226 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/mul.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace mul { - -constexpr int kInput1Tensor = 0; -constexpr int kInput2Tensor = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t output_activation_min; - int32_t output_activation_max; - - int32_t output_multiplier; - int output_shift; - - // Cached tensor zero point values for quantized operations. 
- int32_t input1_zero_point; - int32_t input2_zero_point; - int32_t output_zero_point; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, OpData* data) { - const TfLiteTensor* input1 = GetInput(context, node, kInput1Tensor); - const TfLiteTensor* input2 = GetInput(context, node, kInput2Tensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - - double real_multiplier = static_cast(input1->params.scale) * - static_cast(input2->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->output_multiplier, - &data->output_shift); - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input1 = GetInput(context, node, kInput1Tensor); - const TfLiteTensor* input2 = GetInput(context, node, kInput2Tensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - if (output->dims->size == 0) { - return AllocateOutputDimensionsFromInput(context, input1, input2, output); - } - - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - data->input1_zero_point = input1->params.zero_point; - data->input2_zero_point = input2->params.zero_point; - 
data->output_zero_point = output->params.zero_point; - CalculateOpData(context, node, params, data); - - return kTfLiteOk; -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, const OpData& data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - if (output->type == kTfLiteInt8 || output->type == kTfLiteUInt8) { - tflite::ArithmeticParams op_params; - SetActivationParams(data.output_activation_min, data.output_activation_max, - &op_params); - op_params.input1_offset = -data.input1_zero_point; - op_params.input2_offset = -data.input2_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = data.output_shift; - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - -#define TF_LITE_MUL(type, opname, dtype) \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - TF_LITE_MUL(reference_integer_ops, BroadcastMul4DSlow, int8_t); - } else { - arm_elementwise_mul_s8( - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorData(input2), - op_params.input1_offset, op_params.input2_offset, - tflite::micro::GetTensorData(output), - op_params.output_offset, op_params.output_multiplier, - op_params.output_shift, op_params.quantized_activation_min, - op_params.quantized_activation_max, - MatchingElementsSize(tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorShape(output))); - } - } else if (output->type == kTfLiteUInt8) { - if (need_broadcast) { - 
TF_LITE_MUL(reference_ops, BroadcastMul4DSlow, uint8_t); - } else { - TF_LITE_MUL(reference_ops, Mul, uint8_t); - } - } -#undef TF_LITE_MUL - } -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - tflite::ArithmeticParams op_params; - SetActivationParams(output_activation_min, output_activation_max, &op_params); - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); -#define TF_LITE_MUL(opname) \ - reference_ops::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - - if (need_broadcast) { - TF_LITE_MUL(BroadcastMul4DSlow); - } else { - TF_LITE_MUL(Mul); - } -#undef TF_LITE_MUL -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInput1Tensor); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInput2Tensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - switch (input1->type) { - case kTfLiteUInt8: - case kTfLiteInt8: - EvalQuantized(context, node, params, data, input1, input2, output); - break; - case kTfLiteFloat32: - EvalFloat(context, node, params, input1, input2, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) 
not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace mul - -TfLiteRegistration Register_MUL() { - return {/* Init=*/mul::Init, - /* Free=*/nullptr, - /* Prepare=*/mul::Prepare, - /*invoke=*/mul::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cpp deleted file mode 100644 index e1ac2b5..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cpp +++ /dev/null @@ -1,397 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "flatbuffers/base.h" // from @flatbuffers -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pooling { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - TfLitePaddingValues padding; - // Index to buffer for optimizations if applicable. - int buffer_idx; - - int32_t activation_min; - int32_t activation_max; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, TfLiteTensor* output, - OpData* data) { - // input: batch, height, width, channel - int height = SizeOfDimension(input, 1); - int width = SizeOfDimension(input, 2); - - int out_height, out_width; - - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - /*dilation_rate_height=*/1, - /*dilation_rate_width=*/1, height, width, params->filter_height, - params->filter_width, params->padding, &out_height, &out_width); - - if (input->type != kTfLiteFloat32) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->activation_min, - &data->activation_max)); - TFLITE_DCHECK_LE(data->activation_min, data->activation_max); - } - - // Set buffer index to a reset value - data->buffer_idx = -1; - - return kTfLiteOk; -} - -void AverageEvalFloat(const TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const 
TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - float activation_min, activation_max; - CalculateActivationRange(params->activation, &activation_min, - &activation_max); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.float_activation_min = activation_min; - op_params.float_activation_max = activation_max; - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - TFLITE_DCHECK(input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); - - if (input->type == kTfLiteUInt8) { - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - 
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - - cmsis_nn_dims input_dims; - input_dims.n = 1; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = depth; - - cmsis_nn_dims output_dims; - output_dims.n = 1; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = depth; - - cmsis_nn_pool_params pool_params; - pool_params.stride.h = params->stride_height; - pool_params.stride.w = params->stride_width; - pool_params.padding.h = data.padding.height; - pool_params.padding.w = data.padding.width; - pool_params.activation.min = data.activation_min; - pool_params.activation.max = data.activation_max; - - cmsis_nn_dims filter_dims; - filter_dims.n = 1; - filter_dims.h = params->filter_height; - filter_dims.w = params->filter_width; - filter_dims.c = 1; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_avgpool_s8(&ctx, &pool_params, &input_dims, - tflite::micro::GetTensorData(input), - &filter_dims, &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } -} - -void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - float activation_min, activation_max; - CalculateActivationRange(params->activation, &activation_min, - &activation_max); - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.float_activation_min = activation_min; - 
op_params.float_activation_max = activation_max; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -TfLiteStatus MaxEvalInt8(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - - cmsis_nn_dims input_dims; - input_dims.n = 1; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = depth; - - cmsis_nn_dims output_dims; - output_dims.n = 1; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = depth; - - cmsis_nn_pool_params pool_params; - pool_params.stride.h = params->stride_height; - pool_params.stride.w = params->stride_width; - pool_params.padding.h = data.padding.height; - pool_params.padding.w = 
data.padding.width; - pool_params.activation.min = data.activation_min; - pool_params.activation.max = data.activation_max; - - cmsis_nn_dims filter_dims; - filter_dims.n = 1; - filter_dims.h = params->filter_height; - filter_dims.w = params->filter_width; - filter_dims.c = 1; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_max_pool_s8(&ctx, &pool_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - - return kTfLiteOk; -} - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus MaxPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus AveragePrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - if (input->type == kTfLiteInt8) { - RuntimeShape input_shape = GetTensorShape(input); - 
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - - RuntimeShape output_shape = GetTensorShape(output); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int output_width = output_shape.Dims(2); - - const int32_t buffer_size = - arm_avgpool_s8_get_buffer_size(output_width, depth); - - if (buffer_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buffer_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // Inputs and outputs share the same type, guaranteed by the converter. 
- switch (input->type) { - case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - AverageEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: - MaxEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - MaxEvalQuantizedUInt8(context, node, params, data, input, output); - break; - case kTfLiteInt8: - MaxEvalInt8(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace pooling - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::AveragePrepare, - /*invoke=*/pooling::AverageEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::MaxPrepare, - /*invoke=*/pooling::MaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cpp 
b/src/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cpp deleted file mode 100644 index 13a90c6..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cpp +++ /dev/null @@ -1,232 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/softmax.h" - -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -// Softmax parameter data that persists in user_data -static constexpr int kInt16LUTArraySize = 513; - -TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, - const TfLiteTensor* input, - TfLiteTensor* output, - const TfLiteSoftmaxParams* params, - SoftmaxParams* op_data) { - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 || - input->type == kTfLiteInt16) { - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - } else if (input->type == kTfLiteInt16) { - 
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, - (0.001f * 1.f / 32768)); - } else { // input->type == kTfLiteInt8 - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 65536, - (0.001f * 1.f / 65536)); - } else { // output->type == kTfLiteint8 - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); - TF_LITE_ENSURE(context, output->params.scale == 1.f / 256); - } - } - - static const int kScaledDiffIntegerBits = 5; - - // Calculate input_multiplier and input_left_shift - if (input->type == kTfLiteInt16) { - int input_left_shift; - double input_scale_beta_rescale = - static_cast(input->params.scale) * - static_cast(params->beta) / - (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] - // correspond to [-10.0, 0.0] - QuantizeMultiplier(input_scale_beta_rescale, &op_data->input_multiplier, - &input_left_shift); - op_data->input_left_shift = input_left_shift; - } else { - int input_left_shift; - tflite::PreprocessSoftmaxScaling( - static_cast(params->beta), - static_cast(input->params.scale), kScaledDiffIntegerBits, - &op_data->input_multiplier, &input_left_shift); - op_data->input_left_shift = input_left_shift; - op_data->diff_min = - -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, - op_data->input_left_shift); - } - } else { - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - op_data->beta = static_cast(params->beta); - } - return kTfLiteOk; -} - -void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, 
sizeof(SoftmaxParams)); -} - -TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, NumDimensions(input) >= 1); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, node->user_data != nullptr); - SoftmaxParams* op_data = static_cast(node->user_data); - // Only allocate LUTs for KTfLiteInt16 data type - if (input->type == kTfLiteInt16) { - void* raw_exp_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, raw_exp_lut != nullptr); - op_data->exp_lut = reinterpret_cast(raw_exp_lut); - void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); - op_data->one_over_one_plus_x_lut = - reinterpret_cast(one_over_one_plus_x_lut); - } - - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || - input->type == kTfLiteUInt8 || - input->type == kTfLiteInt16); - } else { - TF_LITE_ENSURE_EQ(context, input->type, output->type); - } - - // Populate LUT if required - if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - // exp LUT only used on negative values - // we consider exp(-10.0) is insignificant to accumulation - gen_lut([](float value) { return std::exp(value); }, -10.0f, 0.0f, - op_data->exp_lut, kInt16LUTArraySize); - gen_lut([](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, - op_data->one_over_one_plus_x_lut, kInt16LUTArraySize); - op_data->zero_point = output->params.zero_point; - op_data->scale = output->params.scale; - } - - auto* params = static_cast(node->builtin_data); - 
return CalculateSoftmaxParams(context, input, output, params, op_data); -} - -// Takes a tensor and performs softmax along the last dimension. -void SoftmaxFloat(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - tflite::reference_ops::Softmax(op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - if (input->type == kTfLiteUInt8) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (input->type == kTfLiteInt8) { - if (output->type == kTfLiteInt16) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - const auto input_shape = tflite::micro::GetTensorShape(input); - const auto output_shape = tflite::micro::GetTensorShape(output); - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - - arm_softmax_s8(tflite::micro::GetTensorData(input), outer_size, - depth, op_data.input_multiplier, op_data.input_left_shift, - op_data.diff_min, - tflite::micro::GetTensorData(output)); - } - } else { - tflite::reference_ops::SoftmaxInt16( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { - const 
TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - TFLITE_DCHECK(node->user_data != nullptr); - const SoftmaxParams data = - *static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - SoftmaxFloat(input, output, data); - return kTfLiteOk; - } - case kTfLiteInt8: - case kTfLiteUInt8: - case kTfLiteInt16: { - SoftmaxQuantized(input, output, data); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } -} - -} // namespace - -TfLiteRegistration Register_SOFTMAX() { - return {/*init=*/SoftmaxInit, - /*free=*/nullptr, - /*prepare=*/SoftmaxPrepare, - /*invoke=*/SoftmaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis-nn/svdf.cpp b/src/tensorflow/lite/micro/kernels/cmsis-nn/svdf.cpp deleted file mode 100644 index f4ee0c7..0000000 --- a/src/tensorflow/lite/micro/kernels/cmsis-nn/svdf.cpp +++ /dev/null @@ -1,476 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include -#include - -#include "CMSIS/NN/Include/arm_nn_types.h" -#include "CMSIS/NN/Include/arm_nnfunctions.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/activation_utils.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -struct OpData { - int32_t effective_scale_1_a; - int32_t effective_scale_2_a; - // b versions of each scale are kept at int since the numbers are just the - // shift value - typically between [-32, 32]. - int effective_scale_1_b; - int effective_scale_2_b; - int scratch_tensor_index; - int scratch_output_tensor_index; - - // Cached tensor zero point values for quantized operations. - int input_zero_point; - int output_zero_point; -}; - -// Input tensors. -constexpr int kInputTensor = 0; -constexpr int kWeightsFeatureTensor = 1; -constexpr int kWeightsTimeTensor = 2; -constexpr int kBiasTensor = 3; -// This is a variable tensor, and will be modified by this op. -constexpr int kInputActivationStateTensor = 4; - -// Output tensor. -constexpr int kOutputTensor = 0; - -/** - * This version of SVDF is specific to TFLite Micro. It contains the following - * differences between the TFLite version: - * - * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time - * for the Micro interpreter. - * 2.) Output dimensions - the TFLite version determines output size and runtime - * and resizes the output tensor. Micro runtime does not support tensor - * resizing. 
- */ -static inline void ApplyTimeWeightsBiasAndActivation( - int batch_size, int memory_size, int num_filters, int num_units, int rank, - const float* const __restrict__ weights_time_ptr, - const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, - float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, - float* const __restrict__ output_ptr) { - // Compute matmul(activation_state, weights_time). - for (int b = 0; b < batch_size; ++b) { - // Perform batched vector dot product: - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - const float* vector1_ptr = weights_time_ptr; - const float* vector2_ptr = state_ptr + b * memory_size * num_filters; - for (int i = 0; i < num_filters; ++i) { - *scratch_ptr_batch = 0.f; - for (int j = 0; j < memory_size; ++j) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - - // Initialize output with bias if provided. - if (bias_ptr) { - // VectorBatchVectorAssign - for (int i = 0; i < batch_size; ++i) { - float* output_data = output_ptr + i * num_units; - const float* bias_data = bias_ptr; - for (int j = 0; j < num_units; ++j) { - *output_data++ = *bias_data++; - } - } - } else { - float* output_data = output_ptr; - for (int i = 0; i < batch_size * num_units; ++i) { - *output_data++ = 0.0f; - } - } - - // Reduction sum. - for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - - // Reduction sum vector - for (int i = 0; i < num_units; ++i) { - for (int j = 0; j < rank; j++) { - output_ptr_batch[i] += *scratch_ptr_batch++; - } - } - } - - // Apply activation. 
- for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - for (int i = 0; i < num_units; ++i) { - *output_ptr_batch = - tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); - ++output_ptr_batch; - } - } -} - -inline void EvalFloatSVDF( - TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* weights_feature, - const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, - const TfLiteSVDFParams* params, int scratch_tensor_index, - TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { - const int rank = params->rank; - const int batch_size = input->dims->data[0]; - const int input_size = input->dims->data[1]; - const int num_filters = weights_feature->dims->data[0]; - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - const float* weights_feature_ptr = - tflite::micro::GetTensorData(weights_feature); - const float* weights_time_ptr = - tflite::micro::GetTensorData(weights_time); - const float* bias_ptr = tflite::micro::GetTensorData(bias); - const float* input_ptr = tflite::micro::GetTensorData(input); - - float* state_ptr = tflite::micro::GetTensorData(activation_state); - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - float* scratch_ptr = static_cast( - context->GetScratchBuffer(context, scratch_tensor_index)); - - float* output_ptr = tflite::micro::GetTensorData(output); - - // Left shift the activation_state. - { - float* new_state_start = state_ptr; - const float* old_state_start = state_ptr + 1; - const float* old_state_end = - state_ptr + batch_size * num_filters * memory_size; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Compute conv1d(inputs, weights_feature). 
- // The activation_state's rightmost column is used to save current cycle - // activation. This is achieved by starting at state_ptr[memory_size - 1] and - // having the stride equal to memory_size. - - // Perform batched matrix vector multiply operation: - { - const float* matrix = weights_feature_ptr; - const float* vector = input_ptr; - float* result = &state_ptr[memory_size - 1]; - float* result_in_batch = result; - for (int i = 0; i < batch_size; ++i) { - const float* matrix_ptr = matrix; - for (int j = 0; j < num_filters; ++j) { - float dot_prod = 0.0f; - const float* vector_in_batch = vector + i * input_size; - for (int k = 0; k < input_size; ++k) { - dot_prod += *matrix_ptr++ * *vector_in_batch++; - } - *result_in_batch = dot_prod; - result_in_batch += memory_size; - } - } - } - - ApplyTimeWeightsBiasAndActivation( - batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, - bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); -} - -void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, const OpData& data) { - cmsis_nn_dims input_dims; - input_dims.n = input_tensor->dims->data[0]; - input_dims.h = input_tensor->dims->data[1]; - - cmsis_nn_dims weights_feature_dims; - weights_feature_dims.n = weights_feature_tensor->dims->data[0]; - weights_feature_dims.h = weights_feature_tensor->dims->data[1]; - - cmsis_nn_dims weights_time_dims; - weights_time_dims.n = weights_time_tensor->dims->data[0]; - weights_time_dims.h = weights_time_tensor->dims->data[1]; - - cmsis_nn_dims bias_dims; - bias_dims.n = bias_tensor->dims->data[0]; - - cmsis_nn_dims state_dims; - state_dims.n = bias_tensor->dims->data[0]; - state_dims.h = 
bias_tensor->dims->data[1]; - - cmsis_nn_dims output_dims; - output_dims.n = output_tensor->dims->data[0]; - output_dims.h = output_tensor->dims->data[1]; - - cmsis_nn_svdf_params svdf_params; - svdf_params.rank = params->rank; - svdf_params.input_offset = data.input_zero_point; - svdf_params.output_offset = data.output_zero_point; - - svdf_params.input_activation.min = INT16_MIN; - svdf_params.input_activation.max = INT16_MAX; - - svdf_params.output_activation.min = INT8_MIN; - svdf_params.output_activation.max = INT8_MAX; - - cmsis_nn_per_tensor_quant_params in_quant_params; - in_quant_params.multiplier = data.effective_scale_1_a; - in_quant_params.shift = data.effective_scale_1_b; - - cmsis_nn_per_tensor_quant_params out_quant_params; - out_quant_params.multiplier = data.effective_scale_2_a; - out_quant_params.shift = data.effective_scale_2_b; - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - cmsis_nn_context scratch_ctx; - scratch_ctx.buf = static_cast( - context->GetScratchBuffer(context, data.scratch_tensor_index)); - - cmsis_nn_context scratch_output_ctx; - scratch_output_ctx.buf = static_cast( - context->GetScratchBuffer(context, data.scratch_output_tensor_index)); - - int8_t* output_data = tflite::micro::GetTensorData(output_tensor); - arm_svdf_s8( - &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params, - &out_quant_params, &input_dims, - (int8_t*)tflite::micro::GetTensorData(input_tensor), &state_dims, - (int16_t*)tflite::micro::GetTensorData(activation_state_tensor), - &weights_feature_dims, - (int8_t*)tflite::micro::GetTensorData(weights_feature_tensor), - &weights_time_dims, - (int16_t*)tflite::micro::GetTensorData(weights_time_tensor), - &bias_dims, (int32_t*)tflite::micro::GetTensorData(bias_tensor), - &output_dims, output_data); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return 
context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - - const auto* params = static_cast(node->builtin_data); - - // Validate Tensor Inputs (dtype depends on quantization): - // [0] = Input, {2, batch_size, input_size} - // [1] = Weights Feature, {2, num_filters, input_size} - // [2] = Weights Time, {2, num_filters, memory_size} - // [3] = Bias (optional), {1, num_units} - // [4] = Activation State (variable), - // {2, batch_size, memory_size * num_filters} - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* weights_feature = - GetInput(context, node, kWeightsFeatureTensor); - const TfLiteTensor* weights_time = - GetInput(context, node, kWeightsTimeTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - const TfLiteTensor* activation_state = - GetInput(context, node, kInputActivationStateTensor); - - // Define input constants based on input tensor definition above: - const int rank = params->rank; - const int input_size = input->dims->data[1]; - const int batch_size = input->dims->data[0]; - const int num_filters = weights_feature->dims->data[0]; - TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - // Validate Input Tensor: - TF_LITE_ENSURE(context, - input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); - - // Validate Tensor Output: - // [0] = float/int8, {2, batch_size, num_units} - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); - TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); - - // Validate 
Weights Feature Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); - TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); - - // Validate Weights Time Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); - - // Validate Optional Bias Input Tensor: - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); - } - - // Validate Activation State Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], - memory_size * num_filters); - // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. - TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); - - TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); - } - - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - - const double effective_scale_1 = static_cast( - input->params.scale * weights_feature->params.scale / - activation_state->params.scale); - const double effective_scale_2 = - static_cast(activation_state->params.scale * - weights_time->params.scale / output->params.scale); - - // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. 
- TF_LITE_ENSURE( - context, - std::abs(static_cast(bias->params.scale) - - static_cast(activation_state->params.scale * - weights_time->params.scale)) < 1e-5); - - QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), - &(data->effective_scale_1_b)); - QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), - &(data->effective_scale_2_b)); - - data->input_zero_point = input->params.zero_point; - data->output_zero_point = output->params.zero_point; - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(int32_t), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - - const TfLiteStatus scratch_output_status = - context->RequestScratchBufferInArena( - context, batch_size * num_units * sizeof(int32_t), - &(data->scratch_output_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_output_status); - } else { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); - } - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(float), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const 
TfLiteEvalTensor* weights_feature = - tflite::micro::GetEvalInput(context, node, kWeightsFeatureTensor); - const TfLiteEvalTensor* weights_time = - tflite::micro::GetEvalInput(context, node, kWeightsTimeTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 5) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( - context, node, kInputActivationStateTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (weights_feature->type) { - case kTfLiteFloat32: { - EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias, - params, data.scratch_tensor_index, activation_state, - output); - return kTfLiteOk; - break; - } - - case kTfLiteInt8: { - EvalIntegerSVDF(context, node, input, weights_feature, weights_time, bias, - params, activation_state, output, data); - return kTfLiteOk; - break; - } - - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(weights_feature->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SVDF() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/add.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/add.cpp new file mode 100644 index 0000000..99395af --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/add.cpp @@ -0,0 +1,420 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/add.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + + int output_shift; + int left_shift; + + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; +}; 
+ +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, TfLiteTensor* output, + OpData* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = (output->type == kTfLiteInt16) ? 15 : 20; + const double twice_max_input_scale = + 2 * static_cast( + std::max(input1->params.scale, input2->params.scale)); + const double real_input1_multiplier = + static_cast(input1->params.scale) / twice_max_input_scale; + const double real_input2_multiplier = + static_cast(input2->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } else if (output->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + return kTfLiteOk; +} + +void UpdateOpParams(tflite::ArithmeticParams* const op_params, + const OpData* data) { + op_params->left_shift = data->left_shift; + op_params->input1_offset = data->input1_offset; + op_params->input1_multiplier = data->input1_multiplier; + 
op_params->input1_shift = data->input1_shift; + op_params->input2_offset = data->input2_offset; + op_params->input2_multiplier = data->input2_multiplier; + op_params->input2_shift = data->input2_shift; + op_params->output_offset = data->output_offset; + op_params->output_multiplier = data->output_multiplier; + op_params->output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + op_params); +} + +TfLiteStatus EvalAddQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + UpdateOpParams(&op_params, data); + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + arm_elementwise_add_s8( + tflite::micro::GetTensorData(input1), + + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input1_multiplier, op_params.input1_shift, + op_params.input2_offset, op_params.input2_multiplier, + op_params.input2_shift, op_params.left_shift, + tflite::micro::GetTensorData(output), op_params.output_offset, + op_params.output_multiplier, op_params.output_shift, + op_params.quantized_activation_min, op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAddQuantizedInt16(TfLiteContext* context, TfLiteNode* node, + 
TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + UpdateOpParams(&op_params, data); + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + arm_elementwise_add_s16( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input1_multiplier, op_params.input1_shift, + op_params.input2_offset, op_params.input2_multiplier, + op_params.input2_shift, op_params.left_shift, + tflite::micro::GetTensorData(output), op_params.output_offset, + op_params.output_multiplier, op_params.output_shift, + op_params.quantized_activation_min, op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + switch (output->type) { + case kTfLiteFloat32: { + tflite::ArithmeticParams op_params; + SetActivationParams(data->output_activation_min_f32, + data->output_activation_max_f32, &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + 
tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + case kTfLiteInt32: { + tflite::ArithmeticParams op_params; + SetActivationParams(std::numeric_limits::lowest(), + std::numeric_limits::max(), &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + switch (output->type) { + case kTfLiteInt8: { + EvalAddQuantizedInt8(context, node, params, data, input1, input2, output); + break; + } + case kTfLiteInt16: { + EvalAddQuantizedInt16(context, node, params, data, input1, input2, + output); + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} 
// namespace + +void* InitAdd(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus PrepareAdd(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, input1->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input1->type == kTfLiteFloat32 || input1->type == kTfLiteInt32 || + input1->type == kTfLiteInt16 || input1->type == kTfLiteInt8, + "Input data type not supported"); + TF_LITE_ENSURE_MSG(context, input1->type == input2->type, + "Hybrid models are not supported on TFLite Micro."); + + if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + } + + OpData* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpData(context, params, input1, input2, output, data)); + + if (output->type == kTfLiteInt32) { + // Only support int32 unquantized add for now. 
+ TF_LITE_ENSURE_EQ(context, input1->quantization.type, + kTfLiteNoQuantization); + TF_LITE_ENSURE_EQ(context, input2->quantization.type, + kTfLiteNoQuantization); + } + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { + TF_LITE_ENSURE_OK( + context, EvalAdd(context, node, params, data, input1, input2, output)); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, + input1, input2, output)); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAddInt8(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(output->type == kTfLiteInt8); + const OpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_OK(context, EvalAddQuantizedInt8(context, node, 
params, data, + input1, input2, output)); + + return kTfLiteOk; +} + +TfLiteStatus EvalAddInt16(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(output->type == kTfLiteInt16); + const OpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_OK(context, EvalAddQuantizedInt16(context, node, params, data, + input1, input2, output)); + + return kTfLiteOk; +} + +TFLMRegistration Register_ADD() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAdd); +} + +TFLMRegistration Register_ADD_INT8() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAddInt8); +} + +TFLMRegistration Register_ADD_INT16() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAddInt16); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/batch_matmul.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/batch_matmul.cpp new file mode 100644 index 0000000..6cb23ff --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/batch_matmul.cpp @@ -0,0 +1,535 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/batch_matmul.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/batch_matmul.h" +#include "tensorflow/lite/kernels/internal/reference/transpose.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataBatchMatmul reference_op_data; + + cmsis_nn_dims output_shape; + + int buffer_idx; +}; + +cmsis_nn_dims FillVariableShape(int32_t rank, int32_t* tensor_dims) { + if (rank == 4) { + return {tensor_dims[0], tensor_dims[1], tensor_dims[2], tensor_dims[3]}; + } else if (rank == 3) { + return {1, tensor_dims[0], tensor_dims[1], tensor_dims[2]}; + } else if (rank == 2) { + return {1, 1, tensor_dims[0], tensor_dims[1]}; + } else { + return {1, 1, 1, 1}; + } +} + +inline TfLiteStatus PopulateEvalData( + TfLiteContext* context, OpData* data, const TfLiteBatchMatMulParams* params, + TfLiteNode* node, const TfLiteEvalTensor* original_lhs_input, + RuntimeShape* lhs_shape, TfLiteEvalTensor** updated_lhs_input, + const TfLiteEvalTensor* original_rhs_input, RuntimeShape* rhs_shape, + TfLiteEvalTensor** updated_rhs_input, const TfLiteEvalTensor* output) { + RuntimeShape orig_out_shape = tflite::micro::GetTensorShape(output); + + *updated_rhs_input = params->adj_y + ? 
const_cast(original_rhs_input) + : data->reference_op_data.rhs_transposed_tensor; + *updated_lhs_input = params->adj_x + ? data->reference_op_data.lhs_transposed_tensor + : const_cast(original_lhs_input); + + TF_LITE_ENSURE(context, *updated_rhs_input != nullptr); + TF_LITE_ENSURE(context, *updated_lhs_input != nullptr); + if (!params->adj_y) { + // TODO(b/154760341): Constant tensors should already be transposed, but + // we transpose once if necessary for now. + if (!(data->reference_op_data.rhs_is_constant_tensor && + data->reference_op_data.rhs_is_transposed)) { + TransposeRowsColumns(*original_rhs_input, *updated_rhs_input); + data->reference_op_data.rhs_is_transposed = true; + } + } + if (params->adj_x) { + TransposeRowsColumns(*original_lhs_input, *updated_lhs_input); + } + + // Compress BatchMatMul when third from last RHS dimension is one. + int32_t rhs_dims_count = rhs_shape->DimensionsCount(); + int32_t lhs_dims_count = lhs_shape->DimensionsCount(); + int32_t out_dims_count = orig_out_shape.DimensionsCount(); + // Compress ops where rhs shape is [..., 1, X, Y] and lhs shape is + // [..., Q, R, S] which is equivalent to rhs: [..., X, Y] and + // lhs: [..., Q * R, S]. 
+ if (rhs_dims_count > 2 && lhs_dims_count > 2) { + int rhs_one = rhs_shape->DimsData()[rhs_dims_count - 3]; + if (rhs_one == 1) { + int32_t* lhs_dims = lhs_shape->DimsData(); + int32_t* rhs_dims = rhs_shape->DimsData(); + int32_t* out_dims = orig_out_shape.DimsData(); + RuntimeShape tmp_l(lhs_dims_count - 1, lhs_dims); + tmp_l.SetDim(lhs_dims_count - 3, + lhs_dims[lhs_dims_count - 3] * lhs_dims[lhs_dims_count - 2]); + tmp_l.SetDim(lhs_dims_count - 2, lhs_dims[lhs_dims_count - 1]); + lhs_shape->ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData()); + RuntimeShape tmp_r(rhs_dims_count - 1, rhs_shape->DimsData()); + tmp_r.SetDim(rhs_dims_count - 3, rhs_dims[rhs_dims_count - 2]); + tmp_r.SetDim(rhs_dims_count - 2, rhs_dims[rhs_dims_count - 1]); + rhs_shape->ReplaceWith(tmp_r.DimensionsCount(), tmp_r.DimsData()); + rhs_dims_count = rhs_shape->DimensionsCount(); + lhs_dims_count = lhs_shape->DimensionsCount(); + + RuntimeShape tmp_o(out_dims_count - 1, out_dims); + tmp_o.SetDim(out_dims_count - 3, lhs_shape->Dims(lhs_dims_count - 2)); + tmp_o.SetDim(out_dims_count - 2, orig_out_shape.Dims(out_dims_count - 1)); + orig_out_shape.ReplaceWith(tmp_o.DimensionsCount(), tmp_o.DimsData()); + out_dims_count = orig_out_shape.DimensionsCount(); + data->output_shape = + FillVariableShape(out_dims_count, orig_out_shape.DimsData()); + } + } + + if (!params->adj_y) { + RuntimeShape tmp_r = SwapRowColumnDims(*rhs_shape); + rhs_shape->ReplaceWith(tmp_r.DimensionsCount(), tmp_r.DimsData()); + } + // ReferenceOps and CMSIS-NN have different requirements for when the + // lhs shape should be transposed, so we have to treat float differently. 
+ if (!params->adj_x && original_lhs_input->type == kTfLiteFloat32) { + RuntimeShape tmp_l = SwapRowColumnDims(*lhs_shape); + lhs_shape->ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData()); + } else if (params->adj_x && original_lhs_input->type != kTfLiteFloat32) { + RuntimeShape tmp_l = SwapRowColumnDims(*lhs_shape); + lhs_shape->ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData()); + } + + return kTfLiteOk; +} + +TfLiteEvalTensor* AllocInitTransposeTensorFromTfLiteTensor( + TfLiteContext* context, MicroContext* micro_context, + const TfLiteTensor& tensor) { + TfLiteEvalTensor* eval_tensor = static_cast( + micro_context->AllocatePersistentBuffer(sizeof(TfLiteEvalTensor))); + if (eval_tensor == nullptr) { + return nullptr; + } + + eval_tensor->type = tensor.type; + + const int tensor_rank = NumDimensions(&tensor); + const size_t eval_dims_size = TfLiteIntArrayGetSizeInBytes(tensor_rank); + eval_tensor->dims = static_cast( + micro_context->AllocatePersistentBuffer(eval_dims_size)); + if (eval_tensor->dims == nullptr) { + return nullptr; + } + eval_tensor->dims->size = tensor_rank; + for (int i = 0; i < tensor_rank - 2; ++i) { + eval_tensor->dims->data[i] = tensor.dims->data[i]; + } + // Swap last two dimensions. 
+ eval_tensor->dims->data[tensor_rank - 2] = tensor.dims->data[tensor_rank - 1]; + eval_tensor->dims->data[tensor_rank - 1] = tensor.dims->data[tensor_rank - 2]; + + const size_t eval_data_size = static_cast(NumElements(&tensor)) * + TfLiteTypeGetSize(tensor.type); + eval_tensor->data.data = + micro_context->AllocatePersistentBuffer(eval_data_size); + if (eval_tensor->data.data == nullptr) { + return nullptr; + } + + return eval_tensor; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* lhs_input = + micro_context->AllocateTempInputTensor(node, kBatchMatmulInputLhsTensor); + TF_LITE_ENSURE(context, lhs_input != nullptr); + TfLiteTensor* rhs_input = + micro_context->AllocateTempInputTensor(node, kBatchMatmulInputRhsTensor); + TF_LITE_ENSURE(context, rhs_input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kBatchMatmulOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, lhs_input->type, rhs_input->type); + TF_LITE_ENSURE_EQ(context, lhs_input->type, output->type); + TF_LITE_ENSURE_MSG(context, + lhs_input->type == kTfLiteFloat32 || + lhs_input->type == kTfLiteInt16 || + lhs_input->type == kTfLiteInt8, + "Input data type not supported"); + + const int lhs_rank = NumDimensions(lhs_input); + const int rhs_rank = NumDimensions(rhs_input); + + TF_LITE_ENSURE(context, lhs_rank >= 2); + TF_LITE_ENSURE(context, lhs_rank <= 4); + TF_LITE_ENSURE(context, rhs_rank >= 2); + TF_LITE_ENSURE(context, 
rhs_rank <= 4); + + data->reference_op_data.rhs_is_transposed = false; + data->reference_op_data.lhs_is_constant_tensor = IsConstantTensor(lhs_input); + data->reference_op_data.rhs_is_constant_tensor = IsConstantTensor(rhs_input); + + const int output_rank = std::max(lhs_rank, rhs_rank); + TFLITE_DCHECK_GE(output_rank, 2); + TFLITE_DCHECK_LE(output_rank, 4); + + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_input)); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_input)); + + // Ensure any batch dimensions obey broacasting rules. + for (int i = 0; i < output_rank - 2; ++i) { + const int lhs_dim = extended_lhs_shape.Dims(i); + const int rhs_dim = extended_rhs_shape.Dims(i); + if (lhs_dim != rhs_dim) { + if (lhs_dim != 1) { + TF_LITE_ENSURE_EQ(context, rhs_dim, 1); + } + } + } + + bool adj_x = params->adj_x; + bool adj_y = params->adj_y; + // Ensure other dimensions work for matrix multiplication. + int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) + : extended_lhs_shape.Dims(output_rank - 1); + int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) + : extended_rhs_shape.Dims(output_rank - 2); + + TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); + + // Tensor for transposed LHS; + if (adj_x) { + data->reference_op_data.lhs_transposed_tensor = + AllocInitTransposeTensorFromTfLiteTensor(context, micro_context, + *lhs_input); + TF_LITE_ENSURE(context, + data->reference_op_data.lhs_transposed_tensor != nullptr); + } + + // If RHS needs to be transposed, then it is actually in the correct shape + // already. 
+ if (!adj_y) { + data->reference_op_data.rhs_transposed_tensor = + AllocInitTransposeTensorFromTfLiteTensor(context, micro_context, + *rhs_input); + TF_LITE_ENSURE(context, + data->reference_op_data.rhs_transposed_tensor != nullptr); + } + + TF_LITE_ENSURE_STATUS(ReshapeOutputTensor(context, node, extended_lhs_shape, + extended_rhs_shape, adj_x, adj_y, + output_rank, output)); + + data->output_shape = FillVariableShape( + output_rank, reinterpret_cast(output->dims->data)); + + int buf_size = 0; + if (lhs_input->type != kTfLiteFloat32 && rhs_input->type != kTfLiteFloat32) { + data->reference_op_data.quantization = + static_castreference_op_data.quantization)>( + micro_context->AllocatePersistentBuffer( + sizeof(*data->reference_op_data.quantization))); + TF_LITE_ENSURE(context, data->reference_op_data.quantization != nullptr); + + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, lhs_input, rhs_input, output, &real_multiplier)); + QuantizeMultiplier(real_multiplier, + &data->reference_op_data.quantization->output_multiplier, + &data->reference_op_data.quantization->output_shift); + + data->reference_op_data.quantization->lhs_zero_point = + lhs_input->params.zero_point; + data->reference_op_data.quantization->rhs_zero_point = + rhs_input->params.zero_point; + data->reference_op_data.quantization->output_zero_point = + output->params.zero_point; + + if (lhs_input->type == kTfLiteInt8) { + data->reference_op_data.quantization->output_activation_min = + std::numeric_limits::min(); + data->reference_op_data.quantization->output_activation_max = + std::numeric_limits::max(); + + data->buffer_idx = -1; + buf_size = arm_fully_connected_s8_get_buffer_size(&data->output_shape); + } else { + data->reference_op_data.quantization->output_activation_min = + std::numeric_limits::min(); + data->reference_op_data.quantization->output_activation_max = + std::numeric_limits::max(); + + TF_LITE_ENSURE_EQ(context, 
lhs_input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, rhs_input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + } + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(lhs_input); + micro_context->DeallocateTempTfLiteTensor(rhs_input); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const TfLiteEvalTensor* original_lhs_input = + tflite::micro::GetEvalInput(context, node, kBatchMatmulInputLhsTensor); + const TfLiteEvalTensor* original_rhs_input = + tflite::micro::GetEvalInput(context, node, kBatchMatmulInputRhsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); + + OpData& data = *(static_cast(node->user_data)); + const auto* params = + static_cast(node->builtin_data); + + RuntimeShape rhs_shape = tflite::micro::GetTensorShape(original_rhs_input); + RuntimeShape lhs_shape = tflite::micro::GetTensorShape(original_lhs_input); + TfLiteEvalTensor* updated_lhs_input; + TfLiteEvalTensor* updated_rhs_input; + + TF_LITE_ENSURE_STATUS( + PopulateEvalData(context, &data, params, node, original_lhs_input, + &lhs_shape, &updated_lhs_input, original_rhs_input, + &rhs_shape, &updated_rhs_input, output)); + + cmsis_nn_dims rhs_dims = + FillVariableShape(rhs_shape.DimensionsCount(), rhs_shape.DimsData()); + cmsis_nn_dims lhs_dims = + FillVariableShape(lhs_shape.DimensionsCount(), lhs_shape.DimsData()); + + cmsis_nn_per_tensor_quant_params quant_params = { + data.reference_op_data.quantization->output_multiplier, + data.reference_op_data.quantization->output_shift}; + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data.buffer_idx > -1) { + 
ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + // Note: ctx.size is currently not used in cmsis_nn. + // The buffer should be allocated in the prepare function through + // the corresponding arm_convolve_wrapper_[type]_get_buffer_size + } + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.quantization->lhs_zero_point; + fc_params.filter_offset = + -data.reference_op_data.quantization->rhs_zero_point; + fc_params.output_offset = + data.reference_op_data.quantization->output_zero_point; + + cmsis_nn_activation activation; + activation.min = data.reference_op_data.quantization->output_activation_min; + activation.max = data.reference_op_data.quantization->output_activation_max; + fc_params.activation = activation; + + cmsis_nn_bmm_params bmm_params = { + params->adj_x, + params->adj_y, + fc_params, + }; + + TF_LITE_ENSURE_EQ( + context, + arm_batch_matmul_s8( + &ctx, &bmm_params, &quant_params, &lhs_dims, + tflite::micro::GetTensorData(updated_lhs_input), &rhs_dims, + tflite::micro::GetTensorData(updated_rhs_input), + &data.output_shape, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const TfLiteEvalTensor* original_lhs_input = + tflite::micro::GetEvalInput(context, node, kBatchMatmulInputLhsTensor); + const TfLiteEvalTensor* original_rhs_input = + tflite::micro::GetEvalInput(context, node, kBatchMatmulInputRhsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); + + OpData& data = *(static_cast(node->user_data)); + const auto* params = + static_cast(node->builtin_data); + + RuntimeShape rhs_shape = tflite::micro::GetTensorShape(original_rhs_input); + RuntimeShape lhs_shape = tflite::micro::GetTensorShape(original_lhs_input); + + // These pointers will be 
updated to point at the actual tensor being used in + // the batch matmul function + TfLiteEvalTensor* updated_lhs_input; + TfLiteEvalTensor* updated_rhs_input; + + TF_LITE_ENSURE_STATUS( + PopulateEvalData(context, &data, params, node, original_lhs_input, + &lhs_shape, &updated_lhs_input, original_rhs_input, + &rhs_shape, &updated_rhs_input, output)); + + cmsis_nn_dims rhs_dims = + FillVariableShape(rhs_shape.DimensionsCount(), rhs_shape.DimsData()); + cmsis_nn_dims lhs_dims = + FillVariableShape(lhs_shape.DimensionsCount(), lhs_shape.DimsData()); + + cmsis_nn_per_tensor_quant_params quant_params = { + data.reference_op_data.quantization->output_multiplier, + data.reference_op_data.quantization->output_shift}; + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.quantization->lhs_zero_point; + fc_params.filter_offset = + -data.reference_op_data.quantization->rhs_zero_point; + fc_params.output_offset = + data.reference_op_data.quantization->output_zero_point; + + cmsis_nn_activation activation; + activation.min = data.reference_op_data.quantization->output_activation_min; + activation.max = data.reference_op_data.quantization->output_activation_max; + fc_params.activation = activation; + + cmsis_nn_bmm_params bmm_params = { + params->adj_x, + params->adj_y, + fc_params, + }; + + TF_LITE_ENSURE_EQ( + context, + arm_batch_matmul_s16( + &ctx, &bmm_params, &quant_params, &lhs_dims, + tflite::micro::GetTensorData(updated_lhs_input), &rhs_dims, + tflite::micro::GetTensorData(updated_rhs_input), + &data.output_shape, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + // Checks in Prepare ensure input, output and filter types are all the same. 
+ TFLITE_DCHECK(node->builtin_data != nullptr); + const TfLiteEvalTensor* original_lhs_input = + tflite::micro::GetEvalInput(context, node, kBatchMatmulInputLhsTensor); + switch (original_lhs_input->type) { + case kTfLiteFloat32: { + const TfLiteEvalTensor* original_rhs_input = tflite::micro::GetEvalInput( + context, node, kBatchMatmulInputRhsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kBatchMatmulOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + OpData& data = *(static_cast(node->user_data)); + const auto* params = + static_cast(node->builtin_data); + + RuntimeShape rhs_shape = + tflite::micro::GetTensorShape(original_rhs_input); + RuntimeShape lhs_shape = + tflite::micro::GetTensorShape(original_lhs_input); + TfLiteEvalTensor* updated_lhs_input; + TfLiteEvalTensor* updated_rhs_input; + + TF_LITE_ENSURE_STATUS( + PopulateEvalData(context, &data, params, node, original_lhs_input, + &lhs_shape, &updated_lhs_input, original_rhs_input, + &rhs_shape, &updated_rhs_input, output)); + + // Note we pass RHS args first, LHS args second. 
+ reference_ops::BatchMatMul( + rhs_shape, tflite::micro::GetTensorData(updated_rhs_input), + lhs_shape, tflite::micro::GetTensorData(updated_lhs_input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + case kTfLiteInt8: + return EvalInt8(context, node); + case kTfLiteInt16: + return EvalInt16(context, node); + default: { + MicroPrintf("CMSIS-NN Batch Matmul: Type %s (%d) not supported.", + TfLiteTypeGetName(original_lhs_input->type), + original_lhs_input->type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_BATCH_MATMUL() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TFLMRegistration Register_BATCH_MATMUL_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TFLMRegistration Register_BATCH_MATMUL_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/conv.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/conv.cpp new file mode 100644 index 0000000..e425f83 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/conv.cpp @@ -0,0 +1,500 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/conv.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataConv reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + int32_t buf_size = 0; + const auto& params = + *(static_cast(node->builtin_data)); + OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempOutputTensor(node, kConvBiasTensor); + TfLiteType bias_type = bias != nullptr ? 
bias->type : kTfLiteNoType; + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, + input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8, + "Input data type not supported"); + TF_LITE_ENSURE_MSG( + context, + (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && + (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)), + "Hybrid models are not supported on TFLite Micro."); + + // Consistency check tensor dims + // Dimensionality + TF_LITE_ENSURE_EQ(context, input->dims->size, 4); + TF_LITE_ENSURE_EQ(context, filter->dims->size, 4); + TF_LITE_ENSURE_EQ(context, output->dims->size, 4); + // Equal batch size in input and output + TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); + // Input channels should be an even multiple of filter channels + TF_LITE_ENSURE(context, filter->dims->data[3] > 0); + TF_LITE_ENSURE_EQ(context, input->dims->data[3] % filter->dims->data[3], 0); + // Output channels should be an even multiple of the number of groups + const int groups = input->dims->data[3] / filter->dims->data[3]; + TFLITE_DCHECK_EQ(output->dims->data[3] % groups, 0); + // Bias size equal to output channels + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->dims->size, 4); + const int bias_size = NumElements(bias->dims); + TFLITE_DCHECK_EQ(bias_size, output->dims->data[3]); + } + + // Initialize cmsis_nn dimensions + cmsis_nn_dims input_dims; + input_dims.n = input->dims->data[0]; + input_dims.h = input->dims->data[1]; + input_dims.w = input->dims->data[2]; + input_dims.c = input->dims->data[3]; + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = filter->dims->data[1]; + filter_dims.w = filter->dims->data[2]; + filter_dims.c = filter->dims->data[3]; + + cmsis_nn_dims output_dims; + output_dims.n = output->dims->data[0]; + 
output_dims.h = output->dims->data[1]; + output_dims.w = output->dims->data[2]; + output_dims.c = output->dims->data[3]; + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->reference_op_data.per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->reference_op_data.per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataConv( + context, node, params, input_dims.w, input_dims.h, filter_dims.w, + filter_dims.h, output_dims.w, output_dims.h, input->type, + &data->reference_op_data)); + + // CMSIS_NN allows INT64 or nullptr bias data pointer + if (input->type == kTfLiteInt8 || + (input->type == kTfLiteInt16 && + (bias_type == kTfLiteInt64 || bias_type == kTfLiteNoType))) { + // Initialize cmsis_nn convolution parameters + cmsis_nn_conv_params conv_params; + conv_params.input_offset = -input->params.zero_point; + conv_params.output_offset = output->params.zero_point; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.dilation.h = params.dilation_height_factor; + conv_params.dilation.w = params.dilation_width_factor; + conv_params.padding.h = data->reference_op_data.padding.height; + conv_params.padding.w = data->reference_op_data.padding.width; + conv_params.activation.min = data->reference_op_data.output_activation_min; + conv_params.activation.max = data->reference_op_data.output_activation_max; + + if (input->type == kTfLiteInt8) { + buf_size = arm_convolve_wrapper_s8_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + } else if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + buf_size = 
arm_convolve_wrapper_s16_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + + return kTfLiteOk; +} + +template +arm_cmsis_nn_status convolve_wrapper( + const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params, + const cmsis_nn_per_channel_quant_params* quant_params, + const cmsis_nn_dims* input_dims, const ActType* input, + const cmsis_nn_dims* filter_dims, const int8_t* filter, + const cmsis_nn_dims* bias_dims, const BiasType* bias, + const cmsis_nn_dims* output_dims, ActType* output, WeigthsType weightsT) { + return ARM_CMSIS_NN_ARG_ERROR; +} + +template <> +arm_cmsis_nn_status convolve_wrapper( + const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params, + const cmsis_nn_per_channel_quant_params* quant_params, + const cmsis_nn_dims* input_dims, const int8_t* input, + const cmsis_nn_dims* filter_dims, const int8_t* filter, + const cmsis_nn_dims* bias_dims, const int32_t* bias, + const cmsis_nn_dims* output_dims, int8_t* output, TfLiteType weightsT) { + if (weightsT == kTfLiteInt8) { + return arm_convolve_wrapper_s8(ctx, conv_params, quant_params, input_dims, + input, filter_dims, filter, bias_dims, bias, + output_dims, output); + } else if (weightsT == kTfLiteInt4) { + return arm_convolve_wrapper_s4(ctx, conv_params, quant_params, input_dims, + input, filter_dims, filter, bias_dims, bias, + output_dims, output); + } else { + return ARM_CMSIS_NN_ARG_ERROR; + } +} + +template <> +arm_cmsis_nn_status convolve_wrapper( + const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params, + const 
cmsis_nn_per_channel_quant_params* quant_params, + const cmsis_nn_dims* input_dims, const int16_t* input, + const cmsis_nn_dims* filter_dims, const int8_t* filter, + const cmsis_nn_dims* bias_dims, const int64_t* bias, + const cmsis_nn_dims* output_dims, int16_t* output, TfLiteType weightsT) { + const cmsis_nn_bias_data bias_data = {bias, false}; + + return arm_convolve_wrapper_s16(ctx, conv_params, quant_params, input_dims, + input, filter_dims, filter, bias_dims, + &bias_data, output_dims, output); +} + +template <> +arm_cmsis_nn_status convolve_wrapper( + const cmsis_nn_context* ctx, const cmsis_nn_conv_params* conv_params, + const cmsis_nn_per_channel_quant_params* quant_params, + const cmsis_nn_dims* input_dims, const int16_t* input, + const cmsis_nn_dims* filter_dims, const int8_t* filter, + const cmsis_nn_dims* bias_dims, const int32_t* bias, + const cmsis_nn_dims* output_dims, int16_t* output, TfLiteType weightsT) { + const cmsis_nn_bias_data bias_data = {bias, true}; + + return arm_convolve_wrapper_s16(ctx, conv_params, quant_params, input_dims, + input, filter_dims, filter, bias_dims, + &bias_data, output_dims, output); +} + +template +TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_conv_params conv_params; + conv_params.dilation.h = params.dilation_height_factor; + conv_params.dilation.w = params.dilation_width_factor; + + // Initialize cmsis_nn convolution parameters + conv_params.input_offset = -data.reference_op_data.input_zero_point; + conv_params.output_offset = data.reference_op_data.output_zero_point; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.padding.h = data.reference_op_data.padding.height; + conv_params.padding.w = data.reference_op_data.padding.width; + 
conv_params.activation.min = data.reference_op_data.output_activation_min; + conv_params.activation.max = data.reference_op_data.output_activation_max; + + // Initialize cmsis_nn per channel quantization parameters + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = const_cast( + data.reference_op_data.per_channel_output_multiplier); + quant_params.shift = + const_cast(data.reference_op_data.per_channel_output_shift); + + // Initialize cmsis_nn dimension structs, consistency is checked in the + // prepare stage + cmsis_nn_dims input_dims; + input_dims.n = input->dims->data[0]; + input_dims.h = input->dims->data[1]; + input_dims.w = input->dims->data[2]; + input_dims.c = input->dims->data[3]; + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = filter->dims->data[1]; + filter_dims.w = filter->dims->data[2]; + filter_dims.c = filter->dims->data[3]; + + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = output->dims->data[3]; + + cmsis_nn_dims output_dims; + output_dims.n = output->dims->data[0]; + output_dims.h = output->dims->data[1]; + output_dims.w = output->dims->data[2]; + output_dims.c = output->dims->data[3]; + + // Initialize cmsis_nn context + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + // Note: ctx.size is currently not used in cmsis_nn. 
+ // The buffer should be allocated in the prepare function through + // the corresponding arm_convolve_wrapper_[type]_get_buffer_size + } + + // arm_convolve_wrapper_[type] dispatches the optimized kernel accordingly + // with the parameters passed + TFLITE_DCHECK_EQ( + convolve_wrapper( + &ctx, &conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output), type), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? 
tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); +} + +TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + if (bias == nullptr || bias->type == kTfLiteInt32) { + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); + } else if (bias->type == kTfLiteInt64) { + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); + } else { + MicroPrintf("Bias type %s (%d) not supported.", + TfLiteTypeGetName(bias->type), bias->type); + return kTfLiteError; + } +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? 
tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4), + "Hybrid models are not supported on TFLite Micro."); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: { + tflite::reference_ops::Conv( + ConvParamsFloat(params, data.reference_op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: { + switch (filter->type) { + case kTfLiteInt4: { + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); + } + case kTfLiteInt8: { + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + } + break; + } + case kTfLiteInt16: { + if (bias == nullptr || bias->type == kTfLiteInt32) { + return EvalQuantizedPerChannel( + context, node, params, data, input, filter, bias, output); + } else if (bias->type == kTfLiteInt64) { + return EvalQuantizedPerChannel( + context, node, params, data, input, 
filter, bias, output); + } else { + MicroPrintf("Bias type %s (%d) not supported.", + TfLiteTypeGetName(bias->type), bias->type); + return kTfLiteError; + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TFLMRegistration Register_CONV_2D_INT4() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt4); +} + +TFLMRegistration Register_CONV_2D_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TFLMRegistration Register_CONV_2D_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16x8); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cpp new file mode 100644 index 0000000..6cd5f21 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cpp @@ -0,0 +1,527 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/depthwise_conv.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataConv reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +// Always inline for optimal code size. 
+void PopulateDwConvParams( + cmsis_nn_dw_conv_params* const dw_conv_params, + cmsis_nn_per_channel_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, cmsis_nn_dims* const output_dims, + const TfLiteDepthwiseConvParams& params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) + __attribute__((always_inline)); + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kDepthwiseConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, + input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8, + "Input data type not supported"); + TF_LITE_ENSURE_MSG( + context, + (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && + (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)), + "Hybrid models are not 
supported on TFLite Micro."); + + const TfLiteType data_type = input->type; + int input_width = SizeOfDimension(input, 2); + int input_height = SizeOfDimension(input, 1); + int filter_width = SizeOfDimension(filter, 2); + int filter_height = SizeOfDimension(filter, 1); + int output_width = SizeOfDimension(output, 2); + int output_height = SizeOfDimension(output, 1); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + } + + // All per-channel quantized tensors need valid zero point and scale arrays. + const auto* affine_quantization = + reinterpret_cast( + filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + + // Allocate memory for per-channel quantization parameters + const int num_channels = + filter->dims->data[kDepthwiseConvQuantizedDimension]; + + data->reference_op_data.per_channel_output_multiplier = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->reference_op_data.per_channel_output_shift = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, data_type, + &data->reference_op_data)); + + if (input->type == kTfLiteInt8) { + 
RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + RuntimeShape filter_shape = GetTensorShape(filter); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth = MatchingDim(output_shape, 3, filter_shape, 3); + TFLITE_DCHECK_EQ(batch_size, 1); /* Only batch = 1 is supported */ + + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_height; + input_dims.w = input_width; + input_dims.c = input_shape.Dims(3); + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = filter_height; + filter_dims.w = filter_width; + filter_dims.c = output_depth; + + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_height; + output_dims.w = output_width; + output_dims.c = output_depth; + + cmsis_nn_dw_conv_params dw_conv_params; + dw_conv_params.padding.h = data->reference_op_data.padding.height; + dw_conv_params.padding.w = data->reference_op_data.padding.width; + dw_conv_params.dilation.h = params.dilation_height_factor; + dw_conv_params.dilation.w = params.dilation_width_factor; + + int32_t buf_size = 0; + if (filter->type == kTfLiteInt8) { + buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size( + &dw_conv_params, &input_dims, &filter_dims, &output_dims); + } else if (filter->type == kTfLiteInt4) { + buf_size = arm_depthwise_conv_wrapper_s4_get_buffer_size( + &dw_conv_params, &input_dims, &filter_dims, &output_dims); + } else { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + 
micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + + return kTfLiteOk; +} + +inline void PopulateDwConvParams( + cmsis_nn_dw_conv_params* const dw_conv_params, + cmsis_nn_per_channel_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, cmsis_nn_dims* const output_dims, + const TfLiteDepthwiseConvParams& params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { + dw_conv_params->dilation.h = params.dilation_height_factor; + dw_conv_params->dilation.w = params.dilation_width_factor; + + dw_conv_params->input_offset = -data.reference_op_data.input_zero_point; + dw_conv_params->output_offset = data.reference_op_data.output_zero_point; + dw_conv_params->stride.h = params.stride_height; + dw_conv_params->stride.w = params.stride_width; + dw_conv_params->padding.h = data.reference_op_data.padding.height; + dw_conv_params->padding.w = data.reference_op_data.padding.width; + + dw_conv_params->activation.min = data.reference_op_data.output_activation_min; + dw_conv_params->activation.max = data.reference_op_data.output_activation_max; + + dw_conv_params->ch_mult = params.depth_multiplier; + + quant_params->multiplier = + data.reference_op_data.per_channel_output_multiplier; + quant_params->shift = data.reference_op_data.per_channel_output_shift; + + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + TFLITE_DCHECK_LE(dw_conv_params->activation.min, + dw_conv_params->activation.max); + + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth 
= MatchingDim(filter_shape, 3, output_shape, 3); + + if (tflite::micro::GetOptionalTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + input_dims->n = batch_size; + input_dims->h = input_shape.Dims(1); + input_dims->w = input_shape.Dims(2); + input_dims->c = input_shape.Dims(3); + + filter_dims->n = filter_shape.Dims(0); + filter_dims->h = filter_shape.Dims(1); + filter_dims->w = filter_shape.Dims(2); + filter_dims->c = output_depth; + + bias_dims->n = 1; + bias_dims->h = 1; + bias_dims->w = 1; + bias_dims->c = output_depth; + + output_dims->n = batch_size; + output_dims->h = output_shape.Dims(1); + output_dims->w = output_shape.Dims(2); + output_dims->c = output_depth; +} + +void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_dw_conv_params dw_conv_params; + cmsis_nn_per_channel_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + + PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, params, data, + input, filter, bias, output); + + cmsis_nn_context ctx; + ctx.buf = nullptr; + /* 'size' is unused */ + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + } + + TFLITE_DCHECK_EQ( + arm_depthwise_conv_wrapper_s8( + &ctx, &dw_conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +} + +void EvalQuantizedPerChannelInt4(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, + 
const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_dw_conv_params dw_conv_params; + cmsis_nn_per_channel_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + + PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, params, data, + input, filter, bias, output); + + cmsis_nn_context ctx; + ctx.buf = nullptr; + /* 'size' is unused */ + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + } + + TFLITE_DCHECK_EQ( + arm_depthwise_conv_wrapper_s4( + &ctx, &dw_conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +} + +void EvalQuantizedPerChannel16x8(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_dw_conv_params dw_conv_params; + cmsis_nn_per_channel_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + + PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, params, data, + input, filter, bias, output); + + cmsis_nn_context ctx; + ctx.buf = nullptr; + /* 'size' is unused */ + ctx.size = 0; + + TFLITE_DCHECK_EQ( + arm_depthwise_conv_s16( + &ctx, &dw_conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + 
tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: { + tflite::reference_ops::DepthwiseConv( + DepthwiseConvParamsFloat(params, data.reference_op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: + switch (filter->type) { + case kTfLiteInt8: { + EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); + break; + } + case kTfLiteInt4: { + EvalQuantizedPerChannelInt4(context, node, params, data, input, + filter, bias, output); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + } + break; + case kTfLiteInt16: + EvalQuantizedPerChannel16x8(context, node, params, data, input, filter, + bias, output); + break; + default: + 
MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, + output); + return kTfLiteOk; +} + +TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? 
tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + EvalQuantizedPerChannel16x8(context, node, params, data, input, filter, bias, + output); + return kTfLiteOk; +} + +TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + EvalQuantizedPerChannelInt4(context, node, params, data, input, filter, bias, + output); + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_DEPTHWISE_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16x8); +} + +TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt4); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cpp new file mode 100644 index 0000000..160e54e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cpp @@ -0,0 +1,568 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataFullyConnected reference_op_data; + + // Index to buffers for optimizations if applicable. 
+ int buffer_conv_1x1_idx; + int buffer_idx; + + int32_t* kernel_sums; + + int32_t batches; + int32_t accum_depth; + int32_t output_depth; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kFullyConnectedInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = micro_context->AllocateTempInputTensor( + node, kFullyConnectedWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kFullyConnectedBiasTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kFullyConnectedOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, + input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8, + "Input data type not supported"); + TF_LITE_ENSURE_MSG( + context, + (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && + (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)), + "Hybrid models are not supported on TFLite Micro."); + + const RuntimeShape filter_shape = GetTensorShape(filter); + const RuntimeShape output_shape = GetTensorShape(output); + const int filter_dim_count = filter_shape.DimensionsCount(); + const int output_dim_count = 
output_shape.DimensionsCount(); + + TFLITE_DCHECK_GE(output_dim_count, 2); + TFLITE_DCHECK_LE(output_dim_count, 4); + + cmsis_nn_dims filter_dims; + filter_dims.n = filter_shape.Dims(filter_dim_count - 1); + filter_dims.h = 1; + filter_dims.w = 1; + filter_dims.c = output_shape.Dims(output_dim_count - 1); + + data->accum_depth = filter_shape.Dims(filter_dim_count - 1); + data->batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); + data->output_depth = output_shape.Dims(output_dim_count - 1); + + // Set buffer index to a reset value + data->buffer_idx = -1; + data->buffer_conv_1x1_idx = -1; + + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, params->activation, input->type, input, filter, bias, output, + &(data->reference_op_data))); + + // Currently only Int8 is supported for per channel quantization. + TF_LITE_ENSURE( + context, !data->reference_op_data.is_per_channel || + (data->reference_op_data.is_per_channel && + input->type == kTfLiteInt8 && filter->type != kTfLiteInt4)); + + int32_t buf_size = 0; + + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + buf_size = arm_fully_connected_s16_get_buffer_size(&filter_dims); + } else if (input->type == kTfLiteInt8 && filter->type != kTfLiteInt4) { + const bool is_conv_1x1_possible = + output_dim_count > 2 && data->accum_depth % 4 == 0; + + if (is_conv_1x1_possible) { + // In case per tensor quantization we use a scratch buffer to fake + // conv1x1 per channel quantization. 
+ if (!data->reference_op_data.is_per_channel) { + const int total_per_channel_quantization_size = + data->output_depth * sizeof(int32_t) * 2; + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, total_per_channel_quantization_size, + &data->buffer_conv_1x1_idx)); + } + + cmsis_nn_dims input_dims; + input_dims.n = data->batches; + input_dims.h = 1; + input_dims.w = 1; + input_dims.c = data->accum_depth; + buf_size = arm_convolve_1x1_s8_fast_get_buffer_size(&input_dims); + } else if (input->type == kTfLiteInt8) { + buf_size = arm_fully_connected_s8_get_buffer_size(&filter_dims); + + data->kernel_sums = nullptr; + +#if defined(KERNELS_OPTIMIZED_FOR_SPEED) + const int8_t* filter_data = GetTensorData(filter); + + if (buf_size > 0 && filter_data != nullptr) { + const int32_t input_offset = -data->reference_op_data.input_zero_point; + const int32_t filter_offset = + -data->reference_op_data.filter_zero_point; + + data->kernel_sums = static_cast( + context->AllocatePersistentBuffer(context, buf_size)); + + arm_vector_sum_s8(data->kernel_sums, filter_dims.n, data->output_depth, + filter_data, input_offset, filter_offset, + tflite::GetTensorData(bias)); + + // Do not request a scratch buffer since using persistent memory + buf_size = 0; + } +#endif + } + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + + return kTfLiteOk; +} + +void PopulateCommonParams(TfLiteContext* context, + cmsis_nn_per_tensor_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, + cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, + cmsis_nn_dims* const output_dims, + cmsis_nn_context* const ctx, const OpData& data) { + 
quant_params->multiplier = data.reference_op_data.output_multiplier; + quant_params->shift = data.reference_op_data.output_shift; + + input_dims->n = data.batches; + input_dims->h = 1; + input_dims->w = 1; + input_dims->c = data.accum_depth; + + filter_dims->n = data.accum_depth; + filter_dims->h = 1; + filter_dims->w = 1; + filter_dims->c = data.output_depth; + + bias_dims->n = 1; + bias_dims->h = 1; + bias_dims->w = 1; + bias_dims->c = data.output_depth; + + output_dims->n = data.batches; + output_dims->h = 1; + output_dims->w = 1; + output_dims->c = data.output_depth; + + ctx->buf = nullptr; + ctx->size = 0; + if (data.buffer_idx > -1) { + ctx->buf = context->GetScratchBuffer(context, data.buffer_idx); + } +} + +TfLiteStatus EvalQuantizedInt4(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + + cmsis_nn_per_tensor_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &quant_params, &input_dims, &filter_dims, + &bias_dims, &output_dims, &ctx, data); + + const int32_t* bias_data = + tflite::micro::GetOptionalTensorData(bias); + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.filter_offset = 0; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s4( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, 
tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + const int output_dim_count = output_shape.DimensionsCount(); + + cmsis_nn_per_tensor_quant_params per_tensor_quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &per_tensor_quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, &ctx, data); + + const int32_t* bias_data = + tflite::micro::GetOptionalTensorData(bias); + + if (output_dim_count > 2 && data.accum_depth % 4 == 0) { + cmsis_nn_conv_params conv_params; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + conv_params.input_offset = -data.reference_op_data.input_zero_point; + conv_params.output_offset = data.reference_op_data.output_zero_point; + conv_params.stride.h = 1; + conv_params.stride.w = 1; + conv_params.padding.h = 0; + conv_params.padding.w = 0; + conv_params.activation.min = data.reference_op_data.output_activation_min; + conv_params.activation.max = data.reference_op_data.output_activation_max; + + cmsis_nn_per_channel_quant_params per_channel_quant_params; + if (data.reference_op_data.is_per_channel) { + per_channel_quant_params.multiplier = + data.reference_op_data.per_channel_output_multiplier; + per_channel_quant_params.shift = + data.reference_op_data.per_channel_output_shift; + } else { + TFLITE_DCHECK_GE(data.buffer_conv_1x1_idx, 4); + per_channel_quant_params.multiplier = static_cast( + context->GetScratchBuffer(context, data.buffer_conv_1x1_idx)); + per_channel_quant_params.shift = + per_channel_quant_params.multiplier + data.output_depth; + + for 
(int i = 0; i < data.output_depth; i++) { + per_channel_quant_params.multiplier[i] = + per_tensor_quant_params.multiplier; + per_channel_quant_params.shift[i] = per_tensor_quant_params.shift; + } + } + + TF_LITE_ENSURE_EQ( + context, + arm_convolve_1x1_s8_fast( + &ctx, &conv_params, &per_channel_quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.filter_offset = -data.reference_op_data.filter_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + cmsis_nn_quant_params quant_params; + quant_params.is_per_channel = data.reference_op_data.is_per_channel; + + if (quant_params.is_per_channel) { + quant_params.multiplier = + data.reference_op_data.per_channel_output_multiplier; + quant_params.shift = data.reference_op_data.per_channel_output_shift; + } else { + quant_params.multiplier = &per_tensor_quant_params.multiplier; + quant_params.shift = &per_tensor_quant_params.shift; + } + + if (data.kernel_sums != nullptr) { + ctx.buf = data.kernel_sums; + } else if (ctx.buf != nullptr) { + // If behaving like batch matmul we calculate kernel sums in eval. 
+ arm_vector_sum_s8( + static_cast(ctx.buf), filter_dims.n, data.output_depth, + tflite::micro::GetTensorData(filter), fc_params.input_offset, + fc_params.filter_offset, bias_data); + } + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_wrapper_s8( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_per_tensor_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &quant_params, &input_dims, &filter_dims, + &bias_dims, &output_dims, &ctx, data); + + const int64_t* bias_data = + tflite::micro::GetOptionalTensorData(bias); + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.filter_offset = 0; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s16( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + const 
TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + switch (input->type) { + case kTfLiteFloat32: { + const float* bias_data = + tflite::micro::GetOptionalTensorData(bias); + tflite::reference_ops::FullyConnected( + FullyConnectedParamsFloat(params->activation), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), bias_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: { + switch (filter->type) { + case kTfLiteInt4: + return EvalQuantizedInt4(context, node, data, input, filter, bias, + output); + case kTfLiteInt8: + return EvalQuantizedInt8(context, node, data, input, filter, bias, + output); + default: + MicroPrintf("Filter Type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + break; + } + case kTfLiteInt16: { + return EvalQuantizedInt16(context, node, data, input, filter, bias, + output); + } + default: { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +TfLiteStatus EvalInt4(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* 
filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + if (input->type != kTfLiteInt8 && filter->type != kTfLiteInt4) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + return EvalQuantizedInt4(context, node, data, input, filter, bias, output); +} + +// Note that the current function names are not ideal at all (this EvalInt8 +// function internally calls EvalQuantizedInt8, and there is similar name +// aliasing in the Eval function too). We will be attempting to have a more +// descriptive naming convention but holding off on that for now, since the +// renaming might be coupled with reducing code duplication and some additional +// refactoring. +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. 
+ if (input->type != kTfLiteInt8) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + return EvalQuantizedInt8(context, node, data, input, filter, bias, output); +} + +TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + if (input->type != kTfLiteInt16) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + return EvalQuantizedInt16(context, node, data, input, filter, bias, output); +} + +} // namespace + +TFLMRegistration Register_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TFLMRegistration Register_FULLY_CONNECTED_INT4() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt4); +} + +TFLMRegistration Register_FULLY_CONNECTED_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TFLMRegistration Register_FULLY_CONNECTED_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16); +} + +TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Eval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/mul.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/mul.cpp new file mode 100644 index 0000000..ed8ee7f --- /dev/null +++ 
b/src/tensorflow/lite/micro/kernels/cmsis_nn/mul.cpp @@ -0,0 +1,184 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/mul.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/mul.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void EvalQuantized(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + + op_params.quantized_activation_min = data->output_activation_min; + op_params.quantized_activation_max = data->output_activation_max; + op_params.float_activation_max = data->output_activation_max_f32; + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + 
op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + if (input1->type == kTfLiteInt8) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else if (input1->type == kTfLiteInt16) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + + } else { + if (input1->type == kTfLiteInt8) { + arm_elementwise_mul_s8( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input2_offset, tflite::micro::GetTensorData(output), + op_params.output_offset, op_params.output_multiplier, + op_params.output_shift, op_params.quantized_activation_min, + op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } else if (input1->type == kTfLiteInt16) { + arm_elementwise_mul_s16( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), + op_params.input1_offset, op_params.input2_offset, + tflite::micro::GetTensorData(output), + op_params.output_offset, op_params.output_multiplier, + op_params.output_shift, op_params.quantized_activation_min, + op_params.quantized_activation_max, + 
MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + } +} + +} // namespace + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataMul* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + + switch (input1->type) { + case kTfLiteInt8: + EvalQuantized(context, node, data, input1, input2, output); + break; + case kTfLiteInt16: + EvalQuantized(context, node, data, input1, input2, output); + break; + case kTfLiteInt32: + EvalMulQuantizedReference(context, node, data, input1, input2, output); + break; + case kTfLiteFloat32: + EvalMulFloatReference(context, node, params, data, input1, input2, + output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + const OpDataMul* data = static_cast(node->user_data); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + TFLITE_DCHECK(input1->type == kTfLiteInt8); + + EvalQuantized(context, node, data, input1, input2, output); + + return kTfLiteOk; +} + +TfLiteStatus 
EvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + const OpDataMul* data = static_cast(node->user_data); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + TFLITE_DCHECK(input1->type == kTfLiteInt16); + + EvalQuantized(context, node, data, input1, input2, output); + + return kTfLiteOk; +} + +TFLMRegistration Register_MUL() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, Eval); +} + +TFLMRegistration Register_MUL_INT8() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, EvalInt8); +} + +TFLMRegistration Register_MUL_INT16() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, EvalInt16); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/pooling.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/pooling.cpp new file mode 100644 index 0000000..81aefa8 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/pooling.cpp @@ -0,0 +1,346 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/pooling.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/pooling.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +struct OpData { + OpDataPooling reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +void PopulateCommonParams( + TfLiteContext* const context, cmsis_nn_dims* const input_dims, + cmsis_nn_dims* const output_dims, cmsis_nn_pool_params* const pool_params, + cmsis_nn_context* const ctx, cmsis_nn_dims* const filter_dims, + const OpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const TfLitePoolParams* params) { + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + + input_dims->n = 1; + input_dims->h = input_shape.Dims(1); + input_dims->w = input_shape.Dims(2); + input_dims->c = depth; + + output_dims->n = 1; + output_dims->h = output_shape.Dims(1); + output_dims->w = output_shape.Dims(2); + output_dims->c = depth; + + pool_params->stride.h = params->stride_height; + pool_params->stride.w = params->stride_width; + pool_params->padding.h = data.reference_op_data.padding.height; + pool_params->padding.w = data.reference_op_data.padding.width; + pool_params->activation.min = data.reference_op_data.activation_min; + pool_params->activation.max = data.reference_op_data.activation_max; + + filter_dims->n = 1; + filter_dims->h = params->filter_height; + filter_dims->w = params->filter_width; + filter_dims->c = 1; + ctx->buf = nullptr; + ctx->size = 0; + if (data.buffer_idx > -1) { + ctx->buf = 
context->GetScratchBuffer(context, data.buffer_idx); + } +} + +void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK((input->type == kTfLiteInt8) || (input->type == kTfLiteInt16)); + + RuntimeShape input_shape = micro::GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = micro::GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + cmsis_nn_dims input_dims; + cmsis_nn_dims output_dims; + cmsis_nn_pool_params pool_params; + cmsis_nn_dims filter_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &input_dims, &output_dims, &pool_params, &ctx, + &filter_dims, data, input_shape, output_shape, params); + + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK_EQ( + arm_avgpool_s8(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + TFLITE_DCHECK_EQ( + arm_avgpool_s16(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } +} + +TfLiteStatus MaxEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, + const OpData& data, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK((input->type == kTfLiteInt8) || (input->type == kTfLiteInt16)); + + RuntimeShape input_shape = micro::GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = micro::GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + cmsis_nn_dims input_dims; + cmsis_nn_dims output_dims; + cmsis_nn_pool_params pool_params; + cmsis_nn_dims filter_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &input_dims, 
&output_dims, &pool_params, &ctx, + &filter_dims, data, input_shape, output_shape, params); + + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK_EQ( + arm_max_pool_s8(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + TFLITE_DCHECK_EQ( + arm_max_pool_s16(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } + + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus MaxPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_STATUS(PoolingPrepare(context, node)); + // Set buffer index to a reset value + static_cast(node->user_data)->buffer_idx = -1; + return kTfLiteOk; +} + +TfLiteStatus AveragePrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_STATUS(PoolingPrepare(context, node)); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kPoolingInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kPoolingOutputTensor); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + RuntimeShape input_shape = GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int output_width = output_shape.Dims(2); + + const int32_t buffer_size = + input->type == kTfLiteInt16 + ? 
arm_avgpool_s16_get_buffer_size(output_width, depth) + : arm_avgpool_s8_get_buffer_size(output_width, depth); + + auto* data = static_cast(node->user_data); + if (buffer_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buffer_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + // Inputs and outputs share the same type, guaranteed by the converter. 
+ if (input->type == kTfLiteFloat32) { + AveragePoolingEvalFloat(context, node, params, &data.reference_op_data, + input, output); + } else if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + AverageEvalQuantized(context, node, params, data, input, output); + } else { + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus AverageEvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt8); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + AverageEvalQuantized(context, node, params, data, input, output); + + return kTfLiteOk; +} + +TfLiteStatus AverageEvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt16); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + AverageEvalQuantized(context, node, params, data, input, output); + + return kTfLiteOk; +} +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, 
kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + if (input->type == kTfLiteFloat32) { + MaxPoolingEvalFloat(context, node, params, &data.reference_op_data, input, + output); + } else if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + MaxEvalQuantized(context, node, params, data, input, output); + } else { + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus MaxEvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt8); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + MaxEvalQuantized(context, node, params, data, input, output); + return kTfLiteOk; +} + +TfLiteStatus MaxEvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt16); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + MaxEvalQuantized(context, node, params, data, input, output); + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_AVERAGE_POOL_2D_INT8() { + return tflite::micro::RegisterOp(Init, AveragePrepare, AverageEvalInt8); +} + +TFLMRegistration Register_AVERAGE_POOL_2D_INT16() { + return tflite::micro::RegisterOp(Init, AveragePrepare, 
AverageEvalInt16); +} + +TFLMRegistration Register_AVERAGE_POOL_2D() { + return tflite::micro::RegisterOp(Init, AveragePrepare, AverageEval); +} + +TFLMRegistration Register_MAX_POOL_2D_INT8() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEvalInt8); +} + +TFLMRegistration Register_MAX_POOL_2D_INT16() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEvalInt16); +} + +TFLMRegistration Register_MAX_POOL_2D() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cpp new file mode 100644 index 0000000..35f935f --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/softmax.cpp @@ -0,0 +1,220 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/softmax.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/softmax.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct CMSISNNSoftmaxParams { + SoftmaxParams softmax_params; + int32_t num_rows; + int32_t row_size; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, + sizeof(CMSISNNSoftmaxParams)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_MSG( + context, + input->type == output->type || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt16), + "Input and output data types are not supported together."); + TF_LITE_ENSURE_MSG(context, + input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8, + "Input data type not supported"); + + TF_LITE_ENSURE(context, node->user_data != nullptr); + 
CMSISNNSoftmaxParams* op_data = + static_cast(node->user_data); + + auto* params = static_cast(node->builtin_data); + auto ret_val = CalculateSoftmaxParams(context, input, output, params, + &op_data->softmax_params); + + const auto input_shape = GetTensorShape(input); + const auto output_shape = GetTensorShape(output); + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + op_data->num_rows = outer_size; + op_data->row_size = depth; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return ret_val; +} + +TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + switch (input->type) { + case kTfLiteFloat32: { + tflite::reference_ops::Softmax( + op_data.softmax_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { + if (output->type == kTfLiteInt8) { + arm_softmax_s8(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + } else { + arm_softmax_s8_s16(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + } + return 
kTfLiteOk; + } + case kTfLiteInt16: { + const cmsis_nn_softmax_lut_s16 softmax_params = { + .exp_lut = op_data.softmax_params.exp_lut, + .one_by_one_lut = op_data.softmax_params.one_over_one_plus_x_lut}; + + TFLITE_DCHECK_EQ( + arm_softmax_s16( + tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, &softmax_params, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + return kTfLiteOk; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } +} + +TfLiteStatus SoftmaxEvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + arm_softmax_s8(tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus SoftmaxEvalInt8_Int16(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + arm_softmax_s8_s16( + tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus SoftmaxEvalInt16(TfLiteContext* context, TfLiteNode* node) { 
+ const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + const cmsis_nn_softmax_lut_s16 softmax_params = { + .exp_lut = op_data.softmax_params.exp_lut, + .one_by_one_lut = op_data.softmax_params.one_over_one_plus_x_lut}; + + TFLITE_DCHECK_EQ( + arm_softmax_s16(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, &softmax_params, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_SOFTMAX() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEval); +} + +TFLMRegistration Register_SOFTMAX_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt8); +} + +TFLMRegistration Register_SOFTMAX_INT8_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt8_Int16); +} + +TFLMRegistration Register_SOFTMAX_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt16); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cpp new file mode 100644 index 0000000..404b659 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/svdf.cpp @@ -0,0 +1,445 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/svdf.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/activation_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +struct CmsisNnOpDataSvdf { + int32_t effective_scale_1_a; + int32_t effective_scale_2_a; + // b versions of each scale are kept at int since the numbers are just the + // shift value - typically between [-32, 32]. + int effective_scale_1_b; + int effective_scale_2_b; + int scratch_tensor_index; +#if defined(KERNELS_OPTIMIZED_FOR_SIZE) + int scratch_weight_tensor_index; +#endif + int scratch_output_tensor_index; + + // Cached tensor zero point values for quantized operations. 
+ int input_zero_point; + int output_zero_point; + int activation_state_zero_point; + int32_t* kernel_sums; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(CmsisNnOpDataSvdf)); +} + +TfLiteStatus CmsisNnPrepareSvdf(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto* params = static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + // Validate Tensor Inputs (dtype depends on quantization): + // [0] = Input, {2, batch_size, input_size} + // [1] = Weights Feature, {2, num_filters, input_size} + // [2] = Weights Time, {2, num_filters, memory_size} + // [3] = Bias (optional), {1, num_units} + // [4] = Activation State (variable), + // {2, batch_size, memory_size * num_filters} + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kSvdfInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* weights_feature = + micro_context->AllocateTempInputTensor(node, kSvdfWeightsFeatureTensor); + TF_LITE_ENSURE(context, weights_feature != nullptr); + TfLiteTensor* weights_time = + micro_context->AllocateTempInputTensor(node, kSvdfWeightsTimeTensor); + TF_LITE_ENSURE(context, weights_time != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kSvdfBiasTensor); + TfLiteTensor* activation_state = micro_context->AllocateTempInputTensor( + node, kSvdfInputActivationStateTensor); + TF_LITE_ENSURE(context, activation_state != nullptr); + + // Define input constants based on input tensor definition above: + const int rank = params->rank; + const int input_size = input->dims->data[1]; + const int batch_size = input->dims->data[0]; + const int num_filters = weights_feature->dims->data[0]; + TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); + const int num_units = num_filters / rank; + 
const int memory_size = weights_time->dims->data[1]; + + // Validate Input Tensor: + TF_LITE_ENSURE(context, + input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); + + // Validate Tensor Output: + // [0] = float/int8_t, {2, batch_size, num_units} + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kSvdfOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); + TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); + + // Validate Weights Feature Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); + TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); + + // Validate Weights Time Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); + + // Validate Optional Bias Input Tensor: + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); + } + + // Validate Activation State Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], + memory_size * num_filters); + // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. 
+ TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); + + TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); + + TFLITE_DCHECK(node->user_data != nullptr); + CmsisNnOpDataSvdf* data = static_cast(node->user_data); + + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); + TF_LITE_ENSURE(context, (weights_time->type == kTfLiteInt16) || + (weights_time->type == kTfLiteInt8)); + TF_LITE_ENSURE(context, (activation_state->type == kTfLiteInt16) || + (activation_state->type == kTfLiteInt8)); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); + } + + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); + + const double effective_scale_1 = static_cast( + input->params.scale * weights_feature->params.scale / + activation_state->params.scale); + const double effective_scale_2 = + static_cast(activation_state->params.scale * + weights_time->params.scale / output->params.scale); + + // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. 
+ // TODO(#1751): account for optional bias tensor + TF_LITE_ENSURE( + context, + std::abs(static_cast(bias->params.scale) - + static_cast(activation_state->params.scale * + weights_time->params.scale)) < 1e-5); + + QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), + &(data->effective_scale_1_b)); + QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), + &(data->effective_scale_2_b)); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + data->activation_state_zero_point = activation_state->params.zero_point; + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(int32_t), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + + const TfLiteStatus scratch_output_status = + context->RequestScratchBufferInArena( + context, batch_size * num_units * sizeof(int32_t), + &(data->scratch_output_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_output_status); + + cmsis_nn_dims weights_feature_dims; + weights_feature_dims.n = num_filters; + weights_feature_dims.h = input_size; + + const int32_t buf_size = arm_svdf_s8_get_buffer_size(&weights_feature_dims); + + if (buf_size > 0) { +#if defined(KERNELS_OPTIMIZED_FOR_SPEED) + data->kernel_sums = static_cast( + context->AllocatePersistentBuffer(context, buf_size)); + + arm_vector_sum_s8(data->kernel_sums, input_size, num_filters, + GetTensorData(weights_feature), + -data->input_zero_point, + -data->activation_state_zero_point, nullptr); +#elif defined(KERNELS_OPTIMIZED_FOR_SIZE) + const TfLiteStatus scratch_kernel_status = + context->RequestScratchBufferInArena( + context, buf_size, &(data->scratch_weight_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_kernel_status); +#else + MicroPrintf( + "Either KERNELS_OPTIMIZED_FOR_SIZE or KERNELS_OPTIMIZED_FOR_SPEED " + "must be 
defined"); + return kTfLiteError; +#endif + } + + } else { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); + } + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(float), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(weights_feature); + micro_context->DeallocateTempTfLiteTensor(weights_time); + micro_context->DeallocateTempTfLiteTensor(activation_state); + micro_context->DeallocateTempTfLiteTensor(output); + // TODO(#1751): account for optional bias tensor + micro_context->DeallocateTempTfLiteTensor(bias); + return kTfLiteOk; +} + +TfLiteStatus EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const CmsisNnOpDataSvdf& data) { + cmsis_nn_dims input_dims; + input_dims.n = input_tensor->dims->data[0]; + input_dims.h = input_tensor->dims->data[1]; + + cmsis_nn_dims weights_feature_dims; + weights_feature_dims.n = weights_feature_tensor->dims->data[0]; + weights_feature_dims.h = weights_feature_tensor->dims->data[1]; + + cmsis_nn_dims weights_time_dims; + weights_time_dims.n = weights_time_tensor->dims->data[0]; + weights_time_dims.h = weights_time_tensor->dims->data[1]; + + cmsis_nn_dims bias_dims; + 
bias_dims.n = bias_tensor->dims->data[0]; + + cmsis_nn_dims state_dims; + state_dims.n = bias_tensor->dims->data[0]; + state_dims.h = bias_tensor->dims->data[1]; + + cmsis_nn_dims output_dims; + output_dims.n = output_tensor->dims->data[0]; + output_dims.h = output_tensor->dims->data[1]; + + cmsis_nn_svdf_params svdf_params; + svdf_params.rank = params->rank; + svdf_params.input_offset = data.input_zero_point; + svdf_params.output_offset = data.output_zero_point; + + svdf_params.input_activation.min = INT16_MIN; + svdf_params.input_activation.max = INT16_MAX; + + svdf_params.output_activation.min = INT8_MIN; + svdf_params.output_activation.max = INT8_MAX; + + cmsis_nn_per_tensor_quant_params in_quant_params; + in_quant_params.multiplier = data.effective_scale_1_a; + in_quant_params.shift = data.effective_scale_1_b; + + cmsis_nn_per_tensor_quant_params out_quant_params; + out_quant_params.multiplier = data.effective_scale_2_a; + out_quant_params.shift = data.effective_scale_2_b; + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + cmsis_nn_context scratch_ctx; + scratch_ctx.buf = static_cast( + context->GetScratchBuffer(context, data.scratch_tensor_index)); + + cmsis_nn_context scratch_output_ctx; + scratch_output_ctx.buf = static_cast( + context->GetScratchBuffer(context, data.scratch_output_tensor_index)); + + int8_t* output_data = tflite::micro::GetTensorData(output_tensor); + + switch (weights_time_tensor->type) { + case kTfLiteInt8: { + cmsis_nn_context ctx; + +#if defined(KERNELS_OPTIMIZED_FOR_SPEED) + ctx.buf = data.kernel_sums; +#elif defined(KERNELS_OPTIMIZED_FOR_SIZE) + ctx.buf = static_cast( + context->GetScratchBuffer(context, data.scratch_weight_tensor_index)); + + const int input_size = input_tensor->dims->data[1]; + const int num_filters = weights_feature_tensor->dims->data[0]; + + arm_vector_sum_s8( + static_cast(ctx.buf), input_size, num_filters, + tflite::micro::GetTensorData(weights_feature_tensor), + 
-data.input_zero_point, -data.activation_state_zero_point, nullptr); +#endif + + arm_svdf_s8( + &ctx, &scratch_ctx, &scratch_output_ctx, &svdf_params, + &in_quant_params, &out_quant_params, &input_dims, + tflite::micro::GetTensorData(input_tensor), &state_dims, + tflite::micro::GetTensorData(activation_state_tensor), + &weights_feature_dims, + tflite::micro::GetTensorData(weights_feature_tensor), + &weights_time_dims, + tflite::micro::GetTensorData(weights_time_tensor), &bias_dims, + tflite::micro::GetTensorData(bias_tensor), &output_dims, + output_data); + return kTfLiteOk; + } + + case kTfLiteInt16: { + arm_svdf_state_s16_s8( + &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params, + &out_quant_params, &input_dims, + tflite::micro::GetTensorData(input_tensor), &state_dims, + tflite::micro::GetTensorData(activation_state_tensor), + &weights_feature_dims, + tflite::micro::GetTensorData(weights_feature_tensor), + &weights_time_dims, + tflite::micro::GetTensorData(weights_time_tensor), + &bias_dims, tflite::micro::GetTensorData(bias_tensor), + &output_dims, output_data); + return kTfLiteOk; + } + + default: + MicroPrintf("Could not find matching function for type %s.", + TfLiteTypeGetName(weights_time_tensor->type)); + return kTfLiteError; + } +} + +TfLiteStatus EvalSvdf(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + const CmsisNnOpDataSvdf& data = + *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); + const TfLiteEvalTensor* weights_feature = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); + const TfLiteEvalTensor* weights_time = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 5) + ? 
tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) + : nullptr; + TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( + context, node, kSvdfInputActivationStateTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); + + switch (weights_time->type) { + case kTfLiteFloat32: { + EvalFloatSvdfReference( + context, node, input, weights_feature, weights_time, bias, params, + data.scratch_tensor_index, activation_state, output); + return kTfLiteOk; + } + + case kTfLiteInt8: + case kTfLiteInt16: { + return EvalIntegerSVDF(context, node, input, weights_feature, + weights_time, bias, params, activation_state, + output, data); + } + + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(weights_feature->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalSvdfInt8(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + const CmsisNnOpDataSvdf& data = + *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); + const TfLiteEvalTensor* weights_feature = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); + const TfLiteEvalTensor* weights_time = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 5) + ? 
tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) + : nullptr; + TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( + context, node, kSvdfInputActivationStateTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); + + TFLITE_DCHECK((weights_time->type == kTfLiteInt8) || + (weights_time->type == kTfLiteInt16)); + // Because of the TODO mentioned below, the int16 weight data type is not + // split into a separate registration. + // TODO(#523): remove 16-bit code when no longer needed. + return EvalIntegerSVDF(context, node, input, weights_feature, weights_time, + bias, params, activation_state, output, data); +} + +} // namespace + +TFLMRegistration Register_SVDF() { + return tflite::micro::RegisterOp(Init, CmsisNnPrepareSvdf, EvalSvdf); +} + +TFLMRegistration Register_SVDF_INT8() { + return tflite::micro::RegisterOp(Init, CmsisNnPrepareSvdf, EvalSvdfInt8); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cpp new file mode 100644 index 0000000..121333b --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/transpose_conv.cpp @@ -0,0 +1,515 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/kernels/transpose_conv.h" + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" +#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// For the TfLite transpose_conv implementation, input tensor 0 corresponds to +// the OutputShapeTensor. However, since TFLM does not support dynamic tensors, +// the TFLM implementation ignores input tensor 0 and the only inputs we care +// about are kFilterTensor, kInputTensor and kBiasTensor. +constexpr int kFilterTensor = 1; +constexpr int kInputTensor = 2; +constexpr int kBiasTensor = 3; +constexpr int kOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kConvQuantizedDimension = 0; + +struct OpData { + ConvParams params; + + // Scratch buffers are required for quantized implementations. + int scratch_buffer_index; + int scratch_buffer_output_index; + + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. + int bias_converted_buffer_index; + + // Multiplier and shift arrays are required for the int8 implementation. 
+ int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; +}; + +inline PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + const TfLiteTransposeConvParams* params, int width, + int height, int filter_width, int filter_height, + const TfLiteType data_type, OpData* data) { + bool has_bias = node->inputs->size == 4; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + int pad_output_width; + int pad_output_height; + + TfLitePaddingValues padding_values = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, 1, + 1, // Dilation height and width are always 1 for transpose_conv. + height, width, filter_height, filter_width, padding, &pad_output_height, + &pad_output_width); + + data->params.padding_type = RuntimePaddingType(padding); + data->params.padding_values.width = padding_values.width; + data->params.padding_values.height = padding_values.height; + data->params.padding_values.width_offset = + padding_values.width_offset + padding_values.width; + data->params.padding_values.height_offset = + padding_values.height_offset + padding_values.height; + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
+ if (data_type != kTfLiteFloat32) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, kTfLiteActNone, + &data->params.output_multiplier, &data->params.output_shift, + &data->params.quantized_activation_min, + &data->params.quantized_activation_max, + data->per_channel_output_multiplier, data->per_channel_output_shift, + output_channels)); + + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. 
+ if (input->type == kTfLiteInt16) { + TFLITE_DCHECK(filter->type == kTfLiteInt8); + TFLITE_DCHECK(output->type == kTfLiteInt16); + if (bias->type == kTfLiteInt16) { + TFLITE_DCHECK( + context->RequestScratchBufferInArena( + context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t), + &(data->bias_converted_buffer_index)) == kTfLiteOk); + } + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(output); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + } + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, + input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8, + "Input data type not supported"); + TF_LITE_ENSURE_MSG( + context, + (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && filter->type 
== kTfLiteInt8), + "Hybrid models are not supported on TFLite Micro."); + + // Get height and width of the output. + const int width = SizeOfDimension(output, 2); + const int height = SizeOfDimension(output, 1); + const int filter_width = SizeOfDimension(filter, 2); + const int filter_height = SizeOfDimension(filter, 1); + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + RuntimeShape filter_shape = GetTensorShape(filter); + + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_shape.Dims(1); + output_dims.w = output_shape.Dims(2); + output_dims.c = output_depth; + + cmsis_nn_transpose_conv_params conv_params; + conv_params.stride.w = params->stride_width; + conv_params.stride.h = params->stride_height; + + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_shape.Dims(1); + input_dims.w = input_shape.Dims(2); + input_dims.c = input_depth; + + cmsis_nn_dims filter_dims; + filter_dims.n = output_depth; + filter_dims.h = filter_shape.Dims(1); + filter_dims.w = filter_shape.Dims(2); + filter_dims.c = input_depth; + + const size_t buf_size = arm_transpose_conv_s8_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + 
TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, buf_size, &(data->scratch_buffer_index)) == + kTfLiteOk); + + // Quantized 8-bit kernels use a second scratch buffer for reversing the + // filter for certain configurations. + const size_t reverse_buf_size = + arm_transpose_conv_s8_get_reverse_conv_buffer_size( + &conv_params, &input_dims, &filter_dims); + TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, reverse_buf_size, + &(data->scratch_buffer_output_index)) == kTfLiteOk); + } + + // Quantized 16x8 kernels use an int64 scratch buffer. + if (input->type == kTfLiteInt16) { + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, + GetTensorShape(output).FlatSize() * sizeof(std::int64_t), + &(data->scratch_buffer_index)) == kTfLiteOk); + } + + // All per-channel quantized tensors need valid zero point and scale arrays. + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, + filter_width, filter_height, + input->type, data)); + + // Offsets (zero points) + data->params.input_offset = -input->params.zero_point; + data->params.weights_offset = -filter->params.zero_point; + data->params.output_offset = output->params.zero_point; + + // Stride + data->params.stride_width = params->stride_width; 
+ data->params.stride_height = params->stride_height; + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_transpose_conv_params conv_params; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + + // Initialize cmsis_nn convolution parameters + conv_params.input_offset = data.params.input_offset; + conv_params.output_offset = data.params.output_offset; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.padding.h = data.params.padding_values.height; + conv_params.padding.w = data.params.padding_values.width; + conv_params.padding_offsets.h = data.params.padding_values.height_offset; + conv_params.padding_offsets.w = data.params.padding_values.width_offset; + conv_params.activation.min = data.params.quantized_activation_min; + conv_params.activation.max = data.params.quantized_activation_max; + + // Initialize cmsis_nn per channel quantization parameters + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = + const_cast(data.per_channel_output_multiplier); + quant_params.shift = const_cast(data.per_channel_output_shift); + + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + // Consistency check. 
+ TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (tflite::micro::GetOptionalTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_shape.Dims(1); + input_dims.w = input_shape.Dims(2); + input_dims.c = input_depth; + + cmsis_nn_dims filter_dims; + filter_dims.n = output_depth; + filter_dims.h = filter_shape.Dims(1); + filter_dims.w = filter_shape.Dims(2); + filter_dims.c = input_depth; + + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = output_depth; + + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_shape.Dims(1); + output_dims.w = output_shape.Dims(2); + output_dims.c = output_depth; + + cmsis_nn_context ctx; + ctx.size = 0; // Note: ctx.size is currently not used in cmsis_nn. + ctx.buf = context->GetScratchBuffer(context, data.scratch_buffer_index); + + cmsis_nn_context scratch_output_ctx; + scratch_output_ctx.size = + 0; // Note: ctx.size is currently not used in cmsis_nn. 
+ scratch_output_ctx.buf = + context->GetScratchBuffer(context, data.scratch_buffer_output_index); + + TFLITE_DCHECK_EQ( + arm_transpose_conv_wrapper_s8( + &ctx, &scratch_output_ctx, &conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 4) + ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: { + ConvParams op_params = data.params; + CalculateActivationRange(params.activation, + &op_params.float_activation_min, + &op_params.float_activation_max); + + reference_ops::TransposeConv( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: { + return EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); + break; + } + case kTfLiteInt16: { + std::int64_t* scratch_buffer = static_cast( + context->GetScratchBuffer(context, data.scratch_buffer_index)); + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. + if (bias != nullptr && bias->type == kTfLiteInt16) { + std::int64_t* bias_converted_buffer = + static_cast(context->GetScratchBuffer( + context, data.bias_converted_buffer_index)); + for (int i = 0; i < tflite::micro::GetTensorShape(bias).FlatSize(); + i++) { + bias_converted_buffer[i] = bias->data.i16[i]; + } + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), bias_converted_buffer, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + } else { + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + 
tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 4) + ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + + return EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); +} + +} // namespace + +TFLMRegistration Register_TRANSPOSE_CONV() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TFLMRegistration Register_TRANSPOSE_CONV_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cpp b/src/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cpp new file mode 100644 index 0000000..c193a88 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cmsis_nn/unidirectional_sequence_lstm.cpp @@ -0,0 +1,546 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Integer version of unidirectional sequence LSTM. Only the standard LSTM +// (defined in the keras LSTM layer, e.g., no peephole etc.) is supported here. +// Currently used by the 8 bits activation case only, except for fallbacks. + +#include +#include + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/lstm_eval.h" +#include "tensorflow/lite/micro/kernels/lstm_shared.h" +#include "tensorflow/lite/micro/kernels/micro_tensor_utils.h" +namespace tflite { + +namespace { + +struct OpData { + OpDataLSTM params_ref; // Used for fallback implementation + cmsis_nn_lstm_params params_cmsis_nn; // Used for CMSIS-NN implementation +}; + +LSTMBuffers CMSIS_NN_CreateLSTMBuffers(TfLiteContext* context, + const int* buffer_indices) { + LSTMBuffers buffers; + buffers.buffer0 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[0])); + buffers.buffer1 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[1])); + buffers.buffer2 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[2])); + + return buffers; +} + +void 
CMSIS_NN_VectorSum(int32_t* kernel_sum, const int32_t size1, + const int32_t size2, const int8_t* weights, + const int32_t offset, const int32_t* biases) { + arm_vector_sum_s8(kernel_sum, size1, size2, weights, offset, 0, biases); +} + +void CMSIS_NN_VectorSum(int64_t* kernel_sum, const int32_t size1, + const int32_t size2, const int8_t* weights, + const int32_t offset, const int64_t* biases) { + arm_vector_sum_s8_s64(kernel_sum, size1, size2, weights, offset, biases); +} + +template +TfLiteStatus CMSIS_NN_PortOpData(TfLiteContext* context, OpDataLSTM* params_ref, + const LSTMKernelContents& kernel_content, + cmsis_nn_lstm_params* params_cmsis_nn) { + // Unwrap pointers + const BiasType* input_gate_bias = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor)); + const BiasType* forget_gate_bias = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor)); + const BiasType* cell_gate_bias = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor)); + const BiasType* output_gate_bias = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor)); + + const int8_t* input_to_input_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmInputToInputWeightsTensor)); + const int8_t* input_to_forget_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmInputToForgetWeightsTensor)); + const int8_t* input_to_cell_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmInputToCellWeightsTensor)); + const int8_t* input_to_output_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmInputToOutputWeightsTensor)); + + const int8_t* recurrent_to_input_weights = + 
tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToInputWeightsTensor)); + const int8_t* recurrent_to_forget_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToForgetWeightsTensor)); + const int8_t* recurrent_to_cell_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToCellWeightsTensor)); + const int8_t* recurrent_to_output_weights = + tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToOutputWeightsTensor)); + + int32_t size_data = params_ref->size_info.input_dimension; + int32_t size_hidden = params_ref->size_info.state_dimension; + + BiasType* input_data_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* forget_data_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* cell_data_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* output_data_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + + BiasType* input_hidden_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* forget_hidden_kernel_sum{ + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* cell_hidden_kernel_sum = { + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + BiasType* output_hidden_kernel_sum = { + static_cast(context->AllocatePersistentBuffer( + context, size_hidden * sizeof(BiasType)))}; + + // Compute effective biases + CMSIS_NN_VectorSum( + input_data_kernel_sum, size_data, size_hidden, input_to_input_weights, + 
params_ref->input_gate_parameters.input_fc_params.input_offset, + input_gate_bias); + + CMSIS_NN_VectorSum( + forget_data_kernel_sum, size_data, size_hidden, input_to_forget_weights, + params_ref->forget_gate_parameters.input_fc_params.input_offset, + forget_gate_bias); + + CMSIS_NN_VectorSum( + cell_data_kernel_sum, size_data, size_hidden, input_to_cell_weights, + params_ref->cell_gate_parameters.input_fc_params.input_offset, + cell_gate_bias); + + CMSIS_NN_VectorSum( + output_data_kernel_sum, size_data, size_hidden, input_to_output_weights, + params_ref->output_gate_parameters.input_fc_params.input_offset, + output_gate_bias); + + CMSIS_NN_VectorSum( + input_hidden_kernel_sum, size_hidden, size_hidden, + recurrent_to_input_weights, + -params_ref->inter_gate_parameters.output_mul_params.output_offset, + nullptr); + + CMSIS_NN_VectorSum( + forget_hidden_kernel_sum, size_hidden, size_hidden, + recurrent_to_forget_weights, + -params_ref->inter_gate_parameters.output_mul_params.output_offset, + nullptr); + + CMSIS_NN_VectorSum( + cell_hidden_kernel_sum, size_hidden, size_hidden, + recurrent_to_cell_weights, + -params_ref->inter_gate_parameters.output_mul_params.output_offset, + nullptr); + + CMSIS_NN_VectorSum( + output_hidden_kernel_sum, size_hidden, size_hidden, + recurrent_to_output_weights, + -params_ref->inter_gate_parameters.output_mul_params.output_offset, + nullptr); + + // Create input gate parameters + cmsis_nn_lstm_gate gate_input{ + params_ref->input_gate_parameters.input_fc_params.output_multiplier, + params_ref->input_gate_parameters.input_fc_params.output_shift, + input_to_input_weights, + input_data_kernel_sum, + params_ref->input_gate_parameters.recurrent_fc_params.output_multiplier, + params_ref->input_gate_parameters.recurrent_fc_params.output_shift, + recurrent_to_input_weights, + input_hidden_kernel_sum, + input_gate_bias, + ARM_SIGMOID}; + + // Create forget gate parameters + cmsis_nn_lstm_gate gate_forget{ + 
params_ref->forget_gate_parameters.input_fc_params.output_multiplier, + params_ref->forget_gate_parameters.input_fc_params.output_shift, + input_to_forget_weights, + forget_data_kernel_sum, + params_ref->forget_gate_parameters.recurrent_fc_params.output_multiplier, + params_ref->forget_gate_parameters.recurrent_fc_params.output_shift, + recurrent_to_forget_weights, + forget_hidden_kernel_sum, + forget_gate_bias, + ARM_SIGMOID}; + + auto cell_gate_nonlinear_type = + (params_ref->cell_gate_nonlinear_type == kTfLiteActTanh) ? ARM_TANH + : ARM_SIGMOID; + // Create cell gate parameters + cmsis_nn_lstm_gate gate_cell{ + params_ref->cell_gate_parameters.input_fc_params.output_multiplier, + params_ref->cell_gate_parameters.input_fc_params.output_shift, + input_to_cell_weights, + cell_data_kernel_sum, + params_ref->cell_gate_parameters.recurrent_fc_params.output_multiplier, + params_ref->cell_gate_parameters.recurrent_fc_params.output_shift, + recurrent_to_cell_weights, + cell_hidden_kernel_sum, + cell_gate_bias, + cell_gate_nonlinear_type}; + + // Create output gate parameters + cmsis_nn_lstm_gate gate_output{ + params_ref->output_gate_parameters.input_fc_params.output_multiplier, + params_ref->output_gate_parameters.input_fc_params.output_shift, + input_to_output_weights, + output_data_kernel_sum, + params_ref->output_gate_parameters.recurrent_fc_params.output_multiplier, + params_ref->output_gate_parameters.recurrent_fc_params.output_shift, + recurrent_to_output_weights, + output_hidden_kernel_sum, + output_gate_bias, + ARM_SIGMOID}; + + // Create the complete lstm data struct + *params_cmsis_nn = { + params_ref->size_info.time_major, + params_ref->size_info.batch_size, + params_ref->size_info.time_steps, + params_ref->size_info.input_dimension, + params_ref->size_info.state_dimension, + params_ref->forget_gate_parameters.input_fc_params.input_offset, + params_ref->inter_gate_parameters.forget_cell_mul_params + .output_multiplier, + 
params_ref->inter_gate_parameters.forget_cell_mul_params.output_shift, + params_ref->inter_gate_parameters.input_mul_params.output_multiplier, + params_ref->inter_gate_parameters.input_mul_params.output_shift, + params_ref->cell_state_info.quantized_cell_clip, + params_ref->cell_state_info.cell_state_scale_power, + params_ref->inter_gate_parameters.output_mul_params.output_multiplier, + params_ref->inter_gate_parameters.output_mul_params.output_shift, + params_ref->inter_gate_parameters.output_mul_params.output_offset, + gate_forget, + gate_input, + gate_cell, + gate_output}; + + return kTfLiteOk; +} + +TfLiteStatus CMSIS_NN_EvalInteger8x8_16Lstm( + const OpData& op_data, const LSTMKernelContents& kernel_content, + const LSTMBuffers& buffers) { + TFLITE_DCHECK( + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size >= + 2 && + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size <= + 3); + + const int8_t* input = tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)); + int8_t* output = + tflite::micro::GetTensorData(kernel_content.output_tensor); + + // Create lstm buffer struct + cmsis_nn_lstm_context cmsis_buffers; + cmsis_buffers.temp1 = reinterpret_cast(buffers.buffer0); + cmsis_buffers.temp2 = reinterpret_cast(buffers.buffer1); + cmsis_buffers.cell_state = reinterpret_cast(buffers.buffer2); + + arm_lstm_unidirectional_s8(input, output, &op_data.params_cmsis_nn, + &cmsis_buffers); + + return kTfLiteOk; +} + +TfLiteStatus CMSIS_NN_EvalInteger16x8_16Lstm( + const OpData& op_data, const LSTMKernelContents& kernel_content, + const LSTMBuffers& buffers) { + TFLITE_DCHECK( + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size >= + 2 && + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)->dims->size <= + 3); + + const int16_t* input = tflite::micro::GetOptionalTensorData( + kernel_content.GetInternalTensor(tflite::kLstmInputTensor)); + int16_t* output = + 
tflite::micro::GetTensorData(kernel_content.output_tensor); + + // Create lstm buffer struct + cmsis_nn_lstm_context cmsis_buffers; + cmsis_buffers.temp1 = reinterpret_cast(buffers.buffer0); + cmsis_buffers.temp2 = reinterpret_cast(buffers.buffer1); + cmsis_buffers.cell_state = reinterpret_cast(buffers.buffer2); + + arm_lstm_unidirectional_s16(input, output, &op_data.params_cmsis_nn, + &cmsis_buffers); + + return kTfLiteOk; +} + +/*Kernel functions*/ +void* UnidirectionalSequenceLstmInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus UnidirectionalSequenceLstmPrepare(TfLiteContext* context, + TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + TF_LITE_ENSURE_EQ(context, node->inputs->size, 24); + + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + OpData* op_data = reinterpret_cast(node->user_data); + OpDataLSTM* op_data_lstm = &op_data->params_ref; + + const auto* builtin_data = + static_cast(node->builtin_data); + // All TempTfLiteTensors will be deallocated through the destructor. 
+ LstmTensors lstm_tensors(context, node); + TF_LITE_ENSURE_OK(context, lstm_tensors.ValidateTensorStatus(context)); + + op_data_lstm->cell_gate_nonlinear_type = builtin_data->activation; + op_data_lstm->size_info = + CreateLstmSizeInfo(builtin_data->time_major, + lstm_tensors.GetInternalTensor(kLstmInputTensor)->dims, + lstm_tensors.HiddenStateTensor()->dims); + + const TfLiteTensor* input = lstm_tensors.GetInternalTensor(kLstmInputTensor); + const auto activation_type = input->type; + + TF_LITE_ENSURE_OK(context, ValidateTensorSize(context, lstm_tensors, + op_data_lstm->size_info)); + + auto cell_state_type = + lstm_tensors.GetInternalTensor(kLstmCellStateTensor)->type; + if (cell_state_type == kTfLiteFloat32) { + op_data_lstm->cell_state_info = + CreateLstmCellStateInfoFloat(builtin_data->cell_clip); + TF_LITE_ENSURE_OK(context, PrepareGateParametersFloat(context, lstm_tensors, + op_data_lstm)); + } else if (cell_state_type == kTfLiteInt16) { + op_data_lstm->cell_state_info = CreateLstmCellStateInfo( + lstm_tensors.CellStateTensor()->params.scale, builtin_data->cell_clip); + TF_LITE_ENSURE_OK(context, PrepareGateParametersInteger( + context, lstm_tensors, op_data_lstm)); + } else { + MicroPrintf( + "Cell state type %s (%d) not supported. 
The quantized Unidirectional " + "Sequence LSTM Op only support int16 cell state", + TfLiteTypeGetName(cell_state_type), cell_state_type); + return kTfLiteError; + } + + size_t number_of_buffers; + if (activation_type == kTfLiteInt8 && cell_state_type == kTfLiteInt16) { + auto kernel_content = CreateLSTMKernelContent(context, node); + number_of_buffers = 3; + CMSIS_NN_PortOpData(context, op_data_lstm, kernel_content, + &op_data->params_cmsis_nn); + } else if (activation_type == kTfLiteInt16 && + cell_state_type == kTfLiteInt16) { + auto kernel_content = CreateLSTMKernelContent(context, node); + number_of_buffers = 3; + CMSIS_NN_PortOpData(context, op_data_lstm, kernel_content, + &op_data->params_cmsis_nn); + } else { + number_of_buffers = 4; + } + + for (size_t i = 0; i < number_of_buffers; i++) { + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, + op_data_lstm->size_info.batch_size * + op_data_lstm->size_info.state_dimension * + TfLiteTypeGetSize(cell_state_type), + &(op_data_lstm->buffer_indices[i]))); + } + + return kTfLiteOk; +} + +TfLiteStatus UnidirectionalSequenceLstmEval(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& op_data = *reinterpret_cast(node->user_data); + const OpDataLSTM& op_data_lstm = op_data.params_ref; + + auto kernel_content = CreateLSTMKernelContent(context, node); + + const auto activation_type = + kernel_content.internal_tensors[kLstmInputTensor]->type; + const auto weight_type = + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type; + + switch (activation_type) { + case kTfLiteFloat32: { + LSTMBuffers buffers = + CreateLSTMBuffers(context, op_data_lstm.buffer_indices); + EvalLstm(op_data_lstm, kernel_content, + buffers); + break; + } + case kTfLiteInt8: { + switch (weight_type) { + case kTfLiteInt8: { + // 8(activation)x8(weight)->16(cell) LSTM with 32 bits bias + LSTMBuffers buffers = + CMSIS_NN_CreateLSTMBuffers(context, 
op_data_lstm.buffer_indices); + CMSIS_NN_EvalInteger8x8_16Lstm(op_data, kernel_content, buffers); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(weight_type), activation_type); + return kTfLiteError; + } + } + break; + } + case kTfLiteInt16: { + switch (weight_type) { + case kTfLiteInt8: { + // 16(activation)x8(weight)->16(cell) LSTM with 64 bits bias + LSTMBuffers buffers = + CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices); + CMSIS_NN_EvalInteger16x8_16Lstm(op_data, kernel_content, buffers); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(weight_type), weight_type); + return kTfLiteError; + } + } + break; + } + default: { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(activation_type), activation_type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +TfLiteStatus UnidirectionalSequenceLstmEvalInt8(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& op_data = *reinterpret_cast(node->user_data); + const OpDataLSTM& op_data_lstm = op_data.params_ref; + auto kernel_content = CreateLSTMKernelContent(context, node); + const auto activation_type = + kernel_content.internal_tensors[kLstmInputTensor]->type; + const auto weight_type = + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type; + + TFLITE_DCHECK(weight_type == kTfLiteInt16 && + "Only int16 filter type supported."); + + if (activation_type == kTfLiteInt8) { + LSTMBuffers buffers = + CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices); + + return CMSIS_NN_EvalInteger8x8_16Lstm(op_data, kernel_content, buffers); + } else { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(activation_type), activation_type); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus UnidirectionalSequenceLstmEvalInt16(TfLiteContext* context, + TfLiteNode* node) { + 
TFLITE_DCHECK(node->user_data != nullptr); + const OpData& op_data = *reinterpret_cast(node->user_data); + const OpDataLSTM& op_data_lstm = op_data.params_ref; + auto kernel_content = CreateLSTMKernelContent(context, node); + const auto activation_type = + kernel_content.internal_tensors[kLstmInputTensor]->type; + const auto weight_type = + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type; + + TFLITE_DCHECK(weight_type == kTfLiteInt16 && + "Only int16 filter type supported."); + + if (activation_type == kTfLiteInt16) { + LSTMBuffers buffers = + CMSIS_NN_CreateLSTMBuffers(context, op_data_lstm.buffer_indices); + + return CMSIS_NN_EvalInteger16x8_16Lstm(op_data, kernel_content, buffers); + } else { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(activation_type), activation_type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM() { + return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit, + UnidirectionalSequenceLstmPrepare, + UnidirectionalSequenceLstmEval); +} + +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8() { + return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit, + UnidirectionalSequenceLstmPrepare, + UnidirectionalSequenceLstmEvalInt8); +} + +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16() { + return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit, + UnidirectionalSequenceLstmPrepare, + UnidirectionalSequenceLstmEvalInt16); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/comparisons.cpp b/src/tensorflow/lite/micro/kernels/comparisons.cpp index 3500764..69b3c61 100644 --- a/src/tensorflow/lite/micro/kernels/comparisons.cpp +++ b/src/tensorflow/lite/micro/kernels/comparisons.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,11 +19,10 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace comparisons { + namespace { struct OpData { @@ -104,19 +103,6 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowEqualWithScaling( @@ -131,8 +117,8 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -209,19 +195,6 @@ TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowNotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowNotEqualWithScaling( @@ -236,8 +209,8 @@ TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -300,19 +273,6 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? 
reference_ops::Broadcast4DSlowGreaterWithScaling( @@ -327,8 +287,8 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -391,19 +351,6 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( @@ -418,8 +365,8 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -482,19 +429,6 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowLessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowLessWithScaling( @@ -509,8 +443,8 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -573,19 +507,6 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? 
reference_ops::Broadcast4DSlowLessEqualWithScaling( @@ -600,30 +521,32 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ComparisonsPrepare(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); OpData* data = static_cast(node->user_data); - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); TF_LITE_ENSURE(context, input2 != nullptr); - if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { + if (input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int kLeftShift = 8; @@ -648,77 +571,36 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->params.input2_shift = input2_shift; } + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + return kTfLiteOk; } -} // namespace comparisons - -TfLiteRegistration Register_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - 
/*invoke=*/comparisons::EqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_EQUAL() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, EqualEval); } -TfLiteRegistration Register_NOT_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::NotEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_NOT_EQUAL() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, NotEqualEval); } -TfLiteRegistration Register_GREATER() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_GREATER() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEval); } -TfLiteRegistration Register_GREATER_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_GREATER_EQUAL() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, GreaterEqualEval); } -TfLiteRegistration Register_LESS() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::LessEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_LESS() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEval); } -TfLiteRegistration Register_LESS_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - 
/*invoke=*/comparisons::LessEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_LESS_EQUAL() { + return tflite::micro::RegisterOp(Init, ComparisonsPrepare, LessEqualEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/concatenation.cpp b/src/tensorflow/lite/micro/kernels/concatenation.cpp index 8127cc3..151d3b4 100644 --- a/src/tensorflow/lite/micro/kernels/concatenation.cpp +++ b/src/tensorflow/lite/micro/kernels/concatenation.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,22 +18,28 @@ limitations under the License. #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace concatenation { + +namespace { constexpr int kMaxInputNum = 10; // Maximum number of input tensors constexpr int kOutputTensor = 0; struct OpData { ConcatenationParams params; + +#ifdef USE_TFLM_COMPRESSION + + // scratch buffers for compressed tensors + int scratch_indices[kMaxInputNum]; + +#endif // USE_TFLM_COMPRESSION }; // Handles negative axis index, coerces to positive index value. 
@@ -53,8 +59,6 @@ inline int CalculatePositiveAxis(int axis, const TfLiteTensor* output_tensor) { inline void GetAllInputTensorShapes(const TfLiteContext* context, const TfLiteNode* node, RuntimeShape all_shapes[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); for (int i = 0; i < node->inputs->size; ++i) { const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); RuntimeShape shape = tflite::micro::GetTensorShape(t); @@ -74,12 +78,22 @@ inline void GetShapesPointers(const RuntimeShape* shapes, size_t num, template inline void GetAllInputTensorData(const TfLiteContext* context, const TfLiteNode* node, - T* all_data[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); + const T* all_data[kMaxInputNum]) { +#ifdef USE_TFLM_COMPRESSION + const OpData* data = static_cast(node->user_data); + MicroContext* micro_context = GetMicroContext(context); +#endif // USE_TFLM_COMPRESSION + for (int i = 0; i < node->inputs->size; ++i) { const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); +#ifdef USE_TFLM_COMPRESSION + const CompressionTensorData* comp_td = + micro_context->GetTensorCompressionData(node, i); + all_data[i] = tflite::micro::GetTensorData(micro_context, t, comp_td, + data->scratch_indices[i]); +#else // USE_TFLM_COMPRESSION all_data[i] = tflite::micro::GetTensorData(t); +#endif // USE_TFLM_COMPRESSION } } @@ -89,6 +103,10 @@ void EvalUnquantized(TfLiteContext* context, TfLiteNode* node) { RuntimeShape inputs_shape[kMaxInputNum]; const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; const data_type* inputs_data[kMaxInputNum]; + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(node != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); GetAllInputTensorShapes(context, node, inputs_shape); GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); GetAllInputTensorData(context, node, 
inputs_data); @@ -96,138 +114,122 @@ void EvalUnquantized(TfLiteContext* context, TfLiteNode* node) { TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - reference_ops::Concatenation(data->params, inputs_shape_ptr, inputs_data, tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); } -void EvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node) { - // Collect the shapes and data pointer of input tensors - RuntimeShape inputs_shape[kMaxInputNum]; - const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; - const uint8_t* inputs_data[kMaxInputNum]; - GetAllInputTensorShapes(context, node, inputs_shape); - GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); - GetAllInputTensorData(context, node, inputs_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - reference_ops::ConcatenationWithScaling( - data->params, inputs_shape_ptr, inputs_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* ConcatenationInit(TfLiteContext* context, const char* buffer, + size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ConcatenationPrepare(TfLiteContext* context, TfLiteNode* node) { // This function only checks the types. Additional shape validations are // performed in the reference implementation called during Eval(). 
const TfLiteConcatenationParams* params = reinterpret_cast(node->builtin_data); - const TfLiteTensor* input_tensor = GetInput(context, node, 0); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input_tensor = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input_tensor != nullptr); TfLiteType input_type = input_tensor->type; - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output_tensor = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output_tensor != nullptr); TfLiteType output_type = output_tensor->type; + micro_context->DeallocateTempTfLiteTensor(input_tensor); + // Check activation and input type TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); TF_LITE_ENSURE(context, - input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || - input_type == kTfLiteInt8 || input_type == kTfLiteInt32 || - input_type == kTfLiteInt64); + input_type == kTfLiteFloat32 || input_type == kTfLiteInt8 || + input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || + input_type == kTfLiteInt64 || input_type == kTfLiteBool); // Output type must match input type - TF_LITE_ENSURE_EQ(context, output_type, input_type); + TF_LITE_ENSURE_TYPES_EQ(context, output_type, input_type); // This implementation does not support large number of input tensors const int num_inputs = NumInputs(node); TF_LITE_ENSURE(context, num_inputs <= kMaxInputNum); - // Shapes with dimensions >4 are not yet supported with static allocation. + // Calculate OpData. + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = static_cast(node->user_data); + + // Shapes with dimensions > kMaxSmallSize are not yet supported with static + // allocation. 
for (int i = 0; i < num_inputs; ++i) { - const TfLiteTensor* input = GetInput(context, node, i); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, input_type); int num_dimensions = NumDimensions(input); - if (num_dimensions > 4) { - TF_LITE_KERNEL_LOG( - context, - "Op Concatenation does not currently support num dimensions >4 " + if (num_dimensions > RuntimeShape::kMaxSmallSize) { + MicroPrintf( + "Op Concatenation does not currently support num dimensions > %d " "Tensor has %d dimensions.", - num_dimensions); + RuntimeShape::kMaxSmallSize, num_dimensions); return kTfLiteError; } - } - // Calculate OpData. - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + if (input_type == kTfLiteInt8) { + // Make sure there is no re-scaling needed for Int8 quantized kernel. This + // is a restriction we introduced to Int8 kernels. + TF_LITE_ENSURE_EQ(context, static_cast(input->params.scale), + static_cast(output_tensor->params.scale)); + TF_LITE_ENSURE_EQ(context, input->params.zero_point, + output_tensor->params.zero_point); + } else if (input_type == kTfLiteInt16) { + // Make sure that all Int16 inputs have a null zero-point. + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + } + +#ifdef USE_TFLM_COMPRESSION + + // Compression scratch buffers. + // These will only be allocated if the tensor is compressed. + data->scratch_indices[i] = + micro_context->AllocateDecompressionScratchBuffer(node, i); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); +#endif // USE_TFLM_COMPRESSION + + micro_context->DeallocateTempTfLiteTensor(input); + } + + if (input_type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, output_tensor->params.zero_point, 0); + } switch (output_type) { // Already know in/outtypes are same. 
+ case kTfLiteBool: case kTfLiteFloat32: + case kTfLiteInt8: + case kTfLiteInt16: case kTfLiteInt32: case kTfLiteInt64: { - data->params.axis = CalculatePositiveAxis(params->axis, output); + data->params.axis = CalculatePositiveAxis(params->axis, output_tensor); data->params.inputs_count = node->inputs->size; break; } - case kTfLiteUInt8: - case kTfLiteInt8: { - data->params.axis = CalculatePositiveAxis(params->axis, output); - data->params.inputs_count = node->inputs->size; - - float* input_scales = - reinterpret_cast(context->AllocatePersistentBuffer( - context, node->inputs->size * sizeof(float))); - - int32_t* input_zero_points = - reinterpret_cast(context->AllocatePersistentBuffer( - context, node->inputs->size * sizeof(int32_t))); - - // Allocate persistent scale and zeropoint buffers. - // Store input scale and zero point values in OpParams: - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteTensor* t = GetInput(context, node, i); - TF_LITE_ENSURE(context, t != nullptr); - input_scales[i] = t->params.scale; - input_zero_points[i] = t->params.zero_point; - } - - data->params.input_scale = input_scales; - data->params.input_zeropoint = input_zero_points; - data->params.output_zeropoint = output->params.zero_point; - data->params.output_scale = output->params.scale; - break; - } default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); + MicroPrintf("Op Concatenation does not currently support type '%s'.", + TfLiteTypeGetName(output_type)); return kTfLiteError; } + micro_context->DeallocateTempTfLiteTensor(output_tensor); + return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); +TfLiteStatus ConcatenationEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* output_tensor = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); 
TF_LITE_ENSURE(context, output_tensor != nullptr); TfLiteType output_type = output_tensor->type; @@ -238,39 +240,33 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteInt32: EvalUnquantized(context, node); break; - case kTfLiteUInt8: - EvalQuantizedUInt8(context, node); - break; case kTfLiteInt8: EvalUnquantized(context, node); break; case kTfLiteInt64: EvalUnquantized(context, node); break; + case kTfLiteInt16: + EvalUnquantized(context, node); + break; + case kTfLiteBool: + EvalUnquantized(context, node); + break; default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); + MicroPrintf("Op Concatenation does not currently support Type '%s'.", + TfLiteTypeGetName(output_type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace concatenation - -TfLiteRegistration Register_CONCATENATION() { - return {/*init=*/concatenation::Init, - /*free=*/nullptr, - /*prepare=*/concatenation::Prepare, - /*invoke=*/concatenation::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_CONCATENATION() { + return tflite::micro::RegisterOp(ConcatenationInit, ConcatenationPrepare, + ConcatenationEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/conv.h b/src/tensorflow/lite/micro/kernels/conv.h new file mode 100644 index 0000000..8e48ef5 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/conv.h @@ -0,0 +1,134 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +struct OpDataConv { + TfLitePaddingValues padding; + + // Cached tensor zero point values for quantized operations. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + + // Per channel output multiplier and shift. + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; + + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; + + // A buffer used to store unpacked filter values. This is used if the source + // tensor is of n-bit precision that cannot be easily processed by kernels. 
+ int filter_buffer_index; + +#ifdef USE_TFLM_COMPRESSION + + // scratch buffers for compressed tensors + int weights_scratch_index; + int bias_scratch_index; + +#endif // USE_TFLM_COMPRESSION +}; + +extern const int kConvInputTensor; +extern const int kConvWeightsTensor; +extern const int kConvBiasTensor; +extern const int kConvOutputTensor; +extern const int kConvQuantizedDimension; + +// Returns a ConvParams struct with all the parameters needed for a +// float computation. +ConvParams ConvParamsFloat(const TfLiteConvParams& params, + const OpDataConv& data); + +// Returns a ConvParams struct with all the parameters needed for a +// quantized computation. +ConvParams ConvParamsQuantized(const TfLiteConvParams& params, + const OpDataConv& data); + +TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, int width, + int height, int filter_width, + int filter_height, int out_width, + int out_height, const TfLiteType data_type, + OpDataConv* data); + +void* ConvInit(TfLiteContext* context, const char* buffer, size_t length); + +TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node); + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. +TFLMRegistration Register_CONV_2D(); + +#if defined(XTENSA) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and always calls the reference +// implementation. +TFLMRegistration Register_CONV_2D_INT8REF(); + +#else +inline TFLMRegistration Register_CONV_2D_INT8REF() { + return Register_CONV_2D(); +} +#endif // defined(XTENSA) + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int4 weights and uses the latency optimized +// implementations. 
+TFLMRegistration Register_CONV_2D_INT4(); +#else +inline TFLMRegistration Register_CONV_2D_INT4() { return Register_CONV_2D(); } +#endif // defined(ARDUINO) + +#if defined(ARDUINO) || defined(XTENSA) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_CONV_2D_INT8(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16 activations and int8 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_CONV_2D_INT16(); + +#else +inline TFLMRegistration Register_CONV_2D_INT8() { return Register_CONV_2D(); } + +inline TFLMRegistration Register_CONV_2D_INT16() { return Register_CONV_2D(); } +#endif // defined(ARDUINO) || defined(XTENSA) + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ diff --git a/src/tensorflow/lite/micro/kernels/conv_common.cpp b/src/tensorflow/lite/micro/kernels/conv_common.cpp new file mode 100644 index 0000000..41f8141 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/conv_common.cpp @@ -0,0 +1,235 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +const int kConvInputTensor = 0; +const int kConvWeightsTensor = 1; +const int kConvBiasTensor = 2; +const int kConvOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +const int kConvQuantizedDimension = 0; + +// Returns a ConvParams struct with all the parameters needed for a +// float computation. +ConvParams ConvParamsFloat(const TfLiteConvParams& params, + const OpDataConv& data) { + ConvParams op_params; + CalculateActivationRange(params.activation, &op_params.float_activation_min, + &op_params.float_activation_max); + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params.stride_width; + op_params.stride_height = params.stride_height; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.dilation_height_factor = params.dilation_height_factor; + return op_params; +} + +// Returns a ConvParams struct with all the parameters needed for a +// quantized computation. 
+ConvParams ConvParamsQuantized(const TfLiteConvParams& params, + const OpDataConv& data) { + ConvParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = -data.filter_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier = data.output_multiplier; + op_params.output_shift = -data.output_shift; + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.stride_height = params.stride_height; + op_params.stride_width = params.stride_width; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + return op_params; +} + +void* ConvInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); +} + +TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, int width, + int height, int filter_width, + int filter_height, int out_width, + int out_height, const TfLiteType data_type, + OpDataConv* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. 
+ auto padding = params.padding; + data->padding = ComputePaddingHeightWidth( + params.stride_height, params.stride_width, params.dilation_height_factor, + params.dilation_width_factor, height, width, filter_height, filter_width, + padding, &out_height, &out_width); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kConvBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. + if (data_type != kTfLiteFloat32) { + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params.activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, data->per_channel_output_shift, + output_channels)); + } + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + + return kTfLiteOk; +} + +TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data 
!= nullptr); + + OpDataConv* data = static_cast(node->user_data); + const auto& params = + *(static_cast(node->builtin_data)); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + (input->type == kTfLiteFloat32 && filter->type == kTfLiteFloat32) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && + (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)), + "Hybrid models are not supported on TFLite Micro."); + + const int input_width = input->dims->data[2]; + const int input_height = input->dims->data[1]; + const int filter_width = filter->dims->data[2]; + const int filter_height = filter->dims->data[1]; + const int output_width = output->dims->data[2]; + const int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // All per-channel quantized tensors need valid zero point and scale arrays. 
+ if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena(context, filter_size, + &data->filter_buffer_index); + } + +#ifdef USE_TFLM_COMPRESSION + + // Compression scratch buffers. + // These will only be allocated if the tensor is compressed. 
+ if (micro_context->IsTensorCompressed(node, kConvWeightsTensor) && + filter->type == kTfLiteInt4) { + MicroPrintf("Compression not supported with INT4 tensors"); + return kTfLiteError; + } + data->weights_scratch_index = + micro_context->AllocateDecompressionScratchBuffer(node, + kConvWeightsTensor); + data->bias_scratch_index = + micro_context->AllocateDecompressionScratchBuffer(node, kConvBiasTensor); + +#endif // USE_TFLM_COMPRESSION + + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/conv_test.h b/src/tensorflow/lite/micro/kernels/conv_test.h new file mode 100644 index 0000000..642f4c7 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/conv_test.h @@ -0,0 +1,229 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/kernels/micro_ops.h" +#include "tensorflow/lite/micro/test_helpers.h" +#include "tensorflow/lite/micro/testing/micro_test.h" + +namespace tflite { +namespace testing { + +constexpr int kConvMaxTensors = 4; +constexpr int kConvMaxInputTensors = 3; + +template +TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, + int output_length, const TfLiteConvParams* conv_params, + TFLMRegistration registration, T* output_data +#ifdef USE_TFLM_COMPRESSION + , + const CompressedTensorList* comp_list_p = nullptr +#endif // USE_TFLM_COMPRESSION +) { + // TODO(b/358165875): support optional bias tensor + int inputs_array_data[] = {3, 0, 1, 2}; + TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data); + int outputs_array_data[] = {1, 3}; + TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); + + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, conv_params +#ifdef USE_TFLM_COMPRESSION + , + nullptr, comp_list_p +#endif // USE_TFLM_COMPRESSION + ); + + const char* init_data = reinterpret_cast(conv_params); + TfLiteStatus status = runner.InitAndPrepare(init_data); + if (status != kTfLiteOk) { + return status; + } + return runner.Invoke(); +} + +template +TfLiteStatus ValidateConvGoldens( + TfLiteTensor* tensors, int tensors_size, const T* expected_output_data, + int output_length, const TfLiteConvParams* conv_params, + TFLMRegistration registration, T* output_data, float tolerance = 1e-5 +#ifdef USE_TFLM_COMPRESSION + , + const TestCompressionInfo* filter_comp_info = nullptr, + const 
TestCompressionInfo* bias_comp_info = nullptr +#endif // USE_TFLM_COMPRESSION +) { +#ifdef USE_TFLM_COMPRESSION + + TestCompressedList tcl; + if (filter_comp_info != nullptr) { + TF_LITE_MICRO_EXPECT_EQ( + tcl.AddInput(*filter_comp_info, tensors[kConvWeightsTensor], + kConvWeightsTensor), + kTfLiteOk); + TF_LITE_MICRO_CHECK_FAIL(); + } + if (bias_comp_info) { + TF_LITE_MICRO_EXPECT_EQ( + tcl.AddInput(*bias_comp_info, tensors[kConvBiasTensor], + kConvBiasTensor), + kTfLiteOk); + TF_LITE_MICRO_CHECK_FAIL(); + } + const CompressedTensorList* comp_list_p = tcl.GetCompressedTensorList(); + +#endif // USE_TFLM_COMPRESSION + + TfLiteStatus status = InvokeConv(tensors, tensors_size, output_length, + conv_params, registration, output_data +#ifdef USE_TFLM_COMPRESSION + , + comp_list_p +#endif // USE_TFLM_COMPRESSION + ); + if (status != kTfLiteOk) { + return status; + } + for (int i = 0; i < output_length; ++i) { + TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i], + tolerance); + } + return kTfLiteOk; +} + +TfLiteStatus TestConvFloat( + int* input_dims_data, const float* input_data, int* filter_dims_data, + const float* filter_data, int* bias_dims_data, const float* bias_data, + int* output_dims_data, const float* expected_output_data, + TfLiteConvParams* conv_params, TFLMRegistration registration, + float* output_data +#ifdef USE_TFLM_COMPRESSION + , + const TestCompressionInfo* filter_comp_info = nullptr, + const TestCompressionInfo* bias_comp_info = nullptr +#endif // USE_TFLM_COMPRESSION +); + +TfLiteStatus TestConvQuantizedPerChannel( + int* input_dims_data, const float* input_data, int8_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized, + float* bias_scales, int* bias_zero_points, int* output_dims_data, + const float* expected_output_data, int8_t* expected_output_data_quantized, + 
float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TFLMRegistration registration, int8_t* output_data, + TfLiteType tensor_weight_type = kTfLiteNoType); + +TfLiteStatus TestConvQuantizedPerChannel( + int* input_dims_data, const float* input_data, int16_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, + std::int64_t* bias_data_quantized, float* bias_scales, + int* bias_zero_points, int* output_dims_data, + const float* expected_output_data, int16_t* expected_output_data_quantized, + float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TFLMRegistration registration, int16_t* output_data); + +TfLiteStatus TestConvQuantizedPerChannel( + int* input_dims_data, const float* input_data, int16_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized, + float* bias_scales, int* bias_zero_points, int* output_dims_data, + const float* expected_output_data, int16_t* expected_output_data_quantized, + float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TFLMRegistration registration, int16_t* output_data); + +#ifdef USE_TFLM_COMPRESSION + +template +TfLiteStatus TestConvQuantizedPerChannelCompressed( + int* input_dims_data, const float* input_data, TIO* input_quantized, + float input_scale, int input_zero_point, int* output_dims_data, + const float* expected_output_data, TIO* expected_output_quantized, + TIO* output_quantized, float output_scale, int output_zero_point, + const TfLiteConvParams* conv_params, TFLMRegistration registration, + const TestCompressionQuantizedInfo* filter_comp_info, + const TestCompressionQuantizedInfo* bias_comp_info) { + // TODO(b/358165875): account for optional bias tensor + // 
bool null_bias = comp_info->bias_data == nullptr ? true : false; + + TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); + TfLiteIntArray* filter_dims = IntArrayFromInts(filter_comp_info->dims_data); + TfLiteIntArray* bias_dims = IntArrayFromInts(bias_comp_info->dims_data); + TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); + + TfLiteFloatArray* filter_scales = + FloatArrayFromFloats(filter_comp_info->scales); + TfLiteIntArray* filter_zero_points = + IntArrayFromInts(filter_comp_info->zero_points); + TfLiteFloatArray* bias_scales = FloatArrayFromFloats(bias_comp_info->scales); + TfLiteIntArray* bias_zero_points = + IntArrayFromInts(bias_comp_info->zero_points); + + TfLiteAffineQuantization filter_quant = {}; + TfLiteTensor filter_tensor = CreatePerChannelQuantizedTensor( + filter_comp_info->compressed, filter_dims, filter_scales, + filter_zero_points, &filter_quant, kConvQuantizedDimension, + false /* is_variable */, kTfLiteInt8); + SymmetricPerChannelQuantize( + filter_comp_info->data, filter_comp_info->value_table, + filter_scales->size * filter_comp_info->value_table_stride, + filter_scales->size, filter_scales->data); + + TfLiteAffineQuantization bias_quant = {}; + TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor( + bias_comp_info->compressed, bias_dims, input_scale, filter_scales, + bias_scales, bias_zero_points, &bias_quant, kConvQuantizedDimension, + false /* is_variable */, typeToTfLiteType()); + SymmetricPerChannelQuantize( + bias_comp_info->data, bias_comp_info->value_table, + bias_scales->size * bias_comp_info->value_table_stride, bias_scales->size, + bias_scales->data); + + constexpr int tensors_size = kConvMaxTensors; + TfLiteTensor tensors[tensors_size] = { + CreateQuantizedTensor(input_data, input_quantized, input_dims, + input_scale, input_zero_point), + filter_tensor, + bias_tensor, + CreateQuantizedTensor(output_quantized, output_dims, output_scale, + output_zero_point), + }; + + const int 
output_dims_count = ElementCount(*output_dims); + Quantize(expected_output_data, expected_output_quantized, output_dims_count, + output_scale, output_zero_point); + return ValidateConvGoldens(tensors, tensors_size, expected_output_quantized, + output_dims_count, conv_params, registration, + output_quantized, 1.0e-5f /* tolerance */, + filter_comp_info, bias_comp_info); +} + +#endif // USE_TFLM_COMPRESSION + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ diff --git a/src/tensorflow/lite/micro/kernels/cumsum.cpp b/src/tensorflow/lite/micro/kernels/cumsum.cpp new file mode 100644 index 0000000..258cf8d --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/cumsum.cpp @@ -0,0 +1,175 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/cumsum.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kAxisTensor = 1; +constexpr int kOutputTensor = 0; + +constexpr int kCumSumIntegerShift = 20; + +// only used with INT8 tensors +struct OpData { + int32_t output_activation_min; + int32_t output_activation_max; + int32_t input_offset; + int32_t output_offset; + int32_t input_multiplier; + int32_t output_multiplier; + int input_shift; + int output_shift; + int left_shift; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* axis = + micro_context->AllocateTempInputTensor(node, kAxisTensor); + + TF_LITE_ENSURE(context, + input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, axis->type, kTfLiteInt32); + + TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); + + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE(context, HaveSameShapes(input, output)); + + if (output->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* data = static_cast(node->user_data); + + // 8bit -> 8bit general 
quantized path, with general rescalings + data->input_offset = -input->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = kCumSumIntegerShift; + const double twice_max_input_scale = + 2 * static_cast(input->params.scale); + const double real_input_multiplier = + static_cast(input->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input_multiplier, &data->input_multiplier, &data->input_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, kTfLiteActNone, output, &data->output_activation_min, + &data->output_activation_max)); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus CumSumPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus CumSumEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* axis_tensor = + tflite::micro::GetEvalInput(context, node, kAxisTensor); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + auto* cs_params = static_cast(node->builtin_data); + auto input_shape = tflite::micro::GetTensorShape(input); + + int32_t axis = *tflite::micro::GetTensorData(axis_tensor); + if (axis < 0) axis += input_shape.DimensionsCount(); + + if (axis < 0 || axis >= input_shape.DimensionsCount()) { + MicroPrintf("CUMSUM Invalid axis: %d", axis); + return kTfLiteError; + } + + switch (input->type) { + case kTfLiteFloat32: { + 
reference_ops::CumSum(tflite::micro::GetTensorData(input), + input_shape, axis, cs_params->exclusive, + cs_params->reverse, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + + case kTfLiteInt8: { + auto* data = static_cast(node->user_data); + ArithmeticParams params; + params.left_shift = data->left_shift; + params.input1_offset = data->input_offset; + params.input1_multiplier = data->input_multiplier; + params.input1_shift = data->input_shift; + params.output_offset = data->output_offset; + params.output_multiplier = data->output_multiplier; + params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, + data->output_activation_max, ¶ms); + reference_ops::CumSum(params, tflite::micro::GetTensorData(input), + input_shape, axis, cs_params->exclusive, + cs_params->reverse, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + + default: { + MicroPrintf("CUMSUM only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } + + return kTfLiteError; +} + +} // namespace + +TFLMRegistration Register_CUMSUM() { + return tflite::micro::RegisterOp(nullptr, CumSumPrepare, CumSumEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/decompress.cpp b/src/tensorflow/lite/micro/kernels/decompress.cpp new file mode 100644 index 0000000..2d6d867 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/decompress.cpp @@ -0,0 +1,61 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/kernels/decompress.h" + +#include +#include + +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +template +T* DecompressionState::DecompressToBuffer(void* buffer) { + TFLITE_DCHECK(compressed_bit_width_ <= LookupTableData::kMaxBitWidth); + TFLITE_DCHECK(compressed_bit_width_ > 0); + + if (std::is_same::value && + comp_data_.data.lut_data->compressed_bit_width == 4 && + !comp_data_.data.lut_data->use_alternate_axis) { + DecompressToBufferWidth4_16(static_cast(buffer)); + } else if (std::is_same::value && + comp_data_.data.lut_data->compressed_bit_width == 3 && + !comp_data_.data.lut_data->use_alternate_axis) { + DecompressToBufferWidth3_32(static_cast(buffer)); + } else if (std::is_same::value && + comp_data_.data.lut_data->compressed_bit_width == 2 && + !comp_data_.data.lut_data->use_alternate_axis) { + DecompressToBufferWidth2_16(static_cast(buffer)); + } else { + DecompressToBufferWidthAny(static_cast(buffer)); + } + + return static_cast(buffer); +} + +template bool* DecompressionState::DecompressToBuffer(void*); +template float* DecompressionState::DecompressToBuffer(void*); +template int8_t* DecompressionState::DecompressToBuffer(void*); +template int16_t* DecompressionState::DecompressToBuffer(void*); +template int32_t* DecompressionState::DecompressToBuffer(void*); +template int64_t* DecompressionState::DecompressToBuffer(void*); + +} // namespace tflite + +#endif // USE_TFLM_COMPRESSION diff --git a/src/tensorflow/lite/micro/kernels/decompress.h b/src/tensorflow/lite/micro/kernels/decompress.h new file mode 100644 index 0000000..d1d4e98 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/decompress.h @@ -0,0 +1,89 @@ +/* 
Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_KERNELS_DECOMPRESS_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_KERNELS_DECOMPRESS_H_ + +#include + +#include "tensorflow/lite/micro/compression.h" +#include "tensorflow/lite/micro/micro_profiler.h" + +namespace tflite { + +#ifdef USE_TFLM_COMPRESSION + +struct DecompressionState { + DecompressionState() = delete; + + DecompressionState(const uint8_t* compressed_indices, + const size_t count_indices, + const CompressionTensorData& comp_data, + const size_t num_channels, + MicroProfilerInterface* profiler = nullptr) + : compressed_indices_(compressed_indices), + count_indices_(count_indices), + comp_data_(comp_data), + num_channels_(num_channels), + micro_profiler_(profiler) {} + + DecompressionState(const DecompressionState& other) + : compressed_indices_(other.compressed_indices_), + count_indices_(other.count_indices_), + comp_data_(other.comp_data_), + num_channels_(other.num_channels_), + micro_profiler_(other.micro_profiler_) {} + + template + T* DecompressToBuffer(void* buffer); + + protected: + // optimized C++ for INT8, use_alt_axis == false + void DecompressToBufferWidth4_16(int8_t* buffer); + void DecompressToBufferWidth3_32(int8_t* buffer); + void DecompressToBufferWidth2_16(int8_t* buffer); + + // generic C++ for any bit width and value table type + 
template + void DecompressToBufferWidthAny(T* buffer); + + // Optimized C++ table index fetch + inline size_t GetNextTableIndexWidth7(const size_t current_offset); + inline size_t GetNextTableIndexWidth6(const size_t current_offset); + inline size_t GetNextTableIndexWidth5(const size_t current_offset); + inline size_t GetNextTableIndexWidth4(const size_t current_offset); + inline size_t GetNextTableIndexWidth3(const size_t current_offset); + inline size_t GetNextTableIndexWidth2(const size_t current_offset); + inline size_t GetNextTableIndexWidth1(const size_t current_offset); + + protected: + const uint8_t* compressed_indices_; + const size_t count_indices_; + const CompressionTensorData& comp_data_; + const size_t num_channels_; + const size_t compressed_bit_width_ = + comp_data_.data.lut_data->compressed_bit_width; + const size_t elements_per_channel_ = + comp_data_.data.lut_data->use_alternate_axis + ? 1 + : count_indices_ / num_channels_; + MicroProfilerInterface* micro_profiler_; +}; + +#endif // USE_TFLM_COMPRESSION + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_KERNELS_DECOMPRESS_H_ diff --git a/src/tensorflow/lite/micro/kernels/decompress_common.cpp b/src/tensorflow/lite/micro/kernels/decompress_common.cpp new file mode 100644 index 0000000..5c40af8 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/decompress_common.cpp @@ -0,0 +1,539 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifdef USE_TFLM_COMPRESSION + +#include +#include + +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/kernels/decompress.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_profiler.h" + +namespace tflite { + +void DecompressionState::DecompressToBufferWidth4_16(int8_t* buffer) { + ScopedMicroProfiler scoped_profiler(__func__, micro_profiler_); + + const size_t stride = comp_data_.data.lut_data->value_table_channel_stride; + const uint8_t* value_table = + static_cast(comp_data_.data.lut_data->value_table); + const size_t max_count = elements_per_channel_; + size_t current_offset = 0; + + for (size_t channel = 0; channel < num_channels_; channel++) { + size_t count = max_count; + + // process elements at start of channel up to next uint64_t alignment of + // compressed_indices_ + while (count > 0 && (current_offset & 0x0F)) { + const size_t index = GetNextTableIndexWidth4(current_offset++); + *buffer++ = value_table[index]; + count -= 1; + } + + // process elements in current channel in groups of 16 + if (count >= 16) { + const uint64_t* indices = reinterpret_cast( + &compressed_indices_[current_offset >> 1]); + + while (count >= 16) { + count -= 16; + uint64_t index = *indices++; + uint64_t value, value2; + + value = static_cast(value_table[(index >> 4) & 0x0F]); + value |= static_cast(value_table[index & 0x0F]) << 8; + value |= static_cast(value_table[(index >> 12) & 0x0F]) << 16; + value |= static_cast(value_table[(index >> 8) & 0x0F]) << 24; + value |= static_cast(value_table[(index >> 20) & 0x0F]) << 32; + value |= static_cast(value_table[(index >> 16) & 0x0F]) << 40; + value |= static_cast(value_table[(index >> 28) & 0x0F]) << 48; + value |= static_cast(value_table[(index >> 24) & 0x0F]) << 56; + + *reinterpret_cast(buffer) = value; + + value2 = 
static_cast(value_table[(index >> 36) & 0x0F]); + value2 |= static_cast(value_table[(index >> 32) & 0x0F]) << 8; + value2 |= static_cast(value_table[(index >> 44) & 0x0F]) + << 16; + value2 |= static_cast(value_table[(index >> 40) & 0x0F]) + << 24; + value2 |= static_cast(value_table[(index >> 52) & 0x0F]) + << 32; + value2 |= static_cast(value_table[(index >> 48) & 0x0F]) + << 40; + value2 |= static_cast(value_table[(index >> 60) & 0x0F]) + << 48; + value2 |= static_cast(value_table[(index >> 56) & 0x0F]) + << 56; + + *reinterpret_cast(buffer + 8) = value2; + + buffer += 16; + } + + current_offset = + (reinterpret_cast(indices) - compressed_indices_) + << 1; + } + + // process remaining elements in current channel + while (count > 0) { + count -= 1; + const size_t index = GetNextTableIndexWidth4(current_offset++); + *buffer++ = value_table[index]; + } + + value_table += stride; + } +} + +void DecompressionState::DecompressToBufferWidth2_16(int8_t* buffer) { + ScopedMicroProfiler scoped_profiler(__func__, micro_profiler_); + + const size_t stride = comp_data_.data.lut_data->value_table_channel_stride; + const uint8_t* value_table = + static_cast(comp_data_.data.lut_data->value_table); + const size_t max_count = elements_per_channel_; + size_t current_offset = 0; + + for (size_t channel = 0; channel < num_channels_; channel++) { + size_t count = max_count; + + // process elements at start of channel up to next uint32_t alignment of + // compressed_indices_ + while (count > 0 && (current_offset & 0x0F)) { + const size_t index = GetNextTableIndexWidth2(current_offset++); + *buffer++ = value_table[index]; + count -= 1; + } + + // process elements in current channel in groups of 16 + if (count >= 16) { + const uint32_t* indices = reinterpret_cast( + &compressed_indices_[current_offset >> 2]); + + while (count >= 16) { + count -= 16; + uint32_t index = *indices++; + uint64_t value, value2; + + value = static_cast(value_table[(index >> 6) & 0x03]); + value |= 
static_cast(value_table[(index >> 4) & 0x03]) << 8; + value |= static_cast(value_table[(index >> 2) & 0x03]) << 16; + value |= static_cast(value_table[index & 0x03]) << 24; + value |= static_cast(value_table[(index >> 14) & 0x03]) << 32; + value |= static_cast(value_table[(index >> 12) & 0x03]) << 40; + value |= static_cast(value_table[(index >> 10) & 0x03]) << 48; + value |= static_cast(value_table[(index >> 8) & 0x03]) << 56; + + *reinterpret_cast(buffer) = value; + + value2 = static_cast(value_table[(index >> 22) & 0x03]); + value2 |= static_cast(value_table[(index >> 20) & 0x03]) << 8; + value2 |= static_cast(value_table[(index >> 18) & 0x03]) + << 16; + value2 |= static_cast(value_table[(index >> 16) & 0x03]) + << 24; + value2 |= static_cast(value_table[(index >> 30) & 0x03]) + << 32; + value2 |= static_cast(value_table[(index >> 28) & 0x03]) + << 40; + value2 |= static_cast(value_table[(index >> 26) & 0x03]) + << 48; + value2 |= static_cast(value_table[(index >> 24) & 0x03]) + << 56; + + *reinterpret_cast(buffer + 8) = value2; + + buffer += 16; + } + + current_offset = + (reinterpret_cast(indices) - compressed_indices_) + << 2; + } + + // process remaining elements in current channel + while (count > 0) { + count -= 1; + const size_t index = GetNextTableIndexWidth2(current_offset++); + *buffer++ = value_table[index]; + } + + value_table += stride; + } +} + +void DecompressionState::DecompressToBufferWidth3_32(int8_t* buffer) { + ScopedMicroProfiler scoped_profiler(__func__, micro_profiler_); + + const size_t stride = comp_data_.data.lut_data->value_table_channel_stride; + const uint8_t* value_table = + static_cast(comp_data_.data.lut_data->value_table); + const size_t max_count = elements_per_channel_; + size_t current_offset = 0; + + for (size_t channel = 0; channel < num_channels_; channel++) { + size_t count = max_count; + + // process elements at start of channel up to next uint32_t alignment of + // compressed_indices_ + while (count > 0 && 
(current_offset & 0x1F)) { + const size_t index = GetNextTableIndexWidth3(current_offset++); + *buffer++ = value_table[index]; + count -= 1; + } + + // process elements in current channel in groups of 32 + if (count >= 32) { + const uint32_t* indices = reinterpret_cast( + &compressed_indices_[(current_offset >> 5) * 12]); + + while (count >= 32) { + count -= 32; + uint32_t index0 = *indices++; + uint32_t index1 = *indices++; + uint32_t index2 = *indices++; + uint64_t value, value2; + + value = static_cast(value_table[(index0 >> 5) & 0x07]); + value |= static_cast(value_table[(index0 >> 2) & 0x07]) << 8; + value |= + static_cast( + value_table[((index0 << 1) & 0b110) | ((index0 >> 15) & 0b1)]) + << 16; + value |= static_cast(value_table[(index0 >> 12) & 0x07]) + << 24; + value |= static_cast(value_table[(index0 >> 9) & 0x07]) << 32; + value |= + static_cast( + value_table[((index0 >> 6) & 0b100) | ((index0 >> 22) & 0b11)]) + << 40; + value |= static_cast(value_table[(index0 >> 19) & 0x07]) + << 48; + value |= static_cast(value_table[(index0 >> 16) & 0x07]) + << 56; + + *reinterpret_cast(buffer) = value; + + value2 = static_cast(value_table[(index0 >> 29) & 0x07]); + value2 |= static_cast(value_table[(index0 >> 26) & 0x07]) + << 8; + value2 |= + static_cast( + value_table[((index0 >> 23) & 0b110) | ((index1 >> 7) & 0b1)]) + << 16; + value2 |= static_cast(value_table[(index1 >> 4) & 0x07]) + << 24; + value2 |= static_cast(value_table[(index1 >> 1) & 0x07]) + << 32; + value2 |= + static_cast( + value_table[((index1 << 2) & 0b100) | ((index1 >> 14) & 0b11)]) + << 40; + value2 |= static_cast(value_table[(index1 >> 11) & 0x07]) + << 48; + value2 |= static_cast(value_table[(index1 >> 8) & 0x07]) + << 56; + + *reinterpret_cast(buffer + 8) = value2; + + value = static_cast(value_table[(index1 >> 21) & 0x07]); + value |= static_cast(value_table[(index1 >> 18) & 0x07]) << 8; + value |= + static_cast( + value_table[((index1 >> 15) & 0b110) | ((index1 >> 31) & 0b1)]) + << 16; + 
value |= static_cast(value_table[(index1 >> 28) & 0x07]) + << 24; + value |= static_cast(value_table[(index1 >> 25) & 0x07]) + << 32; + value |= + static_cast( + value_table[((index1 >> 22) & 0b100) | ((index2 >> 6) & 0b11)]) + << 40; + value |= static_cast(value_table[(index2 >> 3) & 0x07]) << 48; + value |= static_cast(value_table[(index2 >> 0) & 0x07]) << 56; + + *reinterpret_cast(buffer + 16) = value; + + value2 = static_cast(value_table[(index2 >> 13) & 0x07]); + value2 |= static_cast(value_table[(index2 >> 10) & 0x07]) + << 8; + value2 |= + static_cast( + value_table[((index2 >> 7) & 0b110) | ((index2 >> 23) & 0b1)]) + << 16; + value2 |= static_cast(value_table[(index2 >> 20) & 0x07]) + << 24; + value2 |= static_cast(value_table[(index2 >> 17) & 0x07]) + << 32; + value2 |= + static_cast( + value_table[((index2 >> 14) & 0b100) | ((index2 >> 30) & 0b11)]) + << 40; + value2 |= static_cast(value_table[(index2 >> 27) & 0x07]) + << 48; + value2 |= static_cast(value_table[(index2 >> 24) & 0x07]) + << 56; + + *reinterpret_cast(buffer + 24) = value2; + + buffer += 32; + current_offset += 32; + } + } + + // process remaining elements in current channel + while (count > 0) { + count -= 1; + const size_t index = GetNextTableIndexWidth3(current_offset++); + *buffer++ = value_table[index]; + } + + value_table += stride; + } +} + +// TODO(ddavis-2015): templating GetNextTableIndexWidth makes this method +// more than 2x faster, but with a large code size increase +template +void DecompressionState::DecompressToBufferWidthAny(T* buffer) { + ScopedMicroProfiler scoped_profiler(__func__, micro_profiler_); + + if (comp_data_.data.lut_data->use_alternate_axis) { + const size_t stride = comp_data_.data.lut_data->value_table_channel_stride; + size_t current_offset = 0; + size_t count = count_indices_; + + while (count > 0) { + const T* value_table = + static_cast(comp_data_.data.lut_data->value_table); + for (size_t channel = 0; channel < num_channels_; channel++) { + size_t 
index; + switch (compressed_bit_width_) { + case 1: + index = GetNextTableIndexWidth1(current_offset); + break; + case 2: + index = GetNextTableIndexWidth2(current_offset); + break; + case 3: + index = GetNextTableIndexWidth3(current_offset); + break; + case 4: + index = GetNextTableIndexWidth4(current_offset); + break; + case 5: + index = GetNextTableIndexWidth5(current_offset); + break; + case 6: + index = GetNextTableIndexWidth6(current_offset); + break; + case 7: + index = GetNextTableIndexWidth7(current_offset); + break; + } + current_offset++; + *buffer++ = value_table[index]; + value_table += stride; + } + count -= num_channels_; + } + } else { + const size_t stride = comp_data_.data.lut_data->value_table_channel_stride; + const T* value_table = + static_cast(comp_data_.data.lut_data->value_table); + const size_t max_count = elements_per_channel_; + size_t current_offset = 0; + + for (size_t channel = 0; channel < num_channels_; channel++) { + size_t count = max_count; + + while (count-- > 0) { + size_t index; + switch (compressed_bit_width_) { + case 1: + index = GetNextTableIndexWidth1(current_offset); + break; + case 2: + index = GetNextTableIndexWidth2(current_offset); + break; + case 3: + index = GetNextTableIndexWidth3(current_offset); + break; + case 4: + index = GetNextTableIndexWidth4(current_offset); + break; + case 5: + index = GetNextTableIndexWidth5(current_offset); + break; + case 6: + index = GetNextTableIndexWidth6(current_offset); + break; + case 7: + index = GetNextTableIndexWidth7(current_offset); + break; + } + current_offset++; + *buffer++ = value_table[index]; + } + value_table += stride; + } + } +} + +template void DecompressionState::DecompressToBufferWidthAny(bool*); +template void DecompressionState::DecompressToBufferWidthAny(float*); +template void DecompressionState::DecompressToBufferWidthAny(int8_t*); +template void DecompressionState::DecompressToBufferWidthAny(int16_t*); +template void 
DecompressionState::DecompressToBufferWidthAny(int32_t*); +template void DecompressionState::DecompressToBufferWidthAny(int64_t*); + +inline size_t DecompressionState::GetNextTableIndexWidth7( + const size_t current_offset) { + const size_t current_byte_index = (current_offset >> 3) * 7; + const uint8_t* indices = &compressed_indices_[current_byte_index]; + switch (current_offset & 0b111) { + case 0: + return indices[0] >> 1; + case 1: + return ((indices[0] & 0b1) << 6) | (indices[1] >> 2); + case 2: + return ((indices[1] & 0b11) << 5) | (indices[2] >> 3); + case 3: + return ((indices[2] & 0b111) << 4) | (indices[3] >> 4); + case 4: + return ((indices[3] & 0x0F) << 3) | (indices[4] >> 5); + case 5: + return ((indices[4] & 0x1F) << 2) | (indices[5] >> 6); + case 6: + return ((indices[5] & 0x3F) << 1) | (indices[6] >> 7); + case 7: + return indices[6] & 0x7F; + } + // NOTREACHED + return 0; +} + +inline size_t DecompressionState::GetNextTableIndexWidth6( + const size_t current_offset) { + const size_t current_byte_index = (current_offset >> 2) * 3; + const uint8_t* indices = &compressed_indices_[current_byte_index]; + switch (current_offset & 0b11) { + case 0: + return indices[0] >> 2; + case 1: + return ((indices[0] & 0b11) << 4) | (indices[1] >> 4); + case 2: + return ((indices[1] & 0x0F) << 2) | (indices[2] >> 6); + case 3: + return indices[2] & 0x3F; + } + // NOTREACHED + return 0; +} + +inline size_t DecompressionState::GetNextTableIndexWidth5( + const size_t current_offset) { + const size_t current_byte_index = (current_offset >> 3) * 5; + const uint8_t* indices = &compressed_indices_[current_byte_index]; + switch (current_offset & 0b111) { + case 0: + return indices[0] >> 3; + case 1: + return ((indices[0] & 0b111) << 2) | (indices[1] >> 6); + case 2: + return (indices[1] >> 1) & 0x1F; + case 3: + return ((indices[1] & 0b1) << 4) | (indices[2] >> 4); + case 4: + return ((indices[2] & 0x0F) << 1) | (indices[3] >> 7); + case 5: + return (indices[3] >> 2) & 0x1F; 
+ case 6: + return ((indices[3] & 0b11) << 3) | (indices[4] >> 5); + case 7: + return indices[4] & 0x1F; + } + // NOTREACHED + return 0; +} + +inline size_t DecompressionState::GetNextTableIndexWidth4( + const size_t current_offset) { + if (current_offset & 1) { + return compressed_indices_[current_offset >> 1] & 0x0F; + } else { + return compressed_indices_[current_offset >> 1] >> 4; + } +} + +inline size_t DecompressionState::GetNextTableIndexWidth3( + const size_t current_offset) { + const size_t current_byte_index = (current_offset >> 3) * 3; + const uint8_t* indices = &compressed_indices_[current_byte_index]; + switch (current_offset & 0b111) { + case 0: + return indices[0] >> 5; + case 1: + return (indices[0] >> 2) & 0b111; + case 2: + return ((indices[0] & 0b11) << 1) | (indices[1] >> 7); + case 3: + return (indices[1] >> 4) & 0b111; + case 4: + return (indices[1] >> 1) & 0b111; + case 5: + return ((indices[1] & 0b1) << 2) | (indices[2] >> 6); + case 6: + return (indices[2] >> 3) & 0b111; + case 7: + return indices[2] & 0b111; + } + // NOTREACHED + return 0; +} + +inline size_t DecompressionState::GetNextTableIndexWidth2( + const size_t current_offset) { + if (current_offset & 0b10) { + if (current_offset & 1) { + return compressed_indices_[current_offset >> 2] & 0x03; + } else { + return (compressed_indices_[current_offset >> 2] >> 2) & 0x03; + } + } else { + if (current_offset & 1) { + return (compressed_indices_[current_offset >> 2] >> 4) & 0x03; + } else { + return (compressed_indices_[current_offset >> 2] >> 6) & 0x03; + } + } +} + +inline size_t DecompressionState::GetNextTableIndexWidth1( + const size_t current_offset) { + const size_t shift = ~current_offset & 0b111; + return (compressed_indices_[current_offset >> 3] >> shift) & 0b1; +} + +} // namespace tflite + +#endif // USE_TFLM_COMPRESSION diff --git a/src/tensorflow/lite/micro/kernels/depth_to_space.cpp b/src/tensorflow/lite/micro/kernels/depth_to_space.cpp new file mode 100644 index 
0000000..d4faf7c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/depth_to_space.cpp @@ -0,0 +1,143 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/depth_to_space.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// input/output tensor shape rank associations +constexpr int kBatchRank = 0; +constexpr int kHeightRank = 1; +constexpr int kWidthRank = 2; +constexpr int kDepthRank = 3; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, 
NumDimensions(input), 4); + + auto data_type = output->type; + TF_LITE_ENSURE(context, + data_type == kTfLiteFloat32 || data_type == kTfLiteInt8); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); + const int input_height = input->dims->data[kHeightRank]; + const int input_width = input->dims->data[kWidthRank]; + const int input_channels = input->dims->data[kDepthRank]; + int output_height = input_height * block_size; + int output_width = input_width * block_size; + int output_channels = input_channels / block_size / block_size; + + TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size); + TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size); + TF_LITE_ENSURE_EQ(context, input_channels, + output_channels * block_size * block_size); + + // We must update the output tensor dimensions. + // The dims storage is expected to be the same area in memory + // for both TfLiteTensor and TfLiteEvalTensor. This is important + // because TfLiteTensor in the MicroInterpreter is a temporary + // allocation. For the KernelRunner interpreter, TfLiteEvalTensor + // is a temporary allocation. We must therefore relocate the dims + // from the FlatBuffer to the persistent storage arena. 
+ TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + output->dims->data[kBatchRank] = input->dims->data[kBatchRank]; + output->dims->data[kHeightRank] = output_height; + output->dims->data[kWidthRank] = output_width; + output->dims->data[kDepthRank] = output_channels; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus DepthToSpacePrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus DepthToSpaceEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + tflite::DepthToSpaceParams op_params; + op_params.block_size = static_cast(params->block_size); + + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: + reference_ops::DepthToSpace(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::DepthToSpace(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("DEPTH_TO_SPACE only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_DEPTH_TO_SPACE() { + return tflite::micro::RegisterOp(nullptr, DepthToSpacePrepare, + DepthToSpaceEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/depthwise_conv.h b/src/tensorflow/lite/micro/kernels/depthwise_conv.h new file mode 100644 index 0000000..08f8a53 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/depthwise_conv.h @@ -0,0 +1,90 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/kernels/conv.h" + +namespace tflite { + +extern const int kDepthwiseConvInputTensor; +extern const int kDepthwiseConvWeightsTensor; +extern const int kDepthwiseConvBiasTensor; +extern const int kDepthwiseConvOutputTensor; +extern const int kDepthwiseConvQuantizedDimension; + +// Returns a DepthwiseParams struct with all the parameters needed for a +// float computation. +DepthwiseParams DepthwiseConvParamsFloat( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data); + +// Returns a DepthwiseParams struct with all the parameters needed for a +// quantized computation. +DepthwiseParams DepthwiseConvParamsQuantized( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data); + +TfLiteStatus CalculateOpDataDepthwiseConv( + TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, int width, int height, + int filter_width, int filter_height, int out_width, int out_height, + const TfLiteType data_type, OpDataConv* data); + +TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node); + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. +TFLMRegistration Register_DEPTHWISE_CONV_2D(); + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and uses the latency optimized +// implementations. 
+TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16 activations and int8 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int4 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4(); + +#else +inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8() { + return Register_DEPTHWISE_CONV_2D(); +} + +inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16() { + return Register_DEPTHWISE_CONV_2D(); +} + +inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT4() { + return Register_DEPTHWISE_CONV_2D(); +} + +#endif + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ diff --git a/src/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp b/src/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp new file mode 100644 index 0000000..b68715e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp @@ -0,0 +1,239 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/depthwise_conv.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +const int kDepthwiseConvInputTensor = 0; +const int kDepthwiseConvWeightsTensor = 1; +const int kDepthwiseConvBiasTensor = 2; +const int kDepthwiseConvOutputTensor = 0; + +// DepthwiseConv is quantized along dimension 3: +// https://www.tensorflow.org/lite/performance/quantization_spec +const int kDepthwiseConvQuantizedDimension = 3; + +// Returns a DepthwiseParams struct with all the parameters needed for a +// float computation. 
+DepthwiseParams DepthwiseConvParamsFloat( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data) { + DepthwiseParams op_params; + CalculateActivationRange(params.activation, &op_params.float_activation_min, + &op_params.float_activation_max); + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params.stride_width; + op_params.stride_height = params.stride_height; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.depth_multiplier = params.depth_multiplier; + return op_params; +} + +// Returns a DepthwiseParams struct with all the parameters needed for a +// quantized computation. +DepthwiseParams DepthwiseConvParamsQuantized( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data) { + DepthwiseParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = -data.filter_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier = data.output_multiplier; + op_params.output_shift = -data.output_shift; + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.stride_height = params.stride_height; + op_params.stride_width = params.stride_width; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.depth_multiplier = params.depth_multiplier; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + return op_params; +} + +TfLiteStatus CalculateOpDataDepthwiseConv( + TfLiteContext* context, TfLiteNode* node, + const 
TfLiteDepthwiseConvParams& params, int width, int height, + int filter_width, int filter_height, int out_width, int out_height, + const TfLiteType data_type, OpDataConv* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params.padding; + data->padding = ComputePaddingHeightWidth( + params.stride_height, params.stride_width, params.dilation_height_factor, + params.dilation_width_factor, height, width, filter_height, filter_width, + padding, &out_height, &out_width); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kConvBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
+ if (data_type != kTfLiteFloat32) { + int output_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params.activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, data->per_channel_output_shift, + output_channels)); + } + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (has_bias) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpDataConv* data = static_cast(node->user_data); + const auto& params = + *(static_cast(node->builtin_data)); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kDepthwiseConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + const int input_width = input->dims->data[2]; + const int input_height = input->dims->data[1]; + const int filter_width = filter->dims->data[2]; + const int filter_height = filter->dims->data[1]; + const int output_width = output->dims->data[2]; + const int output_height = output->dims->data[1]; + + // Dynamically allocate 
per-channel quantization parameters. + const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // All per-channel quantized tensors need valid zero point and scale arrays. + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt8 && + (filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)) || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8), + "Hybrid models are not supported on TFLite Micro."); + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena(context, filter_size, + &data->filter_buffer_index); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + +#ifdef USE_TFLM_COMPRESSION + + // Compression scratch buffers. + // These will only be allocated if the tensor is compressed. 
+ if (micro_context->IsTensorCompressed(node, kDepthwiseConvWeightsTensor) && + filter->type == kTfLiteInt4) { + MicroPrintf("Compression not supported with INT4 tensors"); + return kTfLiteError; + } + data->weights_scratch_index = + micro_context->AllocateDecompressionScratchBuffer( + node, kDepthwiseConvWeightsTensor); + data->bias_scratch_index = micro_context->AllocateDecompressionScratchBuffer( + node, kDepthwiseConvBiasTensor); + +#endif // USE_TFLM_COMPRESSION + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/dequantize.cpp b/src/tensorflow/lite/micro/kernels/dequantize.cpp index f4e2eb9..428c866 100644 --- a/src/tensorflow/lite/micro/kernels/dequantize.cpp +++ b/src/tensorflow/lite/micro/kernels/dequantize.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,145 +22,63 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/reference/requantize.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/dequantize.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace dequantize { -struct OpData { - tflite::DequantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. 
- int32_t output_multiplier; - int output_shift; - int32_t output_zero_point; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* DequantizeInit(TfLiteContext* context, const char* buffer, + size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - // TODO(b/140515557): Add cached dequant to improve hybrid model performance. - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 || - input->type == kTfLiteInt8 || - input->type == kTfLiteInt16); - TF_LITE_ENSURE( - context, output->type == kTfLiteFloat32 || output->type == kTfLiteInt32); - - if (output->type == kTfLiteInt32) { - const double effective_output_scale = - static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(effective_output_scale, &data->output_multiplier, - &data->output_shift); - } - - data->quantization_params.zero_point = input->params.zero_point; - data->quantization_params.scale = static_cast(input->params.scale); - data->output_zero_point = output->params.zero_point; - return kTfLiteOk; + return context->AllocatePersistentBuffer(context, sizeof(DequantizeOpData)); } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus DequantizeEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + DequantizeOpData* data = static_cast(node->user_data); const TfLiteEvalTensor* 
input = tflite::micro::GetEvalInput(context, node, 0); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - if (output->type == kTfLiteFloat32) { - switch (input->type) { - case kTfLiteUInt8: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt8: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (output->type == kTfLiteInt32) { - int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output)); - switch (input->type) { - case kTfLiteInt16: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; 
- } - } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; + // Output type ensured to be kTfLiteFloat32 at the Prepare stage + TFLITE_DCHECK(output->type == kTfLiteFloat32); + + switch (input->type) { + case kTfLiteInt8: + reference_ops::Dequantize(data->quantization_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Dequantize(data->quantization_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteUInt8: + reference_ops::Dequantize(data->quantization_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; } return kTfLiteOk; } -} // namespace dequantize - -TfLiteRegistration Register_DEQUANTIZE() { - return {/*init=*/dequantize::Init, - /*free=*/nullptr, - /*prepare=*/dequantize::Prepare, - /*invoke=*/dequantize::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_DEQUANTIZE() { + return tflite::micro::RegisterOp(DequantizeInit, DequantizePrepare, + DequantizeEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/dequantize.h b/src/tensorflow/lite/micro/kernels/dequantize.h new file mode 100644 index 0000000..fe6ec16 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/dequantize.h @@ -0,0 +1,38 @@ +/* Copyright 2021 The TensorFlow 
Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +struct DequantizeOpData { + tflite::DequantizationParams quantization_params; + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + int32_t output_zero_point; +}; + +TfLiteStatus DequantizePrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ diff --git a/src/tensorflow/lite/micro/kernels/dequantize_common.cpp b/src/tensorflow/lite/micro/kernels/dequantize_common.cpp new file mode 100644 index 0000000..08e448e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/dequantize_common.cpp @@ -0,0 +1,58 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/dequantize.h" +#include "tensorflow/lite/kernels/internal/reference/quantize.h" +#include "tensorflow/lite/kernels/internal/reference/requantize.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/dequantize.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +TfLiteStatus DequantizePrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + DequantizeOpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + // TODO(b/140515557): Add cached dequant to improve hybrid model performance. 
+ TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || + input->type == kTfLiteInt16 || + input->type == kTfLiteUInt8); + TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); + + data->quantization_params.zero_point = input->params.zero_point; + data->quantization_params.scale = static_cast(input->params.scale); + data->output_zero_point = output->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp b/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp index f91e300..cb60b2a 100644 --- a/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp +++ b/src/tensorflow/lite/micro/kernels/detection_postprocess.cpp @@ -1,4 +1,5 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -12,10 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include #include +#include -#define FLATBUFFERS_LOCALE_INDEPENDENT 0 -#include "flatbuffers/flexbuffers.h" +#include "third_party/flatbuffers/include/flatbuffers/flexbuffers.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" @@ -80,7 +82,7 @@ struct CenterSizeEncoding { float h; float w; }; -// We make sure that the memory allocations are contiguous with static assert. +// We make sure that the memory allocations are contiguous with static_assert. static_assert(sizeof(BoxCornerEncoding) == sizeof(float) * kNumCoordBox, "Size of BoxCornerEncoding is 4 float values"); static_assert(sizeof(CenterSizeEncoding) == sizeof(float) * kNumCoordBox, @@ -115,13 +117,13 @@ struct OpData { TfLiteQuantizationParams input_anchors; }; -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* DetectionPostProcessInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); OpData* op_data = nullptr; const uint8_t* buffer_t = reinterpret_cast(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); - - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); op_data = reinterpret_cast( context->AllocatePersistentBuffer(context, sizeof(OpData))); @@ -148,19 +150,21 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { return op_data; } -void Free(TfLiteContext* context, void* buffer) {} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus DetectionPostProcessPrepare(TfLiteContext* context, + TfLiteNode* node) { auto* op_data = static_cast(node->user_data); + MicroContext* micro_context = GetMicroContext(context); + // Inputs: box_encodings, scores, anchors TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - const TfLiteTensor* input_box_encodings 
= - GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - GetInput(context, node, kInputTensorClassPredictions); - const TfLiteTensor* input_anchors = - GetInput(context, node, kInputTensorAnchors); + TfLiteTensor* input_box_encodings = + micro_context->AllocateTempInputTensor(node, kInputTensorBoxEncodings); + TfLiteTensor* input_class_predictions = + micro_context->AllocateTempInputTensor(node, + kInputTensorClassPredictions); + TfLiteTensor* input_anchors = + micro_context->AllocateTempInputTensor(node, kInputTensorAnchors); TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3); TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3); TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2); @@ -218,6 +222,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // num_detections TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); + micro_context->DeallocateTempTfLiteTensor(input_box_encodings); + micro_context->DeallocateTempTfLiteTensor(input_class_predictions); + micro_context->DeallocateTempTfLiteTensor(input_anchors); + return kTfLiteOk; } @@ -234,25 +242,6 @@ class Dequantizer { float scale_; }; -void DequantizeBoxEncodings(const TfLiteEvalTensor* input_box_encodings, - int idx, float quant_zero_point, float quant_scale, - int length_box_encoding, - CenterSizeEncoding* box_centersize) { - const uint8_t* boxes = - tflite::micro::GetTensorData(input_box_encodings) + - length_box_encoding * idx; - Dequantizer dequantize(quant_zero_point, quant_scale); - // See definition of the KeyPointBoxCoder at - // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/keypoint_box_coder.py - // The first four elements are the box coordinates, which is the same as the - // FastRnnBoxCoder at - // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/faster_rcnn_box_coder.py - box_centersize->y = 
dequantize(boxes[0]); - box_centersize->x = dequantize(boxes[1]); - box_centersize->h = dequantize(boxes[2]); - box_centersize->w = dequantize(boxes[3]); -} - template T ReInterpretTensor(const TfLiteEvalTensor* tensor) { const float* tensor_base = tflite::micro::GetTensorData(tensor); @@ -282,19 +271,6 @@ TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, CenterSizeEncoding anchor; for (int idx = 0; idx < num_boxes; ++idx) { switch (input_box_encodings->type) { - // Quantized - case kTfLiteUInt8: - DequantizeBoxEncodings( - input_box_encodings, idx, - static_cast(op_data->input_box_encodings.zero_point), - static_cast(op_data->input_box_encodings.scale), - input_box_encodings->dims->data[2], &box_centersize); - DequantizeBoxEncodings( - input_anchors, idx, - static_cast(op_data->input_anchors.zero_point), - static_cast(op_data->input_anchors.scale), kNumCoordBox, - &anchor); - break; // Float case kTfLiteFloat32: { // Please see DequantizeBoxEncodings function for the support detail. 
@@ -311,14 +287,26 @@ TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, return kTfLiteError; } - float ycenter = box_centersize.y / scale_values.y * anchor.h + anchor.y; - float xcenter = box_centersize.x / scale_values.x * anchor.w + anchor.x; + float ycenter = static_cast(static_cast(box_centersize.y) / + static_cast(scale_values.y) * + static_cast(anchor.h) + + static_cast(anchor.y)); + + float xcenter = static_cast(static_cast(box_centersize.x) / + static_cast(scale_values.x) * + static_cast(anchor.w) + + static_cast(anchor.x)); + float half_h = - 0.5f * static_cast(std::exp(box_centersize.h / scale_values.h)) * - anchor.h; + static_cast(0.5 * + (std::exp(static_cast(box_centersize.h) / + static_cast(scale_values.h))) * + static_cast(anchor.h)); float half_w = - 0.5f * static_cast(std::exp(box_centersize.w / scale_values.w)) * - anchor.w; + static_cast(0.5 * + (std::exp(static_cast(box_centersize.w) / + static_cast(scale_values.w))) * + static_cast(anchor.w)); float* decoded_boxes = reinterpret_cast( context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); @@ -334,9 +322,61 @@ TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, void DecreasingPartialArgSort(const float* values, int num_values, int num_to_sort, int* indices) { std::iota(indices, indices + num_values, 0); - std::partial_sort( - indices, indices + num_to_sort, indices + num_values, - [&values](const int i, const int j) { return values[i] > values[j]; }); + std::partial_sort(indices, indices + num_to_sort, indices + num_values, + [&values](const int i, const int j) { + return std::tie(values[i], j) > std::tie(values[j], i); + }); +} + +template +void InsertionSort(int* start, int* end, Compare compare) { + for (int* i = start; i != end; ++i) { + std::rotate(std::upper_bound(start, i, *i, compare), i, i + 1); + } +} + +template +void TopDownMerge(int* values, int* scratch, const int half_num_values, + int num_values, Compare compare) { + 
int left = 0; + int right = half_num_values; + + for (int i = 0; i < num_values; i++) { + if (left >= half_num_values || + (right < num_values && compare(values[right], values[left]))) { + scratch[i] = values[right++]; + } else { + scratch[i] = values[left++]; + } + } + memcpy(values, scratch, num_values * sizeof(int)); +} + +template +void MergeSort(int* values, int* scratch, const int num_values, + Compare compare) { + constexpr int threshold = 20; + + if (num_values < threshold) { + InsertionSort(values, values + num_values, compare); + return; + } + + const int half_num_values = num_values / 2; + + MergeSort(values, scratch, half_num_values, compare); + MergeSort(values + half_num_values, scratch, num_values - half_num_values, + compare); + TopDownMerge(values, scratch, half_num_values, num_values, compare); +} + +void DecreasingArgSort(const float* values, int num_values, int* indices, + int* scratch) { + std::iota(indices, indices + num_values, 0); + + MergeSort(indices, scratch, num_values, [&values](const int i, const int j) { + return values[i] > values[j]; + }); } int SelectDetectionsAboveScoreThreshold(const float* values, int size, @@ -345,8 +385,9 @@ int SelectDetectionsAboveScoreThreshold(const float* values, int size, int counter = 0; for (int i = 0; i < size; i++) { if (values[i] >= threshold) { - keep_values[counter++] = values[i]; - keep_indices[i] = i; + keep_values[counter] = values[i]; + keep_indices[counter] = i; + counter++; } } return counter; @@ -420,8 +461,16 @@ TfLiteStatus NonMaxSuppressionSingleClassHelper( int* sorted_indices = reinterpret_cast( context->GetScratchBuffer(context, op_data->sorted_indices_idx)); - DecreasingPartialArgSort(keep_scores, num_scores_kept, num_scores_kept, - sorted_indices); + // Reusing keep_indices for scratch buffer and write back its values + // after the sorting is done. 
+ DecreasingArgSort(keep_scores, num_scores_kept, sorted_indices, keep_indices); + int counter = 0; + for (int i = 0; i < num_boxes; i++) { + if (scores[i] >= non_max_suppression_score_threshold) { + keep_indices[counter] = i; + counter++; + } + } const int num_boxes_kept = num_scores_kept; const int output_size = std::min(num_boxes_kept, max_detections); @@ -687,22 +736,6 @@ TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context, return kTfLiteOk; } -void DequantizeClassPredictions(const TfLiteEvalTensor* input_class_predictions, - const int num_boxes, - const int num_classes_with_background, - float* scores, OpData* op_data) { - float quant_zero_point = - static_cast(op_data->input_class_predictions.zero_point); - float quant_scale = - static_cast(op_data->input_class_predictions.scale); - Dequantizer dequantize(quant_zero_point, quant_scale); - const uint8_t* scores_quant = - tflite::micro::GetTensorData(input_class_predictions); - for (int idx = 0; idx < num_boxes * num_classes_with_background; ++idx) { - scores[idx] = dequantize(scores_quant[idx]); - } -} - TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, TfLiteNode* node, OpData* op_data) { // Get the input tensors @@ -724,14 +757,6 @@ TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, const float* scores; switch (input_class_predictions->type) { - case kTfLiteUInt8: { - float* temporary_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->scores_idx)); - DequantizeClassPredictions(input_class_predictions, num_boxes, - num_classes_with_background, temporary_scores, - op_data); - scores = temporary_scores; - } break; case kTfLiteFloat32: scores = tflite::micro::GetTensorData(input_class_predictions); break; @@ -751,7 +776,8 @@ TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus DetectionPostProcessEval(TfLiteContext* context, + 
TfLiteNode* node) { TF_LITE_ENSURE(context, (kBatchSize == 1)); auto* op_data = static_cast(node->user_data); @@ -776,15 +802,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } } // namespace -TfLiteRegistration* Register_DETECTION_POSTPROCESS() { - static TfLiteRegistration r = {/*init=*/Init, - /*free=*/Free, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration* Register_DETECTION_POSTPROCESS() { + static TFLMRegistration r = tflite::micro::RegisterOp( + DetectionPostProcessInit, DetectionPostProcessPrepare, + DetectionPostProcessEval); return &r; } diff --git a/src/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h b/src/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h new file mode 100644 index 0000000..f5b9eae --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h @@ -0,0 +1,25 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H +#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H + +extern const int g_gen_data_size_none_regular_nms; +extern const unsigned char g_gen_data_none_regular_nms[]; + +extern const int g_gen_data_size_regular_nms; +extern const unsigned char g_gen_data_regular_nms[]; + +#endif diff --git a/src/tensorflow/lite/micro/kernels/div.cpp b/src/tensorflow/lite/micro/kernels/div.cpp new file mode 100644 index 0000000..a80b3f2 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/div.cpp @@ -0,0 +1,219 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/div.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpDataDiv { + // Parameters used in the quantized paths where the output is 8bit + int32_t input1_zero_point; + int32_t input2_zero_point; + int32_t output_zero_point; + int32_t output_activation_min; + int32_t output_activation_max; + + // Parameters used in all quantized paths + int32_t output_multiplier; + int output_shift; +}; + +TfLiteStatus CalculateOpDataDiv(TfLiteContext* context, TfLiteTensor* input1, + TfLiteTensor* input2, TfLiteTensor* output, + TfLiteDivParams* params, OpDataDiv* data) { + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + if (output->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + const double real_multiplier = static_cast( + input1->params.scale / (input2->params.scale * output->params.scale)); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + data->input1_zero_point = input1->params.zero_point; + data->input2_zero_point = input2->params.zero_point; + data->output_zero_point = output->params.zero_point; + } + + return kTfLiteOk; +} + +void* DivInit(TfLiteContext* context, const char* buffer, size_t length) 
{ + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataDiv)); +} + +TfLiteStatus DivPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + OpDataDiv* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataDiv(context, input1, input2, output, params, data)); + + if (output->type == kTfLiteInt32) { + // Only support int32 unquantized DIV for now. 
+ TF_LITE_ENSURE_EQ(context, input1->quantization.type, + kTfLiteNoQuantization); + TF_LITE_ENSURE_EQ(context, input2->quantization.type, + kTfLiteNoQuantization); + } + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params, + const OpDataDiv* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + +#define TF_LITE_DIV(type, opname, data_type) \ + data_type output_activation_min, output_activation_max; \ + CalculateActivationRange(params->activation, &output_activation_min, \ + &output_activation_max); \ + SetActivationParams(output_activation_min, output_activation_max, \ + &op_params); \ + type::opname(op_params, tflite::micro::GetTensorShape(input1), \ + tflite::micro::GetTensorData(input1), \ + tflite::micro::GetTensorShape(input2), \ + tflite::micro::GetTensorData(input2), \ + tflite::micro::GetTensorShape(output), \ + tflite::micro::GetTensorData(output)) + + bool requires_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (requires_broadcast) { + TF_LITE_DIV(reference_ops, BroadcastDivSlow, T); + } else { + TF_LITE_DIV(reference_ops, Div, T); + } +#undef TF_LITE_DIV +} + +TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteDivParams* params, const OpDataDiv* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + +#define TF_LITE_DIV(type, opname, dtype) \ + type::opname(op_params, tflite::micro::GetTensorShape(input1), \ + tflite::micro::GetTensorData(input1), \ + tflite::micro::GetTensorShape(input2), \ + 
tflite::micro::GetTensorData(input2), \ + tflite::micro::GetTensorShape(output), \ + tflite::micro::GetTensorData(output)) + + if (input1->type == kTfLiteInt8 && input2->type == kTfLiteInt8 && + output->type == kTfLiteInt8) { + SetActivationParams(data->output_activation_min, + data->output_activation_max, &op_params); + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool requires_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (requires_broadcast) { + TF_LITE_DIV(reference_ops, BroadcastDivSlow, int8_t); + } else { + TF_LITE_DIV(reference_ops, Div, int8_t); + } +#undef TF_LITE_DIV + } else { + MicroPrintf("Unsupported combination of input and output types in DIV."); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus DivEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = static_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalDiv(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt32) { + EvalDiv(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8) { + TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data, + input1, input2, output)); + } else { + MicroPrintf( + "DIV only supports 
FLOAT32, INT32, quantized INT8 " + "now, got type %s (%d).", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_DIV() { + return tflite::micro::RegisterOp(DivInit, DivPrepare, DivEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/elementwise.cpp b/src/tensorflow/lite/micro/kernels/elementwise.cpp index 581e532..a33c340 100644 --- a/src/tensorflow/lite/micro/kernels/elementwise.cpp +++ b/src/tensorflow/lite/micro/kernels/elementwise.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,17 +16,33 @@ limitations under the License. #include #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace elementwise { namespace { +constexpr int kAbsNameId = 0; +constexpr int kRsrqtNameId = 1; + +const int kElementwiseInputTensor = 0; +const int kElementwiseOutputTensor = 0; + +struct OpDataAbsRsqrt { + int32_t multiplier; + int shift; + int input_offset; + int output_offset; + bool needs_rescale; + TfLiteQuantizationType input_quantization_type; + TfLiteType input_type; +}; + bool IsNumericSupportedType(const TfLiteType type) { return type == kTfLiteFloat32; } @@ -35,27 +51,151 @@ bool IsLogicalSupportedType(const TfLiteType type) { return type == kTfLiteBool; } +bool IsAbsSupportedType(const TfLiteType type) 
{ + return type == kTfLiteFloat32 || type == kTfLiteInt8 || type == kTfLiteInt16; +} + +bool IsRsqrtSupportedType(const TfLiteType type) { + return type == kTfLiteFloat32 || type == kTfLiteInt8; +} + +inline void SetAbsOutputMultiplier(const float input_scale, + const float output_scale, + int32_t* multiplier, int* shift) { + QuantizeMultiplier(static_cast(input_scale / output_scale), + multiplier, shift); +} + +inline void SetRsqrtOutputMultiplier(const float input_scale, + const float output_scale, + int32_t* multiplier, int* shift) { + const double scale = + 1. / static_cast((std::sqrt(input_scale) * output_scale)); + QuantizeMultiplier(scale, multiplier, shift); +} + typedef bool (*IsSupportedType)(TfLiteType); template TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kElementwiseInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kElementwiseOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + if (!IsSupportedType(input->type)) { + MicroPrintf("Input data type %s (%d) is not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +typedef bool (*IsSupportedType)(TfLiteType); +template +TfLiteStatus PrepareAbsRsqrt(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); + TfLiteTensor* input = 
micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!IsSupportedType(input->type)) { - TF_LITE_KERNEL_LOG(context, "Input data type %s (%d) is not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Input data type %s (%d) is not supported.", + TfLiteTypeGetName(input->type), input->type); return kTfLiteError; } + + auto* op_data = static_cast(node->user_data); + op_data->input_type = input->type; + + // For int16 type input, we support both quantized and non-quantized + // evaluation. + if (op_nameid == kAbsNameId) { + op_data->input_quantization_type = input->quantization.type; + } + + if (input->type == kTfLiteInt8 || + (input->type == kTfLiteInt16 && + input->quantization.type != kTfLiteNoQuantization)) { + TF_LITE_ENSURE_EQ(context, input->quantization.type, + kTfLiteAffineQuantization); + TF_LITE_ENSURE_EQ(context, output->quantization.type, + kTfLiteAffineQuantization); + const auto* input_params = + reinterpret_cast(input->quantization.params); + const auto* output_params = reinterpret_cast( + output->quantization.params); + TF_LITE_ENSURE(context, input_params != nullptr); + TF_LITE_ENSURE(context, input_params->scale != nullptr); + TF_LITE_ENSURE(context, input_params->scale->size > 0); + TF_LITE_ENSURE(context, input_params->zero_point->size > 0); + TF_LITE_ENSURE(context, output_params != nullptr); + TF_LITE_ENSURE(context, output_params->scale != nullptr); + TF_LITE_ENSURE(context, output_params->scale->size > 0); + TF_LITE_ENSURE(context, output_params->zero_point->size > 0); + op_data->input_offset = input_params->zero_point->data[0]; + op_data->output_offset = output_params->zero_point->data[0]; + if (input->type == kTfLiteInt16) { + 
TF_LITE_ENSURE_EQ(context, op_data->input_offset, 0); + TF_LITE_ENSURE_EQ(context, op_data->output_offset, 0); + } + const float input_scale = input_params->scale->data[0]; + const float output_scale = output_params->scale->data[0]; + op_data->needs_rescale = input_scale != output_scale; + if (op_nameid == kAbsNameId && op_data->needs_rescale) { + SetAbsOutputMultiplier(input_scale, output_scale, &op_data->multiplier, + &op_data->shift); + } else if (op_nameid == kRsrqtNameId) { + SetRsqrtOutputMultiplier(input_scale, output_scale, &op_data->multiplier, + &op_data->shift); + } + } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } +template +inline TfLiteStatus EvalImplQuantized( + TfLiteContext* context, TfLiteNode* node, + T func(TfLiteContext*, TfLiteNode*, T), + TfLiteStatus validate_input_func(TfLiteContext*, TfLiteNode*, T), + TfLiteType expected_type) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); + const size_t num_elements = ElementCount(*input->dims); + const T* in_data = tflite::micro::GetTensorData(input); + T* out_data = tflite::micro::GetTensorData(output); + for (size_t i = 0; i < num_elements; ++i) { + if (validate_input_func) { + TF_LITE_ENSURE_OK(context, + validate_input_func(context, node, in_data[i])); + } + out_data[i] = func(context, node, in_data[i]); + } + return kTfLiteOk; +} + +template +inline T AbsHelper(T i) { + return std::abs(i); +} + template inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, - T func(T), TfLiteType expected_type) { + T func(T), TfLiteStatus validate_input_func(T), + TfLiteType expected_type) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); 
TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); @@ -63,6 +203,9 @@ inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, const T* in_data = tflite::micro::GetTensorData(input); T* out_data = tflite::micro::GetTensorData(output); for (size_t i = 0; i < num_elements; ++i) { + if (validate_input_func) { + TF_LITE_ENSURE_OK(context, validate_input_func(in_data[i])); + } out_data[i] = func(in_data[i]); } return kTfLiteOk; @@ -70,16 +213,114 @@ inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, float float_func(float)) { - return EvalImpl(context, node, float_func, kTfLiteFloat32); + return EvalImpl(context, node, float_func, + /*validate_input_func=*/nullptr, kTfLiteFloat32); } inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, + bool bool_func(bool)) { - return EvalImpl(context, node, bool_func, kTfLiteBool); + return EvalImpl(context, node, bool_func, + /*validate_input_func=*/nullptr, kTfLiteBool); +} + +void* ElementWiseAbsRsqrtInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataAbsRsqrt)); +} + +template +inline T AbsEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { + const auto* op_data = static_cast(node->user_data); + const int kMin = std::numeric_limits::min(); + const int kMax = std::numeric_limits::max(); + + const int32_t value = std::abs(i - op_data->input_offset); + if (!op_data->needs_rescale) { + return static_cast( + std::min(std::max(static_cast(value + op_data->output_offset), + static_cast(kMin)), + static_cast(kMax))); + } + + const int32_t output = tflite::MultiplyByQuantizedMultiplier( + value, op_data->multiplier, op_data->shift) + + op_data->output_offset; + return static_cast(std::min( + std::max(static_cast(output), static_cast(kMin)), + 
static_cast(kMax))); +} + +template +inline T RsqrtEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { + const auto* op_data = static_cast(node->user_data); + const int kMin = std::numeric_limits::min(); + const int kMax = std::numeric_limits::max(); + + const int32_t value = (i - op_data->input_offset); + const int32_t kShift = 20; // Shift to keep value integer. + if (value == 0) { + // Assume that any value close to 0 represents the max output value. + return static_cast(kMax); + } + int32_t inv_sqrt_multiplier; + int inv_sqrt_shift; + GetInvSqrtQuantizedMultiplierExp(value, kReverseShift, &inv_sqrt_multiplier, + &inv_sqrt_shift); + const int32_t data = tflite::MultiplyByQuantizedMultiplier( + static_cast(1), inv_sqrt_multiplier, inv_sqrt_shift + kShift); + const int32_t output = + tflite::MultiplyByQuantizedMultiplier(data, op_data->multiplier, + op_data->shift - kShift) + + op_data->output_offset; + return static_cast(std::min( + std::max(static_cast(output), static_cast(kMin)), + static_cast(kMax))); +} + +template +TfLiteStatus validate_input_func(TfLiteContext* context, TfLiteNode* node, + T i) { + const auto* op_data = static_cast(node->user_data); + + TF_LITE_ENSURE_MSG(context, i >= op_data->input_offset, + "Rsqrt is only defined for positive values"); + return static_cast(kTfLiteOk); } TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::abs); + OpDataAbsRsqrt* op_data = reinterpret_cast(node->user_data); + TfLiteType type = op_data->input_type; + TfLiteQuantizationType input_quantization_type = + op_data->input_quantization_type; + TfLiteStatus eval_result; + + switch (type) { + case kTfLiteFloat32: + eval_result = EvalNumeric(context, node, std::abs); + break; + case kTfLiteInt8: + eval_result = + EvalImplQuantized(context, node, AbsEvalQuantized, + /*validate_input_func=*/nullptr, type); + break; + case kTfLiteInt16: + eval_result = + input_quantization_type == kTfLiteNoQuantization + ? 
EvalImpl(context, node, AbsHelper, + /*validate_input_func=*/nullptr, type) + : EvalImplQuantized(context, node, AbsEvalQuantized, + /*validate_input_func=*/nullptr, + type); + break; + default: + MicroPrintf("Current data type %s is not supported.", + TfLiteTypeGetName(type)); + return kTfLiteError; + break; + } + return eval_result; } TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { @@ -99,7 +340,25 @@ TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { } TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return 1.f / std::sqrt(f); }); + const auto* op_data = static_cast(node->user_data); + TfLiteType type = op_data->input_type; + switch (type) { + case kTfLiteFloat32: + return EvalImpl( + context, node, [](float f) { return 1.f / std::sqrt(f); }, + /*validate_input_func=*/nullptr, type); + case kTfLiteInt8: + return EvalImplQuantized(context, node, RsqrtEvalQuantized, + validate_input_func, type); + case kTfLiteInt16: + return EvalImplQuantized(context, node, RsqrtEvalQuantized, + validate_input_func, type); + + default: + MicroPrintf("Current data type %s is not supported.", + TfLiteTypeGetName(type)); + return kTfLiteError; + } } TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { @@ -111,104 +370,47 @@ TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { } } // namespace -} // namespace elementwise - -TfLiteRegistration Register_ABS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::AbsEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - 
-TfLiteRegistration Register_COS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::CosEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_RSQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::RsqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQUARE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SquareEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOGICAL_NOT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogicalNotEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops + +TFLMRegistration Register_ABS() { + return tflite::micro::RegisterOp( + ElementWiseAbsRsqrtInit, PrepareAbsRsqrt, + AbsEval); +} + +TFLMRegistration Register_SIN() { + return tflite::micro::RegisterOp( + nullptr, GenericPrepare, SinEval); +} + +TFLMRegistration Register_COS() { + return 
tflite::micro::RegisterOp( + nullptr, GenericPrepare, CosEval); +} + +TFLMRegistration Register_LOG() { + return tflite::micro::RegisterOp( + nullptr, GenericPrepare, LogEval); +} + +TFLMRegistration Register_SQRT() { + return tflite::micro::RegisterOp( + nullptr, GenericPrepare, SqrtEval); +} + +TFLMRegistration Register_RSQRT() { + return tflite::micro::RegisterOp( + ElementWiseAbsRsqrtInit, + PrepareAbsRsqrt, RsqrtEval); +} + +TFLMRegistration Register_SQUARE() { + return tflite::micro::RegisterOp( + nullptr, GenericPrepare, SquareEval); +} + +TFLMRegistration Register_LOGICAL_NOT() { + return tflite::micro::RegisterOp( + nullptr, GenericPrepare, LogicalNotEval); +} + } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/elu.cpp b/src/tensorflow/lite/micro/kernels/elu.cpp new file mode 100644 index 0000000..aacd21e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/elu.cpp @@ -0,0 +1,151 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/elu.h" + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// OLD-TODO(b/142762739): We should figure out a multi-threading plan for most +// of the activation ops below. + +struct OpData { + int8_t table[256]; +}; + +using TransformFunc = float (*)(float); + +template +void PopulateLookupTable(const TfLiteTensor* input, const TfLiteTensor* output, + const TransformFunc transform, OpData* data) { + if (sizeof(T) != 1) { + MicroPrintf("Lookup table valid only for 8bit"); + TFLITE_ABORT; + } + + const float inverse_scale = 1 / output->params.scale; + int32_t maxval = std::numeric_limits::max(); + int32_t minval = std::numeric_limits::min(); + for (int32_t val = minval; val <= maxval; ++val) { + const float dequantized = + input->params.scale * (val - input->params.zero_point); + const float transformed = transform(dequantized); + const float rescaled = TfLiteRound(transformed * inverse_scale); + const int32_t quantized = + static_cast(rescaled + output->params.zero_point); + data->table[static_cast(static_cast(val))] = + static_cast(std::max(std::min(maxval, quantized), minval)); + } +} + +// OLD-TODO(b/143696793): move this to optimized_ops. 
+void EvalUsingLookupTable(const OpData* data, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + const int size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + int8_t* output_data = tflite::micro::GetTensorData(output); + const int8_t* input_data = tflite::micro::GetTensorData(input); + + for (int i = 0; i < size; ++i) { + output_data[i] = data->table[static_cast(input_data[i])]; + } +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + // Use LUT to handle quantized elu path. + if (input->type == kTfLiteInt8) { + OpData* data = static_cast(node->user_data); + TransformFunc transform = [](float value) { + return value < 0.0f ? std::exp(value) - 1.0f : value; + }; + PopulateLookupTable(input, output, transform, data); + } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +void* EluInit(TfLiteContext* context, const char* buffer, size_t length) { + // This is a builtin op, so we don't use the contents in 'buffer', if any. + // Instead, we allocate a new object to carry information from Prepare() to + // Eval(). 
+ TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: { + reference_ops::Elu(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { + const OpData* data = static_cast(node->user_data); + EvalUsingLookupTable(data, input, output); + return kTfLiteOk; + } + default: + MicroPrintf("ELU only supports float32 and int8 currently, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } +} + +} // namespace + +TFLMRegistration Register_ELU() { + return tflite::micro::RegisterOp(EluInit, EluPrepare, EluEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/embedding_lookup.cpp b/src/tensorflow/lite/micro/kernels/embedding_lookup.cpp new file mode 100644 index 0000000..6a4be87 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/embedding_lookup.cpp @@ -0,0 +1,214 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Ops that looks up items from matrix. +// +// Input: +// Tensor[0]: Row numbers to lookup, dim.size == 1, int32 +// Tensor[1]: 2-dimensional matrix of multi-dimensional items +// dim.size >= 2, all items are INT8 or FLOAT32. +// first dimension is row, second dimension is column. +// +// Output: +// Output.dim[0] == Tensor[0].dim[0], num of lookups +// Output.dim[1] == Tensor[1].dim[1], num of items per row +// Each item in output is a raw bytes copy of the corresponding item in input, +// or a dequantized value in the case of a INT8 input. +// When indices are out of bound, the ops will not succeed. +// + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor_0 = 0; +constexpr int kInputTensor_1 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + float scale; // quantization scale for tensor 1 + size_t num_columns; // number of columns after flattening tensor 1 into 2D +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + const TfLiteTensor* tensor_1, + const TfLiteTensor* output) { + node->user_data = context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* op_data = static_cast(node->user_data); + TF_LITE_ENSURE(context, op_data != nullptr); + + if (tensor_1->type == kTfLiteInt8 && output->type == kTfLiteFloat32) { + TF_LITE_ENSURE_EQ(context, tensor_1->params.zero_point, 0); + op_data->scale = tensor_1->params.scale; + } + + op_data->num_columns = NumElements(tensor_1) / tensor_1->dims->data[0]; + + return kTfLiteOk; +} + 
+TfLiteStatus EmbeddingLookUpPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* lookup = + micro_context->AllocateTempInputTensor(node, kInputTensor_0); + TF_LITE_ENSURE(context, lookup != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); + TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); + + TfLiteTensor* value = + micro_context->AllocateTempInputTensor(node, kInputTensor_1); + TF_LITE_ENSURE(context, value != nullptr); + TF_LITE_ENSURE(context, NumDimensions(value) >= 2); + TF_LITE_ENSURE(context, + value->type == kTfLiteFloat32 || value->type == kTfLiteInt8); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + if (value->type == kTfLiteFloat32) { + TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); + } else { + TF_LITE_ENSURE( + context, output->type == kTfLiteFloat32 || output->type == kTfLiteInt8); + } + + // make sure output dimensions size can hold the new dimension data + TF_LITE_ENSURE(context, output->dims->size >= NumDimensions(value)); + // make the output tensor dimensions mutable + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + // set the new output dimensions + output->dims->data[0] = SizeOfDimension(lookup, 0); + output->dims->data[1] = SizeOfDimension(value, 1); + for (int i = 2; i < NumDimensions(value); i++) { + output->dims->data[i] = SizeOfDimension(value, i); + } + // check the new output dimensions do not exceed the output data buffer size + size_t new_dims_size = NumElements(output) * TfLiteTypeGetSize(output->type); + TF_LITE_ENSURE(context, new_dims_size <= output->bytes); + + 
TF_LITE_ENSURE_OK(context, CalculateOpData(context, node, value, output)); + + micro_context->DeallocateTempTfLiteTensor(lookup); + micro_context->DeallocateTempTfLiteTensor(value); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus EvalSimple(const OpData& op_data, const TfLiteEvalTensor* lookup, + const TfLiteEvalTensor* value, + TfLiteEvalTensor* output) { + const int num_rows = value->dims->data[0]; + if (num_rows == 0) { + // Propagate empty tensor if input is empty + return kTfLiteOk; + } + const size_t row_bytes = op_data.num_columns * TfLiteTypeGetSize(value->type); + + int8_t* output_raw = tflite::micro::GetTensorData(output); + const int8_t* value_raw = tflite::micro::GetTensorData(value); + const int32_t* lookup_data = tflite::micro::GetTensorData(lookup); + for (int i = 0; i < lookup->dims->data[0]; i++) { + int32_t idx = lookup_data[i]; + if (idx >= num_rows || idx < 0) { + MicroPrintf( + "EMBEDDING_LOOKUP: index out of bounds. " + "Got %d, and bounds are [0, %d]", + idx, num_rows - 1); + return kTfLiteError; + } else { + std::memcpy(output_raw + i * row_bytes, value_raw + idx * row_bytes, + row_bytes); + } + } + + return kTfLiteOk; +} + +TfLiteStatus EvalHybrid(const OpData& op_data, const TfLiteEvalTensor* lookup, + const TfLiteEvalTensor* value, + TfLiteEvalTensor* output) { + const int num_rows = value->dims->data[0]; + const size_t num_colums = op_data.num_columns; + + float* output_ptr = tflite::micro::GetTensorData(output); + const int8_t* value_ptr = tflite::micro::GetTensorData(value); + const int32_t* lookup_data = tflite::micro::GetTensorData(lookup); + + for (int i = 0; i < lookup->dims->data[0]; i++) { + int32_t idx = lookup_data[i]; + if (idx >= num_rows || idx < 0) { + MicroPrintf( + "EMBEDDING_LOOKUP: index out of bounds. " + "Got %d, and bounds are [0, %d]", + idx, num_rows - 1); + return kTfLiteError; + } else { + // Dequantize embedding values. 
+ Dequantize(&value_ptr[idx * num_colums], num_colums, op_data.scale, 0, + &output_ptr[i * num_colums]); + } + } + + return kTfLiteOk; +} + +TfLiteStatus EmbeddingLookUpEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* lookup = + tflite::micro::GetEvalInput(context, node, kInputTensor_0); + const TfLiteEvalTensor* value = + tflite::micro::GetEvalInput(context, node, kInputTensor_1); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + OpData& op_data = *static_cast(node->user_data); + + switch (value->type) { + case kTfLiteFloat32: + return EvalSimple(op_data, lookup, value, output); + case kTfLiteInt8: + if (output->type == kTfLiteFloat32) { + return EvalHybrid(op_data, lookup, value, output); + } else { + return EvalSimple(op_data, lookup, value, output); + } + default: + MicroPrintf("EMBEDDING_LOOKUP only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } +} + +} // namespace + +TFLMRegistration Register_EMBEDDING_LOOKUP() { + return tflite::micro::RegisterOp(nullptr, EmbeddingLookUpPrepare, + EmbeddingLookUpEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/ethosu.cpp b/src/tensorflow/lite/micro/kernels/ethosu.cpp index c305121..5620359 100644 --- a/src/tensorflow/lite/micro/kernels/ethosu.cpp +++ b/src/tensorflow/lite/micro/kernels/ethosu.cpp @@ -16,11 +16,11 @@ limitations under the License. 
// // This is a stub file for non-Ethos platforms // -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_common.h" namespace tflite { -TfLiteRegistration* Register_ETHOSU() { return nullptr; } +TFLMRegistration* Register_ETHOSU() { return nullptr; } const char* GetString_ETHOSU() { return ""; } diff --git a/src/tensorflow/lite/micro/kernels/ethosu.h b/src/tensorflow/lite/micro/kernels/ethosu.h index cfbb0d3..5b86303 100644 --- a/src/tensorflow/lite/micro/kernels/ethosu.h +++ b/src/tensorflow/lite/micro/kernels/ethosu.h @@ -19,7 +19,7 @@ limitations under the License. namespace tflite { -TfLiteRegistration* Register_ETHOSU(); +TFLMRegistration* Register_ETHOSU(); const char* GetString_ETHOSU(); diff --git a/src/tensorflow/lite/micro/kernels/exp.cpp b/src/tensorflow/lite/micro/kernels/exp.cpp new file mode 100644 index 0000000..8d1da8f --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/exp.cpp @@ -0,0 +1,79 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/exp.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus ExpPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); + TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); + TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); + for (int i = 0; i < output->dims->size; ++i) { + TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); + } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus ExpEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + + if (input->type == kTfLiteFloat32) { + reference_ops::Exp(tflite::micro::GetTensorData(input), + 
static_cast(flat_size), + tflite::micro::GetTensorData(output)); + } else { + MicroPrintf("Type %s (%d) currently not supported by Exp.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_EXP() { + return tflite::micro::RegisterOp(nullptr, ExpPrepare, ExpEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/expand_dims.cpp b/src/tensorflow/lite/micro/kernels/expand_dims.cpp new file mode 100644 index 0000000..d47b42c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/expand_dims.cpp @@ -0,0 +1,156 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kAxisTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus GetAxisValueFromTensor(TfLiteContext* context, + const TfLiteTensor* axis, + int32_t* axis_value) { + const int axis_dims = (tflite::GetTensorShape(axis)).DimensionsCount(); + if (axis_dims > 1) { + MicroPrintf("Axis has only one element for Expand_Dims.", axis_dims); + return kTfLiteError; + } + + if (kTfLiteInt32 == (axis->type)) { + const int32_t* axis_ptr = tflite::GetTensorData(axis); + *axis_value = axis_ptr[0]; + return kTfLiteOk; + } else { + MicroPrintf("Axis type %s (%d) not supported by Expand_Dims.", + TfLiteTypeGetName(axis->type), axis->type); + return kTfLiteError; + } +} + +// Verifies that the output tensor's dimension shape is equivalent to inserting +// a dimension of length 1 at the dimension index axis of input's shape as +// defined in https://www.tensorflow.org/api_docs/python/tf/expand_dims. +TfLiteStatus VerifyTensorDim(TfLiteContext* context, const TfLiteTensor* input, + const TfLiteTensor* axis_tensor, + const TfLiteTensor* output) { + int32_t axis_value = 0; + TF_LITE_ENSURE_OK(context, + GetAxisValueFromTensor(context, axis_tensor, &axis_value)); + + tflite::RuntimeShape input_shape = tflite::GetTensorShape(input); + if (axis_value < 0) { + axis_value = input_shape.DimensionsCount() + 1 + axis_value; + } + TF_LITE_ENSURE(context, axis_value <= input_shape.DimensionsCount()); + + // TFLM only supports fixed dimension tensor and assumes that the output shape + // is fully specified in the model. 
As such, TFLM directly use the pointer to + // the dimension array in the model buffer. + tflite::RuntimeShape output_shape = tflite::GetTensorShape(output); + + TF_LITE_ENSURE(context, output_shape.DimensionsCount() == + input_shape.DimensionsCount() + 1); + for (int i = 0; i < output_shape.DimensionsCount(); ++i) { + if (i < axis_value) { + TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i)); + } else if (i == axis_value) { + TF_LITE_ENSURE(context, output_shape.Dims(i) == 1); + } else { + TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i - 1)); + } + } + return kTfLiteOk; +} + +TfLiteStatus ExpandDimsPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* axis = + micro_context->AllocateTempInputTensor(node, kAxisTensor); + TF_LITE_ENSURE(context, axis != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + output->type = input->type; + if (IsDynamicTensor(axis)) { + MicroPrintf("DynamicTensor is not yet supported by Expand_Dims."); + return kTfLiteError; + } + TF_LITE_ENSURE_OK(context, VerifyTensorDim(context, input, axis, output)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +void memCopyN(T* out, const T* in, const int num_elements) { + for (int i = 0; i < num_elements; ++i) { + out[i] = in[i]; + } +} + +TfLiteStatus ExpandDimsEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* 
output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const int flat_size = ElementCount(*input->dims); + + switch (input->type) { + case kTfLiteFloat32: { + memCopyN(tflite::micro::GetTensorData(output), + tflite::micro::GetTensorData(input), flat_size); + } break; + case kTfLiteInt16: { + memCopyN(tflite::micro::GetTensorData(output), + tflite::micro::GetTensorData(input), flat_size); + } break; + case kTfLiteInt8: { + memCopyN(tflite::micro::GetTensorData(output), + tflite::micro::GetTensorData(input), flat_size); + } break; + default: + MicroPrintf( + "Expand_Dims only currently supports int8, int16 and float32, got " + "%d.", + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_EXPAND_DIMS() { + return tflite::micro::RegisterOp(nullptr, ExpandDimsPrepare, ExpandDimsEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/fill.cpp b/src/tensorflow/lite/micro/kernels/fill.cpp new file mode 100644 index 0000000..1486fcb --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/fill.cpp @@ -0,0 +1,140 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/fill.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +template +TfLiteStatus EnsureEqImpl(TfLiteContext* context, const TfLiteIntArray* array, + const TfLiteTensor* tensor) { + for (int i = 0; i < array->size; ++i) { + TF_LITE_ENSURE_EQ(context, array->data[i], GetTensorData(tensor)[i]); + } + return kTfLiteOk; +} + +// Ensure the equality of an int array and a tensor, which must be +// one-dimensional and of an integer type. +TfLiteStatus EnsureEq(TfLiteContext* context, const TfLiteIntArray* array, + const TfLiteTensor* tensor) { + TF_LITE_ENSURE_EQ(context, NumDimensions(tensor), 1); + const auto tensor_len = tensor->dims->data[0]; + TF_LITE_ENSURE_EQ(context, array->size, tensor_len); + + switch (tensor->type) { + case kTfLiteInt8: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt16: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt32: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt64: + return EnsureEqImpl(context, array, tensor); + default: + MicroPrintf("cannot compare int array to tensor of type %d.", + tensor->type); + return kTfLiteError; + } +} + +constexpr int kDimsTensor = 0; +constexpr int kValueTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus FillPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + // Ensure inputs and outputs exist. 
+ TfLiteTensor* dims = + micro_context->AllocateTempInputTensor(node, kDimsTensor); + TF_LITE_ENSURE(context, dims != nullptr); + TfLiteTensor* value = + micro_context->AllocateTempInputTensor(node, kValueTensor); + TF_LITE_ENSURE(context, value != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // The value tensor must be a scalar. + TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); + + // The value type and output type must match. + TF_LITE_ENSURE_EQ(context, value->type, output->type); + + // The dimension of the output tensor is known in model already. + TFLITE_DCHECK(output->dims != nullptr); + + if (dims->data.data != nullptr) { + // When the dims tensor is specified in model already (i.e. is not an + // activation tensor), the dims tensor must match the output tensor shape. + // As a byproduct, ensures the dims tensor is of an integer type. + TF_LITE_ENSURE_OK(context, EnsureEq(context, output->dims, dims)); + } + + micro_context->DeallocateTempTfLiteTensor(dims); + micro_context->DeallocateTempTfLiteTensor(value); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template <typename T> +void FillImpl(const TfLiteEvalTensor* value, TfLiteEvalTensor* output) { + reference_ops::Fill( + micro::GetTensorShape(value), micro::GetTensorData<T>(value), + micro::GetTensorShape(output), micro::GetTensorData<T>(output)); +} + +TfLiteStatus FillEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* value = + micro::GetEvalInput(context, node, kValueTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + switch (value->type) { + case kTfLiteFloat32: + FillImpl<float>(value, output); + break; + case kTfLiteInt32: + FillImpl<int32_t>(value, output); + break; + case kTfLiteInt8: + FillImpl<int8_t>(value, output); + break; + default: + MicroPrintf("Fill only currently supports float32 for input 1, got %s.", +
TfLiteTypeGetName(value->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_FILL() { + return tflite::micro::RegisterOp(nullptr, FillPrepare, FillEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cpp b/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cpp deleted file mode 100644 index 3f01096..0000000 --- a/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file is generated. 
See: -// tensorflow/lite/micro/kernels/detection_postprocess_test/README.md - -#include "tensorflow/lite/micro/kernels/flexbuffers_generated_data.h" - -const int g_gen_data_size_none_regular_nms = 242; -const unsigned char g_gen_data_none_regular_nms[] = { - 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x00, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x00, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x75, 0x6c, 0x61, - 0x72, 0x5f, 0x6e, 0x6d, 0x73, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x73, 0x63, - 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x69, 0x6f, 0x75, 0x5f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x00, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x00, 0x79, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x78, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, - 0x68, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, 0x77, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x0b, 0x78, 0x12, 0x94, 0xa4, 0x43, 0x58, 0x33, - 0x6a, 0x11, 0x22, 0x2b, 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x20, 0x41, - 0x06, 0x0e, 0x06, 0x06, 0x0e, 0x0e, 0x06, 0x6a, 0x0e, 0x0e, 0x0e, 0x37, - 0x26, 0x01, -}; -const int g_gen_data_size_regular_nms = 242; -const unsigned char g_gen_data_regular_nms[] = { - 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x00, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 
- 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x00, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x75, 0x6c, 0x61, - 0x72, 0x5f, 0x6e, 0x6d, 0x73, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x73, 0x63, - 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x69, 0x6f, 0x75, 0x5f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x00, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x00, 0x79, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x78, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, - 0x68, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, 0x77, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x0b, 0x78, 0x12, 0x94, 0xa4, 0x43, 0x58, 0x33, - 0x6a, 0x11, 0x22, 0x2b, 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x20, 0x41, - 0x06, 0x0e, 0x06, 0x06, 0x0e, 0x0e, 0x06, 0x6a, 0x0e, 0x0e, 0x0e, 0x37, - 0x26, 0x01, -}; diff --git a/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h b/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h deleted file mode 100644 index 0eab0ae..0000000 --- a/src/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H -#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H - -extern const int g_gen_data_size_none_regular_nms; -extern const unsigned char g_gen_data_none_regular_nms[]; - -extern const int g_gen_data_size_regular_nms; -extern const unsigned char g_gen_data_regular_nms[]; - -#endif diff --git a/src/tensorflow/lite/micro/kernels/floor.cpp b/src/tensorflow/lite/micro/kernels/floor.cpp index b8be1cf..f92b7e0 100644 --- a/src/tensorflow/lite/micro/kernels/floor.cpp +++ b/src/tensorflow/lite/micro/kernels/floor.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,14 +20,13 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace floor { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus FloorEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); @@ -39,19 +38,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); return kTfLiteOk; } -} // namespace floor - -TfLiteRegistration Register_FLOOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/floor::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + +} // namespace + +TFLMRegistration Register_FLOOR() { + return tflite::micro::RegisterOp(nullptr, nullptr, FloorEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/floor_div.cpp b/src/tensorflow/lite/micro/kernels/floor_div.cpp new file mode 100644 index 0000000..9adf614 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/floor_div.cpp @@ -0,0 +1,130 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/floor_div.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +void* FloorDivInit(TfLiteContext* context, const char* buffer, size_t length) { + return nullptr; +} + +TfLiteStatus FloorDivPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +TfLiteStatus EvalFloorDiv(TfLiteContext* context, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* 
input2, + TfLiteEvalTensor* output) { + const T* denominator_data = tflite::micro::GetTensorData(input2); + + // Validate the denominator. + for (int i = 0; i < tflite::ElementCount(*input2->dims); ++i) { + if (std::equal_to()(denominator_data[i], 0)) { + MicroPrintf("Division by 0"); + return kTfLiteError; + } + } + + bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); + + if (requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorDiv); + } else { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorDiv); + } + + return kTfLiteOk; +} + +TfLiteStatus FloorDivEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input1->type) { + case kTfLiteFloat32: { + return EvalFloorDiv(context, input1, input2, output); + } + default: { + MicroPrintf("Type '%s' is not supported by FLOOR_DIV.", + TfLiteTypeGetName(input1->type)); + return kTfLiteError; + } + } +} + +} // namespace + +TFLMRegistration Register_FLOOR_DIV() { + return tflite::micro::RegisterOp(FloorDivInit, FloorDivPrepare, FloorDivEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/floor_mod.cpp b/src/tensorflow/lite/micro/kernels/floor_mod.cpp new file mode 100644 index 0000000..da2a7c9 --- /dev/null +++ 
b/src/tensorflow/lite/micro/kernels/floor_mod.cpp @@ -0,0 +1,128 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/floor_mod.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +// OLD-TODO(b/117523611): We should factor out a binary_op and put binary ops +// there. +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +// OLD-TODO(b/117912880): Support quantization. 
+ +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +void* FloorModInit(TfLiteContext* context, const char* buffer, size_t length) { + return nullptr; +} + +TfLiteStatus FloorModPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +TfLiteStatus EvalFloorMod(TfLiteContext* context, bool requires_broadcast, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + const T* denominator_data = tflite::micro::GetTensorData(input2); + + if (requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorMod); + } else { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + 
tflite::micro::GetTensorData(output), reference_ops::FloorMod); + } + + return kTfLiteOk; +} + +TfLiteStatus FloorModEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); + + switch (input1->type) { + case kTfLiteFloat32: { + return EvalFloorMod(context, requires_broadcast, input1, input2, + output); + } + default: { + MicroPrintf("Type '%s' is not supported by FLOOR_MOD.", + TfLiteTypeGetName(input1->type)); + return kTfLiteError; + } + } +} + +} // namespace + +TFLMRegistration Register_FLOOR_MOD() { + return tflite::micro::RegisterOp(FloorModInit, FloorModPrepare, FloorModEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/fully_connected.h b/src/tensorflow/lite/micro/kernels/fully_connected.h index 3e64671..265a2c4 100644 --- a/src/tensorflow/lite/micro/kernels/fully_connected.h +++ b/src/tensorflow/lite/micro/kernels/fully_connected.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,24 +15,101 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ -#include "tensorflow/lite/c/common.h" +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_common.h" namespace tflite { -// This is the most generic TfLiteRegistration. 
The actual supported types may -// still be target dependent. The only requirement is that every implementation -// (reference or optimized) must define this function. -TfLiteRegistration Register_FULLY_CONNECTED(); +struct OpDataFullyConnected { + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; + // The index of the temporary tensor where the quantized inputs are cached. + int input_quantized_index; + // Cached zero point values of tensors. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + +// TODO(b/258710417): enable by default once optimized fully-connected works for +// all targets. +#if !defined(HEXAGON) + // A buffer used to store unpacked filter values. This is used if the source + // tensor is of n-bit precision that cannot be easily processed by kernels. + int filter_buffer_index; + + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; + bool is_per_channel; +#endif + +#ifdef USE_TFLM_COMPRESSION + + // scratch buffers for compressed tensors + int weights_scratch_index; + int bias_scratch_index; + +#endif // USE_TFLM_COMPRESSION +}; + +extern const int kFullyConnectedInputTensor; +extern const int kFullyConnectedWeightsTensor; +extern const int kFullyConnectedBiasTensor; +extern const int kFullyConnectedOutputTensor; + +// Returns a FullyConnectedParams struct with all the parameters needed for a +// float computation. +FullyConnectedParams FullyConnectedParamsFloat( + TfLiteFusedActivation activation); + +// Returns a FullyConnectedParams struct with all the parameters needed for a +// quantized computation. 
+FullyConnectedParams FullyConnectedParamsQuantized( + const OpDataFullyConnected& op_data); + +TfLiteStatus CalculateOpDataFullyConnected( + TfLiteContext* context, TfLiteFusedActivation activation, + TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* output, OpDataFullyConnected* data); + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. +TFLMRegistration Register_FULLY_CONNECTED(); + +#if defined(ARDUINO) || defined(HEXAGON) || defined(XTENSA) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8. +TFLMRegistration Register_FULLY_CONNECTED_INT8(); + +#else +// Note that while this block gets used for both reference and optimized kernels +// that do not have any specialized implementations, the only goal here is to +// define fallback implementation that allow reference kernels to still be used +// from applications that call a more specific kernel variant. + +inline TFLMRegistration Register_FULLY_CONNECTED_INT8() { + return Register_FULLY_CONNECTED(); +} + +#endif -#if defined(CMSIS_NN) || defined(ARDUINO) -// The Arduino is a special case where we use the CMSIS kernels, but because of -// the current approach to building for Arduino, we do not support -DCMSIS_NN as -// part of the build. As a result, we use defined(ARDUINO) as proxy for the -// CMSIS kernels for this one special case. +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16. +TFLMRegistration Register_FULLY_CONNECTED_INT16(); -// Returns a TfLiteRegistration struct for cmsis-nn kernel variant that only -// supports int8. -TfLiteRegistration Register_FULLY_CONNECTED_INT8(); +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 and int4 packed kernels. 
+TFLMRegistration Register_FULLY_CONNECTED_INT4(); #else // Note that while this block gets used for both reference and optimized kernels @@ -40,11 +117,16 @@ TfLiteRegistration Register_FULLY_CONNECTED_INT8(); // define fallback implementation that allow reference kernels to still be used // from applications that call a more specific kernel variant. -inline TfLiteRegistration Register_FULLY_CONNECTED_INT8() { +inline TFLMRegistration Register_FULLY_CONNECTED_INT16() { + return Register_FULLY_CONNECTED(); +} + +inline TFLMRegistration Register_FULLY_CONNECTED_INT4() { return Register_FULLY_CONNECTED(); } #endif + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ diff --git a/src/tensorflow/lite/micro/kernels/fully_connected_common.cpp b/src/tensorflow/lite/micro/kernels/fully_connected_common.cpp new file mode 100644 index 0000000..53709d3 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/fully_connected_common.cpp @@ -0,0 +1,154 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +const int kFullyConnectedInputTensor = 0; +const int kFullyConnectedWeightsTensor = 1; +const int kFullyConnectedBiasTensor = 2; +const int kFullyConnectedOutputTensor = 0; + +FullyConnectedParams FullyConnectedParamsQuantized( + const OpDataFullyConnected& op_data) { + FullyConnectedParams op_params; + op_params.input_offset = -op_data.input_zero_point; + op_params.weights_offset = -op_data.filter_zero_point; + op_params.output_offset = op_data.output_zero_point; + op_params.output_multiplier = op_data.output_multiplier; + op_params.output_shift = op_data.output_shift; + op_params.quantized_activation_min = op_data.output_activation_min; + op_params.quantized_activation_max = op_data.output_activation_max; + return op_params; +} + +FullyConnectedParams FullyConnectedParamsFloat( + TfLiteFusedActivation activation) { + FullyConnectedParams op_params; + CalculateActivationRange(activation, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +TfLiteStatus CalculateOpDataFullyConnected( + TfLiteContext* context, TfLiteFusedActivation activation, + TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* output, + OpDataFullyConnected* data) { +#ifndef HEXAGON + 
data->is_per_channel = false; +#endif + + if (data_type == kTfLiteFloat32) { + return kTfLiteOk; + } + + bool is_per_channel = false; + if (filter->quantization.type == kTfLiteAffineQuantization && + filter->quantization.params != nullptr) { + const auto* affine_quantization = + reinterpret_cast<TfLiteAffineQuantization*>( + filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + is_per_channel = affine_quantization->scale->size > 1; + } + + if (is_per_channel) { +// Hexagon currently does not support per-channel fully connected, and the +// existing hexagon support library is intolerant of data members being added to +// OpDataFullyConnected. As such, we have to be careful not to reference newer +// data members. This is why we use a local variable is_per_channel in common +// code, and only reference the data->is_per_channel in non-HEXAGON code. +#ifdef HEXAGON + TF_LITE_ENSURE_MSG( + context, !is_per_channel, + "FullyConnected per-channel quantization not yet supported on Hexagon. " + "Please set converter._experimental_disable_per_channel_quantization_" + "for_dense_layers = True."); +#else + data->is_per_channel = is_per_channel; + const auto* affine_quantization = + reinterpret_cast<TfLiteAffineQuantization*>( + filter->quantization.params); + const int per_channel_quantization_size = affine_quantization->scale->size; + + // Currently only Int8 is supported for per channel quantization.
+ TF_LITE_ENSURE(context, + input->type == kTfLiteInt8 && filter->type != kTfLiteInt4); + + TF_LITE_ENSURE_EQ( + context, per_channel_quantization_size, + filter->dims->data[affine_quantization->quantized_dimension]); + + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, per_channel_quantization_size * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, per_channel_quantization_size * sizeof(int32_t))); + + // Populate multiplier and shift using affine quantization. + const float input_scale = input->params.scale; + const float output_scale = output->params.scale; + const float* filter_scales = affine_quantization->scale->data; + + for (int i = 0; i < per_channel_quantization_size; ++i) { + const float scale = filter_scales[i]; + const double filter_scale = static_cast(scale); + const double effective_output_scale = static_cast(input_scale) * + filter_scale / + static_cast(output_scale); + int32_t significand; + int channel_shift; + QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); + data->per_channel_output_multiplier[i] = significand; + data->per_channel_output_shift[i] = channel_shift; + } +#endif + } else { + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, input, filter, bias, output, &real_multiplier)); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + } + + // Filter weights will always be symmetric quantized since we only support + // int8 quantization. See + // https://github.com/tensorflow/tensorflow/issues/44912 for additional + // context. 
+ TFLITE_DCHECK(filter->params.zero_point == 0); + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + return CalculateActivationRangeQuantized(context, activation, output, + &data->output_activation_min, + &data->output_activation_max); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/gather.cpp b/src/tensorflow/lite/micro/kernels/gather.cpp new file mode 100644 index 0000000..a0af4c0 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/gather.cpp @@ -0,0 +1,224 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kInputPositions = 1; +constexpr int kOutputTensor = 0; + +template +TfLiteStatus Gather(const TfLiteGatherParams* params, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* coords, TfLiteEvalTensor* output) { + const InputT* input_data = tflite::micro::GetTensorData(input); + const CoordsT* coords_data = tflite::micro::GetTensorData(coords); + InputT* output_data = tflite::micro::GetTensorData(output); + const TfLiteIntArray* input_dims = input->dims; + const int input_dims_size = input_dims->size; + int axis = params->axis; + if (axis < 0) { + axis += input_dims_size; + } + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, input_dims_size); + + int batch_dims = params->batch_dims; + // batch_dims should be in range: [-rank(coords), rank(coords)]. + // Negative batch_dims is added with rank of coords. 
+ const TfLiteIntArray* coords_dims = coords->dims; + const int coords_dims_size = coords_dims->size; + if (batch_dims < 0) { + batch_dims += coords_dims_size; + } + TFLITE_DCHECK_GE(batch_dims, 0); + TFLITE_DCHECK_LT(batch_dims, input_dims_size); + TFLITE_DCHECK_LE(batch_dims, coords_dims_size); + TFLITE_DCHECK_GE(axis, batch_dims); + for (int i = 0; i < batch_dims; ++i) { + TFLITE_DCHECK_EQ(input_dims->data[i], coords_dims->data[i]); + } + + const int axis_size = input_dims->data[axis]; + + int batch_size = 1; + for (int i = 0; i < batch_dims; ++i) { + batch_size *= input_dims->data[i]; + } + int outer_size = 1; + for (int i = batch_dims; i < axis; ++i) { + outer_size *= input_dims->data[i]; + } + int inner_size = 1; + for (int i = axis + 1; i < input_dims_size; ++i) { + inner_size *= input_dims->data[i]; + } + int coord_size = 1; + for (int i = batch_dims; i < coords_dims_size; ++i) { + coord_size *= coords_dims->data[i]; + } + + for (int batch = 0; batch < batch_size; ++batch) { + for (int outer = 0; outer < outer_size; ++outer) { + for (int coord = 0; coord < coord_size; ++coord) { + TFLITE_DCHECK_GE(coords_data[coord], 0); + TFLITE_DCHECK_LT(coords_data[coord], axis_size); + std::memcpy(output_data + + (((batch * outer_size) + outer) * coord_size + coord) * + inner_size, + input_data + (((batch * outer_size) + outer) * axis_size + + coords_data[batch * coord_size + coord]) * + inner_size, + sizeof(InputT) * inner_size); + } + } + } + return kTfLiteOk; +} + +TfLiteStatus GatherPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const auto* params = + reinterpret_cast(node->builtin_data); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* coords = + micro_context->AllocateTempInputTensor(node, 
kInputPositions); + TF_LITE_ENSURE(context, coords != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + switch (coords->type) { + case kTfLiteInt32: + break; + default: + MicroPrintf("Positions of type '%s' are not supported by gather.", + TfLiteTypeGetName(coords->type)); + return kTfLiteError; + break; + } + + // Assign to output the input type. + output->type = input->type; + + // Check conditions for different types. + switch (input->type) { + case kTfLiteFloat32: + case kTfLiteInt8: + break; + default: + MicroPrintf("Type '%s' is not supported by gather.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + break; + } + + int axis = params->axis; + if (axis < 0) { + axis += NumDimensions(input); + } + TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input)); + + int batch_dims = params->batch_dims; + // batch_dims should be in range: [-rank(coords), rank(coords)]. + // Negative batch_dims is added with rank of coords. + if (batch_dims < 0) { + batch_dims += NumDimensions(coords); + } + TF_LITE_ENSURE(context, batch_dims <= axis); + TF_LITE_ENSURE(context, 0 <= batch_dims && batch_dims < NumDimensions(input)); + TF_LITE_ENSURE(context, batch_dims <= NumDimensions(coords)); + for (int i = 0; i < batch_dims; ++i) { + TF_LITE_ENSURE_EQ(context, input->dims->data[i], coords->dims->data[i]); + } + + // GATHER updates the output tensor dimensions, but TfLiteTensor in the + // MicroInterpreter is a temporary allocation. We must therefore relocate the + // dims from the FlatBuffer to the persistent storage arena. 
+ TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + TfLiteIntArray* output_shape = output->dims; + output_shape->size = + NumDimensions(input) + NumDimensions(coords) - 1 - batch_dims; + int output_index = 0; + for (int i = 0; i < axis; ++i) { + output_shape->data[output_index++] = input->dims->data[i]; + } + for (int i = batch_dims; i < coords->dims->size; ++i) { + output_shape->data[output_index++] = coords->dims->data[i]; + } + for (int i = axis + 1; i < input->dims->size; ++i) { + output_shape->data[output_index++] = input->dims->data[i]; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(coords); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus GatherEval(TfLiteContext* context, TfLiteNode* node) { + const auto* params = + reinterpret_cast(node->builtin_data); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* coords = + tflite::micro::GetEvalInput(context, node, kInputPositions); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (coords->type == kTfLiteInt32) { + switch (input->type) { + case kTfLiteFloat32: + return Gather(params, input, coords, output); + break; + case kTfLiteInt8: + return Gather(params, input, coords, output); + break; + default: + MicroPrintf("Type '%s' is not supported by gather.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + break; + } + } + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_GATHER() { + return tflite::micro::RegisterOp(nullptr, GatherPrepare, GatherEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/gather_nd.cpp b/src/tensorflow/lite/micro/kernels/gather_nd.cpp new file mode 100644 index 
0000000..d01af7c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/gather_nd.cpp @@ -0,0 +1,212 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kParams = 0; +constexpr int kIndices = 1; +constexpr int kOutputTensor = 0; +constexpr int MAX_INDICES_ND = 5; + +TfLiteStatus GatherNdPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* params = micro_context->AllocateTempInputTensor(node, kParams); + TF_LITE_ENSURE(context, params != nullptr); + TfLiteTensor* indices = + micro_context->AllocateTempInputTensor(node, kIndices); + TF_LITE_ENSURE(context, indices != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + switch (params->type) { + case kTfLiteFloat32: + case kTfLiteInt8: + break; + default: + MicroPrintf("Params of type 
'%s' are not supported by gather_nd.", + TfLiteTypeGetName(params->type)); + return kTfLiteError; + break; + } + switch (indices->type) { + case kTfLiteInt32: + break; + default: + MicroPrintf("Indices of type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(indices->type)); + return kTfLiteError; + } + + const int params_rank = NumDimensions(params); + const int indices_rank = NumDimensions(indices); + const int indices_nd = SizeOfDimension(indices, indices_rank - 1); + if (params_rank < 1) { + MicroPrintf("Params must be at least a vector."); + return kTfLiteError; + } + if (indices_rank < 1) { + MicroPrintf("Indices must be at least a vector."); + return kTfLiteError; + } + if (indices_nd > params_rank) { + MicroPrintf("Index innermost dimension length must be <= params rank."); + return kTfLiteError; + } + if (indices_nd > MAX_INDICES_ND) { + MicroPrintf("Index innermost dimension length must not exceed %d.", + MAX_INDICES_ND); + return kTfLiteError; + } + + // Assign to output the input type. + output->type = params->type; + + // The tensor output dims must be relocated + // from the FlatBuffer to the persistent storage arena. + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + // TFLM gather_nd does not create the output tensor, but it needs to ensure + // that the output shape is correct. 
The result shape is + // indices.shape[:-1] + params.shape[indices.shape[-1]:] + TfLiteIntArray* output_shape = output->dims; + int output_index = 0; + for (int i = 0; i < indices_rank - 1; ++i) { + output_shape->data[output_index++] = indices->dims->data[i]; + } + for (int i = indices_nd; i < params_rank; ++i) { + output_shape->data[output_index++] = params->dims->data[i]; + } + output_shape->size = output_index; + + micro_context->DeallocateTempTfLiteTensor(params); + micro_context->DeallocateTempTfLiteTensor(indices); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +TfLiteStatus GatherNd(const TfLiteEvalTensor* params, + const TfLiteEvalTensor* indices, + TfLiteEvalTensor* output) { + const int indices_dims = indices->dims->size; + const int indices_nd = indices->dims->data[indices_dims - 1]; + const int params_dims = params->dims->size; + const IndicesT* index_data = tflite::micro::GetTensorData(indices); + const ParamsT* param_data = tflite::micro::GetTensorData(params); + ParamsT* output_data = tflite::micro::GetTensorData(output); + + int n_slices = 1; + for (int i = 0; i < indices_dims - 1; ++i) { + n_slices *= indices->dims->data[i]; + } + + // If indices[-1] == params.rank, fetch single elements. + // If indices[-1] < params.rank, fetch slices. 
+ int slice_size = 1; + for (int i = indices_nd; i < params_dims; ++i) { + slice_size *= params->dims->data[i]; + } + + int params_flat_size = ElementCount(*params->dims); + int remain_flat_size = params_flat_size; + + // Number of elements per dimension + int dims_to_count[MAX_INDICES_ND]; + for (int i = 0; i < indices_nd; ++i) { + dims_to_count[i] = remain_flat_size / params->dims->data[i]; + remain_flat_size = dims_to_count[i]; + } + + for (int i = 0; i < n_slices; ++i) { + int from_pos = 0; + for (int j = 0; j < indices_nd; ++j) { + int offset = i * indices_nd + j; + IndicesT index = index_data[offset]; + from_pos += index * dims_to_count[j]; + } + if (from_pos < 0 || from_pos + slice_size > params_flat_size) { + return kTfLiteError; + } + std::memcpy(output_data + i * slice_size, param_data + from_pos, + sizeof(ParamsT) * slice_size); + } + return kTfLiteOk; +} + +template +TfLiteStatus EvalGatherNd(TfLiteContext* context, + const TfLiteEvalTensor* params, + const TfLiteEvalTensor* indices, + TfLiteEvalTensor* output) { + TfLiteStatus status = kTfLiteError; + switch (params->type) { + case kTfLiteFloat32: + status = GatherNd(params, indices, output); + break; + case kTfLiteInt8: + status = GatherNd(params, indices, output); + break; + default: + MicroPrintf("Params type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(params->type)); + return kTfLiteError; + } + if (status != kTfLiteOk) { + MicroPrintf("gather_nd index out of bounds"); + } + return status; +} + +TfLiteStatus GatherNdEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* params = + tflite::micro::GetEvalInput(context, node, kParams); + const TfLiteEvalTensor* indices = + tflite::micro::GetEvalInput(context, node, kIndices); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (indices->type) { + case kTfLiteInt32: + return EvalGatherNd(context, params, indices, output); + break; + default: + MicroPrintf("Indices of 
type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(indices->type)); + return kTfLiteError; + } +} +} // namespace + +TFLMRegistration Register_GATHER_ND() { + return tflite::micro::RegisterOp(nullptr, GatherNdPrepare, GatherNdEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/hard_swish.cpp b/src/tensorflow/lite/micro/kernels/hard_swish.cpp index a0a245f..f7f49ec 100644 --- a/src/tensorflow/lite/micro/kernels/hard_swish.cpp +++ b/src/tensorflow/lite/micro/kernels/hard_swish.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,72 +23,23 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/hard_swish.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace hard_swish { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - +namespace { void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(HardSwishParams)); } -TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - 
TF_LITE_ENSURE(context, output != nullptr); - - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { - HardSwishParams* params = static_cast(node->user_data); - - params->input_zero_point = input->params.zero_point; - params->output_zero_point = output->params.zero_point; - - const float input_scale = input->params.scale; - const float hires_input_scale = (1.0f / 128.0f) * input_scale; - const float reluish_scale = 3.0f / 32768.0f; - const float output_scale = output->params.scale; - - const double output_multiplier = - static_cast(hires_input_scale / output_scale); - int32_t output_multiplier_fixedpoint_int32; - QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, - ¶ms->output_multiplier_exponent); - DownScaleInt32ToInt16Multiplier( - output_multiplier_fixedpoint_int32, - ¶ms->output_multiplier_fixedpoint_int16); - - TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); - - const double reluish_multiplier = - static_cast(hires_input_scale / reluish_scale); - int32_t reluish_multiplier_fixedpoint_int32; - QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, - ¶ms->reluish_multiplier_exponent); - DownScaleInt32ToInt16Multiplier( - reluish_multiplier_fixedpoint_int32, - ¶ms->reluish_multiplier_fixedpoint_int16); - } - - return kTfLiteOk; -} - TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kHardSwishInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kHardSwishOutputTensor); HardSwishParams* params = static_cast(node->user_data); switch (input->type) { @@ -99,13 +50,6 @@ TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); } break; - case 
kTfLiteUInt8: { - tflite::reference_ops::HardSwish( - *params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; case kTfLiteInt8: { tflite::reference_ops::HardSwish( *params, tflite::micro::GetTensorShape(input), @@ -114,29 +58,18 @@ TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); } break; default: { - TF_LITE_KERNEL_LOG( - context, - "Only float32/int8_t/uint8_t are supported currently, got %s", - TfLiteTypeGetName(input->type)); + MicroPrintf("Unsupported type %s", TfLiteTypeGetName(input->type)); return kTfLiteError; } } return kTfLiteOk; } -} // namespace hard_swish +} // namespace -TfLiteRegistration Register_HARD_SWISH() { - return {/*init=*/hard_swish::HardSwishInit, - /*free=*/nullptr, - /*prepare=*/hard_swish::HardSwishPrepare, - /*invoke=*/hard_swish::HardSwishEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_HARD_SWISH() { + return tflite::micro::RegisterOp(HardSwishInit, tflite::HardSwishPrepare, + HardSwishEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/hard_swish.h b/src/tensorflow/lite/micro/kernels/hard_swish.h new file mode 100644 index 0000000..3ffe60d --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/hard_swish.h @@ -0,0 +1,30 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kHardSwishInputTensor; +extern const int kHardSwishOutputTensor; + +TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ diff --git a/src/tensorflow/lite/micro/kernels/hard_swish_common.cpp b/src/tensorflow/lite/micro/kernels/hard_swish_common.cpp new file mode 100644 index 0000000..8f84652 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/hard_swish_common.cpp @@ -0,0 +1,86 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/hard_swish.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/hard_swish.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +const int kHardSwishInputTensor = 0; +const int kHardSwishOutputTensor = 0; + +TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TFLITE_DCHECK(node->user_data != nullptr); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kHardSwishInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kHardSwishOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + if (input->type == kTfLiteInt8) { + HardSwishParams* params = static_cast(node->user_data); + + params->input_zero_point = input->params.zero_point; + params->output_zero_point = output->params.zero_point; + + const float input_scale = input->params.scale; + const float hires_input_scale = (1.0f / 128.0f) * input_scale; + const float reluish_scale = 3.0f / 32768.0f; + const float output_scale = output->params.scale; + + const double output_multiplier = + static_cast(hires_input_scale / output_scale); + int32_t output_multiplier_fixedpoint_int32; + QuantizeMultiplier(output_multiplier, 
&output_multiplier_fixedpoint_int32, + ¶ms->output_multiplier_exponent); + DownScaleInt32ToInt16Multiplier( + output_multiplier_fixedpoint_int32, + ¶ms->output_multiplier_fixedpoint_int16); + + TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); + + const double reluish_multiplier = + static_cast(hires_input_scale / reluish_scale); + int32_t reluish_multiplier_fixedpoint_int32; + QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, + ¶ms->reluish_multiplier_exponent); + DownScaleInt32ToInt16Multiplier( + reluish_multiplier_fixedpoint_int32, + ¶ms->reluish_multiplier_fixedpoint_int16); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/if.cpp b/src/tensorflow/lite/micro/kernels/if.cpp new file mode 100644 index 0000000..029846b --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/if.cpp @@ -0,0 +1,123 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +constexpr int kCondTensor = 0; + +struct OpData { + int then_subgraph_index; + int else_subgraph_index; +}; + +void* IfInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus IfPrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + op_data->then_subgraph_index = params->then_subgraph_index; + op_data->else_subgraph_index = params->else_subgraph_index; + + TF_LITE_ENSURE(context, node->inputs->size > 0); + + // The first input is the condition. + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + TfLiteTensor* cond = + micro_context->AllocateTempInputTensor(node, kCondTensor); + + TF_LITE_ENSURE(context, cond != nullptr); + TF_LITE_ENSURE_EQ(context, cond->type, kTfLiteBool); + TF_LITE_ENSURE_EQ(context, NumElements(cond), 1); + + micro_context->DeallocateTempTfLiteTensor(cond); + + // The first input of the node is the condition. The rest of inputs are + // passed to the branch subgraphs. Therefore, the number of subgraph inputs + // will be the number of node inputs - 1. 
+ size_t num_inputs = node->inputs->size - 1; + size_t num_outputs = NumOutputs(node); + + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE(context, + op_data->then_subgraph_index < graph_info.NumSubgraphs()); + TF_LITE_ENSURE(context, + op_data->else_subgraph_index < graph_info.NumSubgraphs()); + + TF_LITE_ENSURE_EQ(context, num_inputs, + graph_info.NumSubgraphInputs(op_data->then_subgraph_index)); + TF_LITE_ENSURE_EQ( + context, num_outputs, + graph_info.NumSubgraphOutputs(op_data->then_subgraph_index)); + + return kTfLiteOk; +} + +TfLiteStatus IfEval(TfLiteContext* context, TfLiteNode* node) { + const OpData* op_data = reinterpret_cast(node->user_data); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + const TfLiteEvalTensor* cond = + tflite::micro::GetEvalInput(context, node, kCondTensor); + TF_LITE_ENSURE(context, cond != nullptr); + const bool cond_value = cond->data.b[0]; + + MicroGraph* graph_info = µ_context->graph(); + // Currently we copy the input / output between the subgraphs. + int active_branch_subgraph_index = + cond_value ? op_data->then_subgraph_index : op_data->else_subgraph_index; + + TF_LITE_ENSURE_OK(context, + tflite::micro::CopyOpInputsToSubgraphInputs( + context, node, graph_info, active_branch_subgraph_index, + /*first_tensor_idx=*/1)); + + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(active_branch_subgraph_index)); + + TF_LITE_ENSURE_OK( + context, tflite::micro::CopySubgraphOutputsToOpOutputs( + context, node, graph_info, active_branch_subgraph_index)); + + return kTfLiteOk; +} + +} // namespace. 
+ +TFLMRegistration Register_IF() { + return tflite::micro::RegisterOp(IfInit, IfPrepare, IfEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/kernel_runner.cpp b/src/tensorflow/lite/micro/kernels/kernel_runner.cpp index 5d03479..79824ef 100644 --- a/src/tensorflow/lite/micro/kernels/kernel_runner.cpp +++ b/src/tensorflow/lite/micro/kernels/kernel_runner.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,152 +15,130 @@ limitations under the License. #include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_log.h" + namespace tflite { namespace micro { -namespace { -constexpr size_t kBufferAlignment = 16; -} // namespace - // TODO(b/161841696): Consider moving away from global arena buffers: -constexpr int KernelRunner::kNumScratchBuffers_; constexpr int KernelRunner::kKernelRunnerBufferSize_; uint8_t KernelRunner::kKernelRunnerBuffer_[]; -KernelRunner::KernelRunner(const TfLiteRegistration& registration, +void ClearBufferApi(TfLiteContext* context_) { + context_->GetScratchBuffer = nullptr; + context_->GetExternalContext = nullptr; + context_->AllocatePersistentBuffer = nullptr; + context_->RequestScratchBufferInArena = nullptr; +} + +KernelRunner::KernelRunner(const TFLMRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data, ErrorReporter* error_reporter) - : allocator_(SimpleMemoryAllocator::Create( - error_reporter, kKernelRunnerBuffer_, kKernelRunnerBufferSize_)), - registration_(registration), - tensors_(tensors), - 
error_reporter_(error_reporter) { + const void* builtin_data, + TfLiteIntArray* intermediates +#ifdef USE_TFLM_COMPRESSION + , + const CompressedTensorList* compressed_tensors +#endif // USE_TFLM_COMPRESSION + ) + : registration_(registration), + allocator_(SingleArenaBufferAllocator::Create(kKernelRunnerBuffer_, + kKernelRunnerBufferSize_)), + mock_micro_graph_(allocator_), + fake_micro_context_(tensors, allocator_, &mock_micro_graph_ +#ifdef USE_TFLM_COMPRESSION + , + compressed_tensors +#endif // USE_TFLM_COMPRESSION + ) { // Prepare TfLiteContext: - context_.impl_ = static_cast(this); - context_.ReportError = ReportOpError; + context_.impl_ = static_cast(&fake_micro_context_); + context_.ReportError = MicroContextReportOpError; context_.recommended_num_threads = 1; - context_.GetTensor = GetTensor; - context_.GetEvalTensor = GetEvalTensor; - context_.AllocatePersistentBuffer = AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = RequestScratchBufferInArena; - context_.GetScratchBuffer = GetScratchBuffer; + context_.GetTensor = MicroContextGetTensor; + context_.GetEvalTensor = MicroContextGetEvalTensor; + tflite::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + + context_.recommended_num_threads = 0; // Prepare TfLiteNode: node_.inputs = inputs; node_.outputs = outputs; - node_.builtin_data = builtin_data; + node_.builtin_data = const_cast(builtin_data); + node_.intermediates = intermediates; +} + +bool KernelRunner::ValidateTempBufferDeallocated() { + return fake_micro_context_.IsAllTempTfLiteTensorDeallocated(); } TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, size_t length) { if (registration_.init) { + tflite::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; node_.user_data = registration_.init(&context_, init_data, length); } + + TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); + if 
(registration_.prepare) { + tflite ::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + context_.RequestScratchBufferInArena = + MicroContextRequestScratchBufferInArena; + context_.GetExternalContext = MicroContextGetExternalContext; TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); } + + TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); + return kTfLiteOk; } TfLiteStatus KernelRunner::Invoke() { + tflite::micro::ClearBufferApi(&context_); + context_.GetScratchBuffer = MicroContextGetScratchBuffer; + if (registration_.invoke == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "TfLiteRegistration missing invoke function pointer!"); + MicroPrintf("TFLMRegistration missing invoke function pointer!"); return kTfLiteError; } - return registration_.invoke(&context_, &node_); -} -TfLiteTensor* KernelRunner::GetTensor(const struct TfLiteContext* context, - int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); + TF_LITE_ENSURE_STATUS(registration_.invoke(&context_, &node_)); - return &runner->tensors_[tensor_index]; -} - -TfLiteEvalTensor* KernelRunner::GetEvalTensor( - const struct TfLiteContext* context, int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - TfLiteEvalTensor* eval_tensor = - reinterpret_cast(runner->allocator_->AllocateTemp( - sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); - TFLITE_DCHECK(eval_tensor != nullptr); - - // In unit tests, the TfLiteTensor pointer contains the source of truth for - // buffers and values: - eval_tensor->data = runner->tensors_[tensor_index].data; - eval_tensor->dims = runner->tensors_[tensor_index].dims; - eval_tensor->type = runner->tensors_[tensor_index].type; - return eval_tensor; -} + TF_LITE_ENSURE(&context_, 
ValidateTempBufferDeallocated()); -void* KernelRunner::AllocatePersistentBuffer(TfLiteContext* context, - size_t bytes) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - return runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); + return kTfLiteOk; } -TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(buffer_index != nullptr); - - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); +TfLiteStatus KernelRunner::Reset() { + tflite::micro::ClearBufferApi(&context_); + context_.GetScratchBuffer = MicroContextGetScratchBuffer; - if (runner->scratch_buffer_count_ == kNumScratchBuffers_) { - TF_LITE_REPORT_ERROR( - runner->error_reporter_, - "Exceeded the maximum number of scratch tensors allowed (%d).", - kNumScratchBuffers_); + if (registration_.reset == nullptr) { + MicroPrintf("TFLMRegistration missing reset function pointer!"); return kTfLiteError; } - // For tests, we allocate scratch buffers from the tail and keep them around - // for the lifetime of model. This means that the arena size in the tests will - // be more than what we would have if the scratch buffers could share memory. 
- runner->scratch_buffers_[runner->scratch_buffer_count_] = - runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); - TFLITE_DCHECK(runner->scratch_buffers_[runner->scratch_buffer_count_] != - nullptr); - - *buffer_index = runner->scratch_buffer_count_++; + registration_.reset(&context_, node_.user_data); return kTfLiteOk; } -void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); +TfLiteStatus KernelRunner::Free() { + tflite::micro::ClearBufferApi(&context_); + context_.GetScratchBuffer = MicroContextGetScratchBuffer; - TFLITE_DCHECK(runner->scratch_buffer_count_ <= kNumScratchBuffers_); - if (buffer_index >= runner->scratch_buffer_count_) { - return nullptr; + if (registration_.free == nullptr) { + MicroPrintf("TFLMRegistration missing free function pointer!"); + return kTfLiteError; } - return runner->scratch_buffers_[buffer_index]; -} -void KernelRunner::ReportOpError(struct TfLiteContext* context, - const char* format, ...) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(runner->error_reporter_, format, args); - va_end(args); + registration_.free(&context_, node_.user_data); + return kTfLiteOk; } - } // namespace micro } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/kernel_runner.h b/src/tensorflow/lite/micro/kernels/kernel_runner.h index 34a2149..8dbd7f8 100644 --- a/src/tensorflow/lite/micro/kernels/kernel_runner.h +++ b/src/tensorflow/lite/micro/kernels/kernel_runner.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,12 +18,14 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/fake_micro_context.h" +#include "tensorflow/lite/micro/mock_micro_graph.h" namespace tflite { namespace micro { -// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) +// Helper class to perform a simulated kernel (i.e. TFLMRegistration) // lifecycle (init, prepare, invoke). All internal allocations are handled by // this class. Simply pass in the registration, list of required tensors, inputs // array, outputs array, and any pre-builtin data. Calling Invoke() will @@ -31,51 +33,56 @@ namespace micro { // output provided during construction. class KernelRunner { public: - KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, + KernelRunner(const TFLMRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data, - ErrorReporter* error_reporter); - - // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any - // exceptions will be reported through the error_reporter and returned as a - // status code here. + TfLiteIntArray* outputs, const void* builtin_data, + TfLiteIntArray* intermediates = nullptr +#ifdef USE_TFLM_COMPRESSION + , + const CompressedTensorList* compressed_tensors = nullptr +#endif // USE_TFLM_COMPRESSION + ); + + // Calls init and prepare on the kernel (i.e. TFLMRegistration) struct. + // Any exceptions will be DebugLog'd and returned as a status code. 
TfLiteStatus InitAndPrepare(const char* init_data = nullptr, size_t length = 0); - // Calls init, prepare, and invoke on a given TfLiteRegistration pointer. - // After successful invoke, results will be available in the output tensor as - // passed into the constructor of this class. + // Calls invoke on a given TFLMRegistration pointer. After successful + // invoke, results will be available in the output tensor as passed into the + // constructor of this class. TfLiteStatus Invoke(); - protected: - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_index); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_index); - static void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index); - static void* GetScratchBuffer(TfLiteContext* context, int buffer_index); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); + // Calls Free on a given TFLMRegistration pointer(if it's implemented). + // After successful Free, kTfLiteOk status will be returned. If Free is not + // implemented for a given kernel kTfLiteError will be returned. + TfLiteStatus Free(); - private: - static constexpr int kNumScratchBuffers_ = 12; + // Calls Reset on a given TFLMRegistration pointer(if it's implemented). + // After successful Reset, kTfLiteOk status will be returned. If Free is not + // implemented for a given kernel kTfLiteError will be returned. + TfLiteStatus Reset(); + + // Returns a pointer to the internal MockMicroGraph which KernelRunner uses + // to stub out MicroGraph methods and track invocations on each subgraph. + MockMicroGraph* GetMockGraph() { return &mock_micro_graph_; } + // Returns true if all temp buffer in tests are deallocated. + // TODO(b/209453859): move this function to private after deallocation checks + // are enabled for all kernel tests. 
+ bool ValidateTempBufferDeallocated(); + + private: static constexpr int kKernelRunnerBufferSize_ = 10000; static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_]; - SimpleMemoryAllocator* allocator_ = nullptr; - const TfLiteRegistration& registration_; - TfLiteTensor* tensors_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; - TfLiteContext context_ = {}; TfLiteNode node_ = {}; + const TFLMRegistration& registration_; - int scratch_buffer_count_ = 0; - uint8_t* scratch_buffers_[kNumScratchBuffers_]; + SingleArenaBufferAllocator* allocator_; + MockMicroGraph mock_micro_graph_; + FakeMicroContext fake_micro_context_; }; } // namespace micro diff --git a/src/tensorflow/lite/micro/kernels/kernel_util.cpp b/src/tensorflow/lite/micro/kernels/kernel_util.cpp index deca92b..a509f5d 100644 --- a/src/tensorflow/lite/micro/kernels/kernel_util.cpp +++ b/src/tensorflow/lite/micro/kernels/kernel_util.cpp @@ -16,10 +16,82 @@ limitations under the License. #include "tensorflow/lite/micro/kernels/kernel_util.h" #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { namespace micro { +namespace { + +int ValidateTensorIndexing(const TfLiteContext* context, int index, + int max_size, const int* tensor_indices) { + if (index >= 0 && index < max_size) { + const int tensor_index = tensor_indices[index]; + if (tensor_index != kTfLiteOptionalTensor) { + return tensor_index; + } + } + return -1; +} + +} // namespace + +TFLMRegistration RegisterOp( + void* (*init)(TfLiteContext* context, const char* buffer, size_t length), + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node), + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*free)(TfLiteContext* context, void* buffer), + void (*reset)(TfLiteContext* context, void* buffer)) { + return {/*init=*/init, + /*free=*/free, + 
/*prepare=*/prepare, + /*invoke=*/invoke, + /*reset*/ reset, + /*builtin_code=*/0, + /*custom_name=*/nullptr}; +} + +TFLMInferenceRegistration RegisterOp( + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*reset)(TfLiteContext* context, void* buffer)) { + return { + /*invoke=*/invoke, + /*reset*/ reset, + }; +} + +// Returns a mutable tensor for a given input index. is_variable must be checked +// during prepare when the full TfLiteTensor is available. +TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(node != nullptr); + const int tensor_index = ValidateTensorIndexing( + context, index, node->inputs->size, node->inputs->data); + + if (tensor_index < 0) { + return nullptr; + } + + return context->GetEvalTensor(context, node->inputs->data[index]); +} + +// Returns the TfLiteEvalTensor struct for a given input index in a node. +const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + return GetMutableEvalInput(context, node, index); +} + +// Returns the TfLiteEvalTensor struct for a given output index in a node. 
+TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(node != nullptr); + return context->GetEvalTensor(context, node->outputs->data[index]); +} + bool HaveSameShapes(const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2) { TFLITE_DCHECK(input1 != nullptr); @@ -37,5 +109,181 @@ const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { return RuntimeShape(dims_size, dims_data); } +PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +// Relocate tensor dims from FlatBuffer to the persistent storage arena. +// The old dims data is copied to the new storage area. +// The tensor and eval_tensor must be the same tensor. +// Only use during Prepare phase. +TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context, + TfLiteTensor* tensor, + TfLiteEvalTensor* eval_tensor) { + TF_LITE_ENSURE(context, tensor != nullptr); + TF_LITE_ENSURE(context, eval_tensor != nullptr); + TF_LITE_ENSURE(context, context->AllocatePersistentBuffer != nullptr); + int ranks = tensor->dims->size; + size_t alloc_size = TfLiteIntArrayGetSizeInBytes(ranks); + TfLiteIntArray* new_dims = static_cast( + context->AllocatePersistentBuffer(context, alloc_size)); + TfLiteIntArray* old_dims = tensor->dims; + new_dims->size = ranks; + tensor->dims = new_dims; + eval_tensor->dims = new_dims; + for (int i = 0; i < ranks; i++) { + new_dims->data[i] = old_dims->data[i]; + } + + return kTfLiteOk; +} + +// Verify that both tensors have the same type and size, then return the size +// of both tensors in bytes if they are the same, or -1 if they are different. 
+size_t ValidateAndGetTensorSizes(const TfLiteEvalTensor* tensor1, + const TfLiteEvalTensor* tensor2) { + TFLITE_DCHECK(tensor1->type == tensor2->type); + size_t tensor1_size = 0; + size_t tensor2_size = 0; + TfLiteEvalTensorByteLength(tensor1, &tensor1_size); + TfLiteEvalTensorByteLength(tensor2, &tensor2_size); + return (tensor1_size == tensor2_size) ? tensor1_size : -1; +} + +TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, node->inputs->size == node->outputs->size); + for (int i = 0; i < node->inputs->size; i++) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, i); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + int bytes = ValidateAndGetTensorSizes(input, output); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(output->data.raw, input->data.raw, bytes); + } + return kTfLiteOk; +} + +// Args: +// 1. int8_t tensor_data - int8_t buffer of unknown size who's data you'd +// like +// to print +// 2. int n_btyes - a small int representing number of bytes you want to +// print +// to debug output. It should always be <= tensor_data's size. +// 3. prefix - optional message you'd like to print before printing bytes +// +// Purpose: +// Function takes in parameters above and prints n_bytes bytes from the +// tensor_data buffer. This can be use to debug the output of a model and it's +// op. 
+ +void PrintNBytes(const int8_t* tensor_data, int n_bytes, const char* prefix) { + if (prefix != nullptr) { + MicroPrintf("%s", prefix); + } + + for (int i = 0; i < n_bytes; ++i) { + MicroPrintf(" %x", tensor_data[i]); + } + MicroPrintf("\n"); +} + +// same as the PrintNBytes above but the buffer needs to be extracted out of the +// TfLiteEvalTensor* +void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes, + const char* prefix) { + const int8_t* tensor_data = tflite::micro::GetTensorData(tensor); + PrintNBytes(tensor_data, n_bytes, prefix); +} + +// same as the PrintNBytes above but the buffer needs to be extracted out of the +// TfLiteEvalTensor* +void PrintNBytes(const TfLiteTensor* tensor, int n_bytes, const char* prefix) { + const int8_t* tensor_data = tflite::GetTensorData(tensor); + PrintNBytes(tensor_data, n_bytes, prefix); +} + +TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx, + int first_tensor_idx) { + TF_LITE_ENSURE(context, + static_cast(node->inputs->size - first_tensor_idx) == + graph_info->NumSubgraphInputs(subgraph_idx)); + for (int i = 0; i < node->inputs->size - first_tensor_idx; i++) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, i + first_tensor_idx); + TfLiteEvalTensor* subgraph_input = + graph_info->GetSubgraphInput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(input, subgraph_input); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(subgraph_input->data.raw, input->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx) { + TF_LITE_ENSURE(context, static_cast(node->outputs->size) == + graph_info->NumSubgraphInputs(subgraph_idx)); + for (int i = 0; i < node->outputs->size; i++) { + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + TfLiteEvalTensor* subgraph_input 
= + graph_info->GetSubgraphInput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(output, subgraph_input); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(subgraph_input->data.raw, output->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx) { + if (graph_info->NumSubgraphOutputs(subgraph_idx) == 0) return kTfLiteOk; + TF_LITE_ENSURE(context, static_cast(node->outputs->size) == + graph_info->NumSubgraphOutputs(subgraph_idx)); + for (int i = 0; i < node->outputs->size; i++) { + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + TfLiteEvalTensor* subgraph_output = + graph_info->GetSubgraphOutput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(output, subgraph_output); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(output->data.raw, subgraph_output->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context, + int scratch_buffer_index, + const TfLiteEvalTensor* tensor) { + if (tensor->type != kTfLiteInt4) { + return *tensor; + } + + TfLiteEvalTensor new_tensor; + new_tensor.data.data = static_cast( + context->GetScratchBuffer(context, scratch_buffer_index)); + new_tensor.dims = tensor->dims; + new_tensor.type = kTfLiteInt8; + tflite::tensor_utils::UnpackDenseInt4IntoInt8( + tflite::micro::GetTensorData(tensor), + tflite::micro::GetTensorShape(tensor).FlatSize(), + tflite::micro::GetTensorData(&new_tensor)); + return new_tensor; +} + } // namespace micro } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/kernel_util.h b/src/tensorflow/lite/micro/kernels/kernel_util.h index 79cd58e..5cb71af 100644 --- a/src/tensorflow/lite/micro/kernels/kernel_util.h +++ b/src/tensorflow/lite/micro/kernels/kernel_util.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,50 +18,130 @@ limitations under the License. #include +#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_context.h" + +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_utils.h" + +#endif // USE_TFLM_COMPRESSION namespace tflite { namespace micro { +TFLMRegistration RegisterOp( + void* (*init)(TfLiteContext* context, const char* buffer, size_t length), + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node), + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*free)(TfLiteContext* context, void* buffer) = nullptr, + void (*reset)(TfLiteContext* context, void* buffer) = nullptr); + +TFLMInferenceRegistration RegisterOp( + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*reset)(TfLiteContext* context, void* buffer) = nullptr); + +// Prints out n bytes in a int8_t buffer as hex +void PrintNBytes(const int8_t* tensor_data, int n_bytes, + const char* prefix = nullptr); + +// Prints out the n bytes in a TfLiteEvalTensor as hex +void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes, + const char* prefix = nullptr); + +// Prints out n bytes in a TfLiteTensor as hex +void PrintNBytes(const TfLiteTensor* tensor, int n_bytes, + const char* prefix = nullptr); + // Returns a mutable tensor for a given input index. is_variable must be checked // during prepare when the full TfLiteTensor is available. 
-inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, - const TfLiteNode* node, - int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->inputs->data[index]); -} +TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index); // Returns the TfLiteEvalTensor struct for a given input index in a node. -inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetMutableEvalInput(context, node, index); -} +const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index); // Returns the TfLiteEvalTensor struct for a given output index in a node. -inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->outputs->data[index]); -} +TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, + const TfLiteNode* node, int index); -// Returns data for a TfLiteEvalTensor struct. +// Returns data for a TfLiteEvalTensor struct that are expected to exist. template T* GetTensorData(TfLiteEvalTensor* tensor) { - return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; + TFLITE_DCHECK(tensor != nullptr); + return reinterpret_cast(tensor->data.raw); } -// Returns const data for a TfLiteEvalTensor struct. +// Returns const data for a TfLiteEvalTensor struct that are expected to exist. template const T* GetTensorData(const TfLiteEvalTensor* tensor) { TFLITE_DCHECK(tensor != nullptr); return reinterpret_cast(tensor->data.raw); } +// Returns data for a TfLiteEvalTensor struct that could be null. +template +T* GetOptionalTensorData(TfLiteEvalTensor* tensor) { + return tensor == nullptr ? 
nullptr : reinterpret_cast(tensor->data.raw); +} + +// Returns const data for a TfLiteEvalTensor struct that could be null. +template +const T* GetOptionalTensorData(const TfLiteEvalTensor* tensor) { + return tensor == nullptr ? nullptr + : reinterpret_cast(tensor->data.raw); +} + +#ifdef USE_TFLM_COMPRESSION + +// Overloads existing GetOptionalTensorData. If not compressed, this will return +// tensor->data. +template +const T* GetOptionalTensorData(MicroContext* micro_context, + const TfLiteEvalTensor* tensor, + const CompressionTensorData* compression_data, + int scratch_buffer_handle) { + if (tensor == nullptr) { + return nullptr; + } + if (compression_data == nullptr) { + return reinterpret_cast(tensor->data.data); + } + + void* scratch_buffer = nullptr; + if (scratch_buffer_handle != -1) { + scratch_buffer = micro_context->GetScratchBuffer(scratch_buffer_handle); + } else { + size_t bytes_to_allocate = EvalTensorBytes(tensor); + scratch_buffer = micro_context->AllocateDecompressionMemory( + bytes_to_allocate, MicroArenaBufferAlignment()); + } + TFLITE_DCHECK(scratch_buffer != nullptr); + void* uncompressed_data = micro_context->DecompressTensorToBuffer( + *tensor, *compression_data, scratch_buffer); + return reinterpret_cast(uncompressed_data); +} + +// Overloads existing GetTensorData. If not compressed, this will return +// tensor->data. +template +const T* GetTensorData(MicroContext* micro_context, + const TfLiteEvalTensor* tensor, + const CompressionTensorData* compression_data, + int scratch_buffer_handle) { + TFLITE_DCHECK(tensor != nullptr); + return GetOptionalTensorData(micro_context, tensor, compression_data, + scratch_buffer_handle); +} + +#endif // USE_TFLM_COMPRESSION + // Returns the shape of a TfLiteEvalTensor struct. 
const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); @@ -69,6 +149,52 @@ const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); bool HaveSameShapes(const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2); +PaddingType RuntimePaddingType(TfLitePadding padding); + +// Relocate tensor dims from FlatBuffer to the persistent storage arena. +// The old dims data is copied to the new storage area. +// The tensor and eval_tensor must be the same tensor. +// Only use during Prepare phase. +TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context, + TfLiteTensor* tensor, + TfLiteEvalTensor* eval_tensor); + +// Copy all op input tensors to op output tensors. Requires all op input tensor +// shapes and types to be identical to op output tensor shapes and types. +TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node); + +// Copy all op input tensors to subgraph input tensors. Requires all op input +// tensor shapes and types to be identical to subgraph input tensor shapes and +// types. +TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx, + int first_tensor_idx); + +// Copy all op output tensors to subgraph input tensors. Requires all op output +// tensor shapes and types to be identical to subgraph input tensor shapes and +// types. +TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx); + +// Copy all subgraph output tensors to op outputs. Requires all subgraph output +// tensor shapes and types to be identical to op output tensor shapes and types. +TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx); + +// If tensor is INT4, make a new TfLiteEvalTensor with data unpacked into +// a scratch buffer. The returned tensor will have the kTfLiteInt8 type. 
+// Assume scratch buffer is previously requested in Prepare, and +// scratch_buffer_index can be used to retrieve that buffer. +// If the tensor is not INT4, a shallow copy is returned. +TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context, + int scratch_buffer_index, + const TfLiteEvalTensor* tensor); } // namespace micro } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/l2_pool_2d.cpp b/src/tensorflow/lite/micro/kernels/l2_pool_2d.cpp new file mode 100644 index 0000000..c5eb70f --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/l2_pool_2d.cpp @@ -0,0 +1,142 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/pooling.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// Input/output tensor index. 
+constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// required rank for input/output tensor shape +constexpr int kTensorShapeRank = 4; + +// input/output tensor shape rank associations +enum { kBatchRank = 0, kHeightRank, kWidthRank, kChannelRank }; + +TfLiteStatus L2Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + auto* params = static_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), kTensorShapeRank); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), kTensorShapeRank); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + int batches = SizeOfDimension(input, kBatchRank); + int height = SizeOfDimension(input, kHeightRank); + int width = SizeOfDimension(input, kWidthRank); + int channels_out = SizeOfDimension(input, kChannelRank); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + int out_width, out_height; + + params->computed.padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, 1, 1, height, width, + params->filter_height, params->filter_width, padding, &out_height, + &out_width); + + // We currently don't have a quantized implementation of L2Pool + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); + + // We must update the output tensor dimensions. + // The dims storage is expected to be the same area in memory + // for both TfLiteTensor and TfLiteEvalTensor. This is important + // because TfLiteTensor in the MicroInterpreter is a temporary + // allocation. 
For the KernelRunner interpreter, TfLiteEvalTensor + // is a temporary allocation. We must therefore relocate the dims + // from the FlatBuffer to the persistent storage arena. + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + output->dims->data[kBatchRank] = batches; + output->dims->data[kHeightRank] = out_height; + output->dims->data[kWidthRank] = out_width; + output->dims->data[kChannelRank] = channels_out; + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + + return kTfLiteOk; +} + +void L2EvalFloat(const TfLitePoolParams& params, const TfLiteEvalTensor& input, + tflite::PoolParams* op_params, TfLiteEvalTensor* output) { + float activation_min, activation_max; + CalculateActivationRange(params.activation, &activation_min, &activation_max); + + op_params->float_activation_min = activation_min; + op_params->float_activation_max = activation_max; + reference_ops::L2Pool(*op_params, tflite::micro::GetTensorShape(&input), + tflite::micro::GetTensorData(&input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = static_cast(node->builtin_data); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = params->computed.padding.height; + op_params.padding_values.width = params->computed.padding.width; + + switch (input->type) { // Already know 
in/out types are same. + case kTfLiteFloat32: + L2EvalFloat(*params, *input, &op_params, output); + break; + default: + MicroPrintf("L2_POOL_2D only supports float32 currently, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_L2_POOL_2D() { + return tflite::micro::RegisterOp(nullptr, L2Prepare, L2Eval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/l2norm.cpp b/src/tensorflow/lite/micro/kernels/l2norm.cpp index 401741a..bde38de 100644 --- a/src/tensorflow/lite/micro/kernels/l2norm.cpp +++ b/src/tensorflow/lite/micro/kernels/l2norm.cpp @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +14,13 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h" #include "tensorflow/lite/kernels/internal/reference/l2normalization.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace l2norm { namespace { @@ -36,9 +33,7 @@ enum KernelType { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; -} // namespace - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus L2NormPrepare(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); TFLITE_DCHECK(node->builtin_data != nullptr); @@ -49,38 +44,41 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { 
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE(context, NumDimensions(input) <= 4); - TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 || - output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8); + TF_LITE_ENSURE(context, + output->type == kTfLiteFloat32 || output->type == kTfLiteInt8); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { + if (output->type == kTfLiteInt8) { data->input_zero_point = input->params.zero_point; } else if (output->type == kTfLiteFloat32) { data->input_zero_point = 0; } - // TODO(ahentz): For some reason our implementations don't support - // activations. + // Our implementations don't currently support activations. 
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* L2NormInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(L2NormalizationParams)); } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus L2NormEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); const L2NormalizationParams& data = *(static_cast(node->user_data)); @@ -110,12 +108,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output), epsilon); - } else if (output->type == kTfLiteUInt8) { - reference_ops::L2Normalization( - data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); } else if (output->type == kTfLiteInt8) { const auto input_shape = tflite::micro::GetTensorShape(input); const auto output_shape = tflite::micro::GetTensorShape(output); @@ -129,29 +121,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input), tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Output type is %s, requires float.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace l2norm - -TfLiteRegistration Register_L2NORM_REF() { - return {/*init=*/l2norm::Init, - /*free=*/nullptr, - /*prepare=*/l2norm::Prepare, - /*invoke=*/l2norm::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; 
+} // namespace + +TFLMRegistration Register_L2NORM_REF() { + return tflite::micro::RegisterOp(L2NormInit, L2NormPrepare, L2NormEval); } -TfLiteRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); } +TFLMRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/leaky_relu.cpp b/src/tensorflow/lite/micro/kernels/leaky_relu.cpp new file mode 100644 index 0000000..ee86f19 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/leaky_relu.cpp @@ -0,0 +1,95 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/leaky_relu.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/leaky_relu.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +template +void QuantizeLeakyRelu(const LeakyReluOpData& data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + LeakyReluParams op_params = {}; + + op_params.input_offset = data.input_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier_alpha = data.output_multiplier_alpha; + op_params.output_shift_alpha = data.output_shift_alpha; + op_params.output_multiplier_identity = data.output_multiplier_identity; + op_params.output_shift_identity = data.output_shift_identity; + reference_ops::QuantizeLeakyRelu(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(LeakyReluOpData)); +} + +TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const LeakyReluOpData& data = *static_cast(node->user_data); + + switch (input->type) { + case kTfLiteFloat32: { + LeakyReluParams op_params = {}; + 
const auto* params = + static_cast(node->builtin_data); + + op_params.alpha = params->alpha; + reference_ops::LeakyRelu(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + case kTfLiteInt8: { + QuantizeLeakyRelu(data, input, output); + return kTfLiteOk; + } break; + case kTfLiteInt16: { + QuantizeLeakyRelu(data, input, output); + return kTfLiteOk; + } break; + default: + MicroPrintf("Only float32, int8 are supported by LEAKY_RELU, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteError; +} + +TFLMRegistration Register_LEAKY_RELU() { + return tflite::micro::RegisterOp(LeakyReluInit, LeakyReluPrepare, + LeakyReluEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/leaky_relu.h b/src/tensorflow/lite/micro/kernels/leaky_relu.h new file mode 100644 index 0000000..dfcd6e9 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/leaky_relu.h @@ -0,0 +1,43 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +// Input/output tensor index. 
+extern const int kInputTensor; +extern const int kOutputTensor; + +struct LeakyReluOpData { + // quantization parameters + int32_t output_multiplier_alpha; + int32_t output_shift_alpha; + int32_t output_multiplier_identity; + int32_t output_shift_identity; + int32_t input_zero_point; + int32_t output_zero_point; +}; + +TfLiteStatus CalculateOpDataLeakyRelu(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ diff --git a/src/tensorflow/lite/micro/kernels/leaky_relu_common.cpp b/src/tensorflow/lite/micro/kernels/leaky_relu_common.cpp new file mode 100644 index 0000000..3d1ffeb --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/leaky_relu_common.cpp @@ -0,0 +1,78 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/leaky_relu.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/leaky_relu.h" + +namespace tflite { + +// Input/output tensor index. +const int kInputTensor = 0; +const int kOutputTensor = 0; + +TfLiteStatus CalculateOpDataLeakyRelu(TfLiteContext* context, + TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + LeakyReluOpData* data = static_cast(node->user_data); + const auto* params = + static_cast(node->builtin_data); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + + int output_shift_alpha; + double alpha_multiplier = static_cast( + input->params.scale * params->alpha / output->params.scale); + QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, + &output_shift_alpha); + data->output_shift_alpha = static_cast(output_shift_alpha); + + int output_shift_identity; + double identity_multiplier = + static_cast(input->params.scale / output->params.scale); + QuantizeMultiplier(identity_multiplier, 
&data->output_multiplier_identity, + &output_shift_identity); + data->output_shift_identity = static_cast(output_shift_identity); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpDataLeakyRelu(context, node); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/log_softmax.cpp b/src/tensorflow/lite/micro/kernels/log_softmax.cpp new file mode 100644 index 0000000..47f5937 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/log_softmax.cpp @@ -0,0 +1,148 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/log_softmax.h" + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// used only with quantized data +struct LogSoftmaxOpData { + int32_t input_multiplier; + int32_t input_left_shift; + int32_t reverse_scaling_divisor; + int32_t reverse_scaling_right_shift; + int diff_min; + size_t outer_size; // number of tensor elements skipping computation axis + size_t depth; // number of tensor elements on computation axis +}; + +// input/output tensor index +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + TF_LITE_ENSURE(context, HaveSameShapes(input, output)); + + if (input->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(LogSoftmaxOpData)); + auto data = static_cast(node->user_data); + + // quantization datum + constexpr int32_t kOutputZeroPoint = 127; + constexpr float kOutputScale = 16.0 / 256; + constexpr double kBeta = 1.0; + constexpr int 
kScaledDiffIntegerBits = 5; + + TF_LITE_ENSURE(context, output->params.scale == kOutputScale); + TF_LITE_ENSURE(context, output->params.zero_point == kOutputZeroPoint); + + int input_left_shift; + int reverse_scaling_right_shift; + tflite::PreprocessLogSoftmaxScalingExp( + kBeta, static_cast(input->params.scale), kScaledDiffIntegerBits, + &data->input_multiplier, &input_left_shift, + &data->reverse_scaling_divisor, &reverse_scaling_right_shift); + data->input_left_shift = static_cast(input_left_shift); + data->reverse_scaling_right_shift = + static_cast(-reverse_scaling_right_shift); + // diff_min has a negative value, and is used to limit the maximum magnitude + // of the diffs, which are <= 0. + data->diff_min = + -tflite::CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift); + + RuntimeShape input_shape = GetTensorShape(input); + const int trailing_dim = input_shape.DimensionsCount() - 1; + data->outer_size = + static_cast(FlatSizeSkipDim(input_shape, trailing_dim)); + data->depth = static_cast(input_shape.Dims(trailing_dim)); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { + const LogSoftmaxOpData* data = + static_cast(node->user_data); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: { + SoftmaxParams op_params = {}; + reference_ops::LogSoftmax(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { + SoftmaxParams op_params = {}; + 
op_params.input_multiplier = data->input_multiplier; + op_params.input_left_shift = data->input_left_shift; + op_params.reverse_scaling_divisor = data->reverse_scaling_divisor; + op_params.reverse_scaling_right_shift = data->reverse_scaling_right_shift; + op_params.diff_min = data->diff_min; + reference_ops::LogSoftmax(op_params, data->outer_size, data->depth, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: + MicroPrintf("LOG_SOFTMAX only supports float32, int8, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } +} + +} // namespace + +TFLMRegistration Register_LOG_SOFTMAX() { + return tflite::micro::RegisterOp(nullptr, LogSoftmaxPrepare, LogSoftmaxEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/logical.cpp b/src/tensorflow/lite/micro/kernels/logical.cpp index f4033ba..53e282d 100644 --- a/src/tensorflow/lite/micro/kernels/logical.cpp +++ b/src/tensorflow/lite/micro/kernels/logical.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/lite/micro/kernels/logical.h" + #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" @@ -19,87 +21,24 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace logical { namespace { -// Input/output tensor index. -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, - bool (*func)(bool, bool)) { - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (tflite::micro::HaveSameShapes(input1, input2)) { - reference_ops::BinaryFunction( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), func); - } else { - reference_ops::BroadcastBinaryFunction4DSlow( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), func); - } - - return kTfLiteOk; -} - -bool LogicalOr(bool x, bool y) { return x || y; } - TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { return LogicalImpl(context, node, LogicalOr); } -bool LogicalAnd(bool x, bool y) { return x && y; } - TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { return LogicalImpl(context, node, LogicalAnd); } } // namespace -} // namespace logical -TfLiteRegistration Register_LOGICAL_OR() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. 
- return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalOrEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_LOGICAL_OR() { + return tflite::micro::RegisterOp(nullptr, nullptr, LogicalOrEval); } -TfLiteRegistration Register_LOGICAL_AND() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalAndEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_LOGICAL_AND() { + return tflite::micro::RegisterOp(nullptr, nullptr, LogicalAndEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/logical.h b/src/tensorflow/lite/micro/kernels/logical.h new file mode 100644 index 0000000..e70e457 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/logical.h @@ -0,0 +1,35 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { +// Input/output tensor index. 
+extern const int kLogicalInputTensor1; +extern const int kLogicalInputTensor2; +extern const int kLogicalOutputTensor; + +TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, + bool (*func)(bool, bool)); + +bool LogicalOr(bool x, bool y); +bool LogicalAnd(bool x, bool y); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ diff --git a/src/tensorflow/lite/micro/kernels/logical_common.cpp b/src/tensorflow/lite/micro/kernels/logical_common.cpp new file mode 100644 index 0000000..2612d3a --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/logical_common.cpp @@ -0,0 +1,63 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/logical.h" + +namespace tflite { + +// Input/output tensor index. 
+const int kLogicalInputTensor1 = 0; +const int kLogicalInputTensor2 = 1; +const int kLogicalOutputTensor = 0; + +TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, + bool (*func)(bool, bool)) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kLogicalInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kLogicalInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kLogicalOutputTensor); + + if (tflite::micro::HaveSameShapes(input1, input2)) { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), func); + } else { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), func); + } + + return kTfLiteOk; +} + +bool LogicalOr(bool x, bool y) { return x || y; } + +bool LogicalAnd(bool x, bool y) { return x && y; } + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/logistic.cpp b/src/tensorflow/lite/micro/kernels/logistic.cpp index 3fa81ba..da2b34f 100644 --- a/src/tensorflow/lite/micro/kernels/logistic.cpp +++ b/src/tensorflow/lite/micro/kernels/logistic.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,71 +24,25 @@ limitations under the License. 
#include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/logistic.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, - OpData* data) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, - std::numeric_limits::min()); - - static constexpr int kInputIntegerBits = 4; - const double input_real_multiplier = - static_cast(input->params.scale) * - static_cast(1 << (31 - kInputIntegerBits)); - - data->input_zero_point = input->params.zero_point; - - const double q = std::frexp(input_real_multiplier, &data->input_left_shift); - data->input_multiplier = static_cast(TfLiteRound(q * (1ll << 31))); - - data->input_range_radius = - CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); - } - return kTfLiteOk; -} -} // namespace void* LogisticInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return 
CalculateArithmeticOpData(context, node, data); + return context->AllocatePersistentBuffer(context, sizeof(OpDataLogistic)); } TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kLogisticInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kLogisticOutputTensor); TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + OpDataLogistic* data = static_cast(node->user_data); if (input->type == kTfLiteFloat32) { switch (output->type) { @@ -100,9 +54,25 @@ TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt16) { + switch (output->type) { + case kTfLiteInt16: { + reference_integer_ops::Logistic( + data->input_multiplier, data->input_left_shift, + NumElements(input->dims), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else if (input->type == kTfLiteInt8) { @@ -117,34 +87,25 @@ TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + 
TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { // TODO(b/141211002): Also support other data types once we have supported // temporary tensors in TFLM. - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace activations +} // namespace -TfLiteRegistration Register_LOGISTIC() { - return {/*init=*/activations::LogisticInit, - /*free=*/nullptr, - /*prepare=*/activations::LogisticPrepare, - /*invoke=*/activations::LogisticEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_LOGISTIC() { + return tflite::micro::RegisterOp(LogisticInit, LogisticPrepare, LogisticEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/logistic.h b/src/tensorflow/lite/micro/kernels/logistic.h new file mode 100644 index 0000000..1de0cda --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/logistic.h @@ -0,0 +1,42 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { +extern const int kLogisticInputTensor; +extern const int kLogisticOutputTensor; + +struct OpDataLogistic { + int32_t input_zero_point; + int32_t input_range_radius; + int32_t input_multiplier; + int input_left_shift; +}; + +TfLiteStatus CalculateArithmeticOpDataLogistic(TfLiteContext* context, + TfLiteNode* node, + OpDataLogistic* data); + +TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ diff --git a/src/tensorflow/lite/micro/kernels/logistic_common.cpp b/src/tensorflow/lite/micro/kernels/logistic_common.cpp new file mode 100644 index 0000000..67bb397 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/logistic_common.cpp @@ -0,0 +1,119 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" +#include "tensorflow/lite/kernels/internal/reference/logistic.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/logistic.h" + +namespace tflite { +const int kLogisticInputTensor = 0; +const int kLogisticOutputTensor = 0; + +TfLiteStatus CalculateArithmeticOpDataLogistic(TfLiteContext* context, + TfLiteNode* node, + OpDataLogistic* data) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kLogisticInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kLogisticOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, + std::numeric_limits::min()); + + static constexpr int kInputIntegerBits = 4; + const double input_real_multiplier = + static_cast(input->params.scale) * + static_cast(1 << (31 - kInputIntegerBits)); + + data->input_zero_point = input->params.zero_point; + + const double q = std::frexp(input_real_multiplier, &data->input_left_shift); + data->input_multiplier = static_cast(TfLiteRound(q * (1ll << 31))); + + data->input_range_radius = + CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); + } + + if (input->type == kTfLiteInt16) { + static constexpr int 
kInputIntegerBits = 3; + static constexpr int kOutputFractionalBits = 15; + + // See comments in TanhPrepare about requiring zero_point==0 + // and a power-of-two ("POT") scale. + + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + + int input_scale_log2_rounded; + bool param_scale_pot = + CheckedLog2(input->params.scale, &input_scale_log2_rounded); + + data->input_left_shift = + (15 - kInputIntegerBits) + input_scale_log2_rounded; + param_scale_pot &= (data->input_left_shift == 0); + + if (param_scale_pot) { + data->input_multiplier = 0; + } else { + // Calculate multiplier to change input scale to 1/(3*4096) + // as required by the table lookup. + // In this scaling +/-2^17 represents +/-10.7 + double multiplier = + static_cast(input->params.scale) * 4096.0 * 3.0; + + data->input_left_shift = 0; + + while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) { + data->input_left_shift++; + multiplier = multiplier * 2.0; + } + + data->input_multiplier = static_cast(multiplier); + } + TFLITE_DCHECK_LE(data->input_multiplier, 32767); + int output_scale_log2_rounded; + TF_LITE_ENSURE( + context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); + TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, + -kOutputFractionalBits); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + OpDataLogistic* data = static_cast(node->user_data); + + return CalculateArithmeticOpDataLogistic(context, node, data); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/lstm_eval.cpp b/src/tensorflow/lite/micro/kernels/lstm_eval.cpp new file mode 100644 index 0000000..93d6bc7 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/lstm_eval.cpp @@ -0,0 +1,295 @@ +/* Copyright 2023 
The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/micro/kernels/lstm_eval.h" + +#include + +#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" +#include "tensorflow/lite/kernels/internal/reference/logistic.h" +#include "tensorflow/lite/kernels/internal/reference/mul.h" +#include "tensorflow/lite/kernels/internal/reference/tanh.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +LstmTensors::LstmTensors(TfLiteContext* context, TfLiteNode* node) { + micro_context_ = GetMicroContext(context); + // 24 internal tensors. 
see lstm_shared.h for tensor names + for (size_t i = 0; i < 24; i++) { + internal_tensors_[i] = micro_context_->AllocateTempInputTensor(node, i); + } + output_tensor_ = + micro_context_->AllocateTempOutputTensor(node, kLstmOutputTensor); +} + +LstmTensors::~LstmTensors() { + for (size_t i = 0; i < 24; i++) { + if (internal_tensors_[i] != nullptr) { + micro_context_->DeallocateTempTfLiteTensor(internal_tensors_[i]); + } + } + micro_context_->DeallocateTempTfLiteTensor(output_tensor_); +} + +// Verify the LSTM internal tensor properties (e.g., type checks) +// Input/output/states/fc weights tensors are required for kernel evaulation. +// The state tensors should be variables. Variants of the standard LSTM +// are not supported here, therefore their corresponding tensors should be +// invalid +TfLiteStatus LstmTensors::ValidateTensorStatus(TfLiteContext* context) const { + // Verify certain tensor properties + // input tensor + TF_LITE_ENSURE(context, internal_tensors_[kLstmInputTensor] != nullptr); + // hidden state + TF_LITE_ENSURE(context, internal_tensors_[kLstmOutputStateTensor] != nullptr); + TF_LITE_ENSURE(context, + internal_tensors_[kLstmOutputStateTensor]->is_variable); + // hidden state becomes input so they must have the same type + TF_LITE_ENSURE_EQ(context, internal_tensors_[kLstmOutputStateTensor]->type, + internal_tensors_[kLstmInputTensor]->type); + // cell state + TF_LITE_ENSURE(context, internal_tensors_[kLstmCellStateTensor] != nullptr); + TF_LITE_ENSURE(context, internal_tensors_[kLstmCellStateTensor]->is_variable); + // output + TF_LITE_ENSURE(context, output_tensor_ != nullptr); + // output type is the same as the input type (activations) + TF_LITE_ENSURE_EQ(context, output_tensor_->type, + internal_tensors_[kLstmInputTensor]->type); + + // weight tensors (1-9, see lstm_shared for index definition) + const auto weight_type = + internal_tensors_[kLstmInputToForgetWeightsTensor]->type; + for (size_t i = 1; i < 9; i++) { + TF_LITE_ENSURE(context, 
internal_tensors_[i] != nullptr); + TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, weight_type); + } + + // bias tensors (12-15, see lstm_shared for index definition) + const auto bias_type = internal_tensors_[kLstmForgetGateBiasTensor]->type; + for (size_t i = 12; i < 16; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] != nullptr); + TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, bias_type); + } + // Tensors from LSTM variants are invalid + // No peephole + for (size_t i = 9; i < 12; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + // No projection + for (size_t i = 16; i < 18; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + // No internal layer norm + for (size_t i = 20; i < 24; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + return kTfLiteOk; +} + +namespace lstm_internal { + +const int32_t kInt16Max = std::numeric_limits::max(); +const int32_t kInt16Min = std::numeric_limits::min(); + +void AddElementWise(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + int32_t sum = input_1[index] + input_2[index]; + const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum)); + output[index] = static_cast(sum_clamped); + } + } +} + +void AddElementWise(const float* input_1, const float* input_2, int n_batch, + int n_input, float* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + output[index] = input_1[index] + input_2[index]; + } + } +} + +void Sigmoid(const RuntimeShape& data_shape, int16_t* data) { + reference_integer_ops::Logistic( + 0 /*data->input_multiplier*/, 0 /*data->input_left_shift */, + data_shape.FlatSize() /*NumElements(input->dims)*/, + data /* tflite::micro::GetTensorData(input) */, + 
data /*tflite::micro::GetTensorData(output) */); +} + +void Sigmoid(const RuntimeShape& data_shape, float* data) { + reference_ops::Logistic(data_shape, data, data_shape, data); +} + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + int16_t* input_data, const RuntimeShape& output_data_shape, + int16_t* output_data) { + int32_t tanh_input_left_shift = (15 + cell_state_scale_power) - 3; + int32_t input_multiplier = 0; + if (tanh_input_left_shift < 0) /* handling negative shift value */ + { + tanh_input_left_shift = -tanh_input_left_shift; + input_multiplier = 3; + } + reference_integer_ops::Tanh(input_multiplier, tanh_input_left_shift, + input_data_shape, input_data, output_data_shape, + output_data); +} + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + float* input_data, const RuntimeShape& output_data_shape, + float* output_data) { + reference_ops::Tanh(input_data_shape, input_data, output_data_shape, + output_data); +} + +// Input and output have the same shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int8_t* output_data) { + return reference_integer_ops::MulElementwise( + shape.FlatSize(), params, input1_data, input2_data, output_data); +} + +// Input and output have the same shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int16_t* output_data) { + return reference_integer_ops::MulElementwise( + shape.FlatSize(), params, input1_data, input2_data, output_data); +} + +// Input and output have the same shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const float* input1_data, const float* input2_data, + float* output_data) { + return reference_ops::Mul(params, shape, input1_data, shape, input2_data, + shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& 
params, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int32_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data) { + return tflite::reference_integer_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int16_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int64_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data) { + return tflite::reference_integer_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& filter_shape, const float* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data) { + return tflite::reference_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + int16_t* vector) { + for (int i = 0; i < v_size; i++) { + vector[i] = + std::max(std::min(cell_state_info.quantized_cell_clip, vector[i]), + static_cast(-cell_state_info.quantized_cell_clip)); + } +} + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + float* vector) { + for (int i = 0; i < v_size; i++) { + vector[i] = std::max(std::min(cell_state_info.cell_clip, vector[i]), + -cell_state_info.cell_clip); + } +} + +// Increment the data offset so the sigle time step invocation call can access +// the corresponding input/output 
tensor data at the time step +void LstmStepManager::UpdateTime() { + current_time_ += 1; + TFLITE_DCHECK_LE(current_time_, size_info_.time_steps); + // default as one batch per inference + int input_step = size_info_.input_dimension; + int output_step = size_info_.state_dimension; + // time major: batch inference + if (size_info_.time_major) { + input_step = input_step * size_info_.batch_size; + output_step = output_step * size_info_.batch_size; + } + + input_offset_ += input_step; + output_offset_ += output_step; +} + +// Increment the data offset so the sigle time step invocation call can access +// the corresponding hidden/cell state tensor data at the time step (for single +// batch inference only) +void LstmStepManager::UpdateBatch() { + current_batch_ += 1; + TFLITE_DCHECK_LE(current_batch_, size_info_.batch_size); + // batch inference for time major: no action needed + if (size_info_.time_major) { + return; + } + // otherwise: singe batch inference, go to the next batch + hidden_state_offset_ += size_info_.state_dimension; + cell_state_offset_ += size_info_.state_dimension; +} + +// Input shape for each single time LSTM invocation. +// Multi-batch for time_major input +RuntimeShape LstmStepManager::InputShape() const { + int batch_size = 1; + if (size_info_.time_major) { + batch_size = size_info_.batch_size; + } + const int dims[2] = {batch_size, size_info_.input_dimension}; + const int32_t* dims_data = reinterpret_cast(dims); + return RuntimeShape(2, dims_data); +} + +// State shape (both hidden and cell) for each single time LSTM invocation. 
+// Multi-batch for time_major input +RuntimeShape LstmStepManager::StateShape() const { + int batch_size = 1; + if (size_info_.time_major) { + batch_size = size_info_.batch_size; + } + const int dims[2] = {batch_size, size_info_.state_dimension}; + const int32_t* dims_data = reinterpret_cast(dims); + return RuntimeShape(2, dims_data); +} + +} // namespace lstm_internal +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/lstm_eval.h b/src/tensorflow/lite/micro/kernels/lstm_eval.h new file mode 100644 index 0000000..62bc635 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/lstm_eval.h @@ -0,0 +1,541 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Functions to perform integer evaulation for standard LSTM (e.g., defined in +// the keras lstm layer, no peephole etc.). 
Currently used by the 16 bits +// activation case only + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_ +#include +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/lstm_shared.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +// Interface to access all the TempTfLiteTensors of the LSTM kernel during the +// preparation phase. Can only be constructed through the constructor to avoid +// memory leakage. All TempTfLiteTensors will be deallocated through the +// destructor. +class LstmTensors { + public: + LstmTensors(const LstmTensors& other) = delete; + LstmTensors& operator=(const LstmTensors& other) = delete; + + LstmTensors(TfLiteContext* context, TfLiteNode* node); + ~LstmTensors(); + + // Verify the LSTM internal tensor properties (e.g., type checks) + // Input/output/states/fc weights tensors are required for kernel evaulation. + // The state tensors should be variables. Variants of the standard LSTM + // are not supported here, therefore their corresponding tensors should be + // invalid + TfLiteStatus ValidateTensorStatus(TfLiteContext* context) const; + + // Internal tensors. 
see lstm_shared.h for tensor names + const TfLiteTensor* GetInternalTensor(const int tensor_index) const { + return internal_tensors_[tensor_index]; + } + + const TfLiteTensor* HiddenStateTensor() const { + return internal_tensors_[kLstmOutputStateTensor]; + } + const TfLiteTensor* CellStateTensor() const { + return internal_tensors_[kLstmCellStateTensor]; + } + const TfLiteTensor* OutputTensor() const { return output_tensor_; } + + private: + // see lstm_shared.h for tensor names + MicroContext* micro_context_; + TfLiteTensor* internal_tensors_[24]; + TfLiteTensor* output_tensor_; +}; + +// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I), +// State dimension (S)) that defines the LSTM using the input and hidden state +// tensor +LstmSizeInfo CreateLstmSizeInfo( + const bool time_major, const TfLiteIntArray* input_tensor_shape, + const TfLiteIntArray* hidden_state_tensor_shape); + +TfLiteStatus ValidateWeightTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int dim1_size, + int dim2_size); + +TfLiteStatus ValidateBiasTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int size); + +// Go through every tensors and make sure their shape match the kernel +// configuration +TfLiteStatus ValidateTensorSize(TfLiteContext* context, + const LstmTensors& tensors, + const LstmSizeInfo& size_info); + +// Wrapper function to create gate parameters for the four internal LSTM gates +TfLiteStatus CreateGateParams( + TfLiteContext* context, + /*Input tensors*/ + const TfLiteTensor* input, const TfLiteTensor* input_weight, + const TfLiteTensor* input_bias, + /*Hidden state tensors*/ + const TfLiteTensor* hidden_state, const TfLiteTensor* hidden_state_weight, + const TfLiteTensor* hidden_state_bias, + /*Scale of the fc output (input to non-linear activation)*/ + const float nonlinear_activation_input_scale, const TfLiteType cell_type, + const tflite::GateParameters& gate_params); + +// Create parameters for element wise 
multiplication that happens in a) cell +// state update ; b) hidden state update +// Note that all the output of gates are symmetrically quantized so only scales +// are required for input. However, during the hidden state update phase, the +// output is the updated hidden state, which is asymmetrically quantized. Thus +// output may require zero point +tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale, + const float input2_scale, + const float output_scale, + const TfLiteType output_type, + const int output_zp = 0); + +// Create the additional information about the cell state, which include: +// cell_state_scale_power: used in integer nonlinear function (e.g., tanh) +// quantized_cell_clip: quantized cell clip range +CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale, + const float cell_clip); + +CellStateInfo CreateLstmCellStateInfoFloat(const float cell_clip); +tflite::FullyConnectedParams CreateFCParamsFloat(); + +tflite::GateParameters CreateGateParamsFloat(); + +tflite::ArithmeticParams CreateInterGateMulParamsFloat(); + +TfLiteStatus PrepareGateParametersFloat(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data_lstm); + +TfLiteStatus PrepareGateParametersInteger(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data_lstm); + +LSTMKernelContents CreateLSTMKernelContent(TfLiteContext* context, + TfLiteNode* node); + +template +LSTMBuffers CreateLSTMBuffers(TfLiteContext* context, + const int* buffer_indices) { + LSTMBuffers buffers; + buffers.buffer0 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[0])); + buffers.buffer1 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[1])); + buffers.buffer2 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[2])); + buffers.buffer3 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[3])); + return buffers; +} + +// Since LSTM includes multiple 
intermediate stages, introducing the internal +// namespace to expose them for testing +namespace lstm_internal { + +void Sigmoid(const RuntimeShape& data_shape, int16_t* data); + +void Sigmoid(const RuntimeShape& data_shape, float* data); + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + int16_t* input_data, const RuntimeShape& output_data_shape, + int16_t* output_data); + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + float* input_data, const RuntimeShape& output_data_shape, + float* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int8_t* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int16_t* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const float* input1_data, const float* input2_data, + float* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int32_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int16_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int64_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& filter_shape, const float* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data); + +void AddElementWise(const int16_t* input_1, const int16_t* 
input_2, int n_batch, + int n_input, int16_t* output); + +void AddElementWise(const float* input_1, const float* input_2, int n_batch, + int n_input, float* output); + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + int16_t* vector); + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + float* vector); + +// Manages the slice position (offset), slice length (sliced tensor shape), +// and update rules for input/output/hidden state/cell state tensors at each +// time step. +class LstmStepManager { + public: + LstmStepManager() = delete; + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. + explicit LstmStepManager(const LstmSizeInfo* size_info) + : size_info_(*size_info) {} + + void UpdateTime(); + void UpdateBatch(); + + void ResetTime() { current_time_ = 0; } + RuntimeShape InputShape() const; + RuntimeShape StateShape() const; + + int InputOffset() const { return input_offset_; } + int OutputOffset() const { return output_offset_; } + int HiddenStateOffset() const { return hidden_state_offset_; } + int CellStateOffset() const { return cell_state_offset_; } + + private: + int current_time_ = 0; + int current_batch_ = 0; + int input_offset_ = 0; + int output_offset_ = 0; + int hidden_state_offset_ = 0; + int cell_state_offset_ = 0; + // Sizeinfo is from LstmOpData, which reside in the memory arena + // (guarante to outlast LSTMStepManager, which reside in stack) + const LstmSizeInfo& size_info_; +}; + +// Calculates a single LSTM gate. 
+// Implements the following formula: +// gate = activate(FC(input) + FC(recurrent)) +// Activation is sigmoid except for the "cell" gate (configurable, usually tanh) +template +void CalculateLstmGate( + const LstmStepManager& step_info, const GateParameters& gate_params, + // Input FC + const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Output + CellType* gate_output, + // Scratch arrays + CellType* fc_output_buffer, const TfLiteFusedActivation activation) { + const auto gate_output_shape = step_info.StateShape(); + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE(step_info.InputOffset() + step_info.InputShape().FlatSize(), + tflite::micro::GetTensorShape(input).FlatSize()); + TFLITE_DCHECK_LE( + step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(recurrent).FlatSize()); + + // Input FC + FullyConnected(gate_params.input_fc_params, step_info.InputShape(), + tflite::micro::GetTensorData(input) + + step_info.InputOffset(), + micro::GetTensorShape(input_weight), + tflite::micro::GetTensorData(input_weight), + tflite::micro::GetTensorShape(input_bias), + tflite::micro::GetOptionalTensorData(input_bias), + gate_output_shape, gate_output); + + // Recurrent FC + FullyConnected(gate_params.recurrent_fc_params, step_info.StateShape(), + tflite::micro::GetTensorData(recurrent) + + step_info.HiddenStateOffset(), + tflite::micro::GetTensorShape(recurrent_weight), + tflite::micro::GetTensorData(recurrent_weight), + tflite::micro::GetTensorShape(recurrent_bias), + tflite::micro::GetOptionalTensorData(recurrent_bias), + gate_output_shape, fc_output_buffer); + + AddElementWise(gate_output, fc_output_buffer, + /*n_batch=*/gate_output_shape.DimsData()[0], + /*n_state=*/gate_output_shape.DimsData()[1], gate_output); + // 
Apply activation + switch (activation) { + case kTfLiteActSigmoid: + Sigmoid(gate_output_shape, gate_output); + break; + case kTfLiteActTanh: { + // Set the scale power to -12 to avoid shift + Tanh(/*cell_state_scale_power=*/-12, gate_output_shape, gate_output, + gate_output_shape, gate_output); + } break; + default: + // Only Sigmoid or Tanh is used. + TFLITE_ASSERT_FALSE; + } +} + +// Update the cell state using the output from the forget gate, input gate, and +// cell gate Formula: updated_cell_state = forget_gate_output*cell_state + +// input_gate_output * cell_gate_output, where * denotes element wise +// multiplication +template +void UpdateLstmCell(const LstmStepManager& step_info, + TfLiteEvalTensor* cell_state, + // Gate outputs + CellType* forget_gate_output, + const CellType* input_gate_output, + const CellType* cell_gate_output, + // Mul parameters + const ArithmeticParams& forget_cell_mul_params, + const ArithmeticParams& input_mul_params, + const CellStateInfo& cell_state_info, CellType* buffer) { + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.CellStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(cell_state).FlatSize()); + + auto cell_state_shape = step_info.StateShape(); + // Forget Gate x Cell State + Mul(cell_state_shape, forget_cell_mul_params, forget_gate_output, + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(), + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + // Input Gate x Cell Gate + Mul(cell_state_shape, input_mul_params, input_gate_output, cell_gate_output, + buffer); + + // Update the cell state + AddElementWise(tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(), + buffer, + /*n_batch=*/cell_state_shape.DimsData()[0], + /*n_state=*/cell_state_shape.DimsData()[1], + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + + if (cell_state_info.cell_clip > 0) { + 
Clipping(cell_state_shape.FlatSize(), cell_state_info, + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + } +} + +// Update the hidden state of the LSTM kernel using the following formula: +// updated_hidden_state = Tanh(updated_cell_state) * output_gate_output, * means +// element wise multiplication +template +void UpdateLstmHidden(const LstmStepManager& step_info, + TfLiteEvalTensor* cell_state, + TfLiteEvalTensor* hidden_state, + const CellType* output_gate_output, + const ArithmeticParams& mul_params, + int32_t cell_state_scale_power, CellType* buffer) { + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.CellStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(cell_state).FlatSize()); + TFLITE_DCHECK_LE( + step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(hidden_state).FlatSize()); + + auto cell_state_shape = step_info.StateShape(); + CellType* cell_state_data = + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(); + // Tanh(cell_state) + Tanh(cell_state_scale_power, cell_state_shape, cell_state_data, + cell_state_shape, buffer); + // Update the hidden state + Mul(cell_state_shape, mul_params, buffer, output_gate_output, + tflite::micro::GetTensorData(hidden_state) + + step_info.HiddenStateOffset()); +} + +template +void LstmStep(const LstmStepManager& step_info, const OpDataLSTM& op_data, + LSTMKernelContents& kernel_content, + const LSTMBuffers& buffers) { + /*Step1: Calculate gate outputs to prepare cell state update*/ + CellType* gate_internal_buffer = buffers.buffer3; + CellType* forget_gate_output = buffers.buffer0; + CalculateLstmGate( + step_info, op_data.forget_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor), + 
kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToForgetWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + forget_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + // Input Gate calculation; + CellType* input_gate_output = buffers.buffer1; + CalculateLstmGate( + step_info, op_data.input_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToInputWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToInputWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + input_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + // Cell Gate calculation + CellType* cell_gate_output = buffers.buffer2; + CalculateLstmGate( + step_info, op_data.cell_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToCellWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToCellWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + cell_gate_output, + // Scratch arrays + gate_internal_buffer, op_data.cell_gate_nonlinear_type); + + /*Step2: update the cell state */ + const InterGateParameters& inter_gate_params = op_data.inter_gate_parameters; + CellType* updated_input_buffer = buffers.buffer1; // reuse buffer + + UpdateLstmCell(step_info, kernel_content.CellStateTensor(), + forget_gate_output, input_gate_output, + cell_gate_output, + inter_gate_params.forget_cell_mul_params, + 
inter_gate_params.input_mul_params, + op_data.cell_state_info, updated_input_buffer); + + /*Step3: update the hidden state */ + CellType* output_gate_output = buffers.buffer1; // reuse buffer + CalculateLstmGate( + step_info, op_data.output_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToOutputWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToOutputWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + output_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + CellType* tanh_activated_cell_buffer = buffers.buffer0; // reuse buffer + tflite::lstm_internal::UpdateLstmHidden( + step_info, kernel_content.CellStateTensor(), + kernel_content.HiddenStateTensor(), output_gate_output, + inter_gate_params.output_mul_params, + op_data.cell_state_info.cell_state_scale_power, + tanh_activated_cell_buffer); + + /*Step4: copy the update the hidden state to output*/ + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.OutputOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(kernel_content.output_tensor).FlatSize()); + // record the output (from the updated hidden state) + ActivationType* output_ptr = tflite::micro::GetTensorData( + kernel_content.output_tensor); + const auto* hidden_state = kernel_content.HiddenStateTensor(); + std::memcpy(output_ptr + step_info.OutputOffset(), + tflite::micro::GetTensorData(hidden_state) + + step_info.HiddenStateOffset(), + step_info.StateShape().FlatSize() * sizeof(ActivationType)); +} + +} // namespace lstm_internal + +// Evaulate the LSTM kernel with (potential) multi-steps and multi-batch input +// Since +template +TfLiteStatus EvalLstm(const OpDataLSTM& op_data, + LSTMKernelContents& kernel_content, + 
const LSTMBuffers& buffers) { + lstm_internal::LstmStepManager step_info(&op_data.size_info); + const auto& size_info = op_data.size_info; + // time is the first dimention, enable batch computation + if (size_info.time_major) { + for (int t = 0; t < size_info.time_steps; t++) { + lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + // prepare for the next time step + step_info.UpdateTime(); + } + } else { + // batch first, unable to size the input data. single batch inference + for (int b = 0; b < size_info.batch_size; b++) { + for (int t = 0; t < size_info.time_steps; t++) { + lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + // prepare for the next time step + step_info.UpdateTime(); + } + // prepare for the next batch + step_info.UpdateBatch(); + step_info.ResetTime(); + } + } + return kTfLiteOk; +} +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_16ACT_H_ diff --git a/src/tensorflow/lite/micro/kernels/lstm_eval_common.cpp b/src/tensorflow/lite/micro/kernels/lstm_eval_common.cpp new file mode 100644 index 0000000..9631b4c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/lstm_eval_common.cpp @@ -0,0 +1,326 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" +#include "tensorflow/lite/micro/kernels/lstm_eval.h" + +namespace tflite { + +// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I), +// State dimension (S)) that defines the LSTM using the input and hidden state +// tensor +LstmSizeInfo CreateLstmSizeInfo( + const bool time_major, const TfLiteIntArray* input_tensor_shape, + const TfLiteIntArray* hidden_state_tensor_shape) { + LstmSizeInfo size_info; + size_info.time_major = time_major; + size_info.batch_size = + time_major ? input_tensor_shape->data[1] : input_tensor_shape->data[0]; + size_info.time_steps = + time_major ? input_tensor_shape->data[0] : input_tensor_shape->data[1]; + size_info.input_dimension = input_tensor_shape->data[2]; + size_info.state_dimension = hidden_state_tensor_shape->data[1]; + return size_info; +} + +TfLiteStatus ValidateWeightTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int dim1_size, + int dim2_size) { + TF_LITE_ENSURE_EQ(context, tensor->dims->size, 2); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[0], dim1_size); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[1], dim2_size); + return kTfLiteOk; +} + +TfLiteStatus ValidateBiasTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int size) { + TF_LITE_ENSURE_EQ(context, tensor->dims->size, 1); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[0], size); + return kTfLiteOk; +} + +// Go through every tensors and make sure their shape match the kernel +// configuration +TfLiteStatus ValidateTensorSize(TfLiteContext* context, + const LstmTensors& tensors, + const LstmSizeInfo& size_info) { + // Input FC weights + for (size_t i = 1; i < 5; i++) { + TF_LITE_ENSURE_OK( + context, ValidateWeightTensorSize(context, 
tensors.GetInternalTensor(i), + size_info.state_dimension, + size_info.input_dimension)); + } + // Recurrent FC weights + for (size_t i = 5; i < 9; i++) { + TF_LITE_ENSURE_OK( + context, ValidateWeightTensorSize(context, tensors.GetInternalTensor(i), + size_info.state_dimension, + size_info.state_dimension)); + } + // Biases + for (size_t i = 12; i < 16; i++) { + TF_LITE_ENSURE_OK( + context, ValidateBiasTensorSize(context, tensors.GetInternalTensor(i), + size_info.state_dimension)); + } + + // Check the shape of input state tensors. + // These tensor may be 1D or 2D. It's fine as long as the total size is + // correct. + TF_LITE_ENSURE_EQ(context, NumElements(tensors.HiddenStateTensor()), + size_info.batch_size * size_info.state_dimension); + TF_LITE_ENSURE_EQ(context, NumElements(tensors.CellStateTensor()), + size_info.batch_size * size_info.state_dimension); + + // Check the shape of output tensor against that of input tensor + TF_LITE_ENSURE_EQ(context, tensors.OutputTensor()->dims->size, 3); + TF_LITE_ENSURE_EQ(context, + tensors.GetInternalTensor(kLstmInputTensor)->dims->data[0], + tensors.OutputTensor()->dims->data[0]); + TF_LITE_ENSURE_EQ(context, + tensors.GetInternalTensor(kLstmInputTensor)->dims->data[1], + tensors.OutputTensor()->dims->data[1]); + TF_LITE_ENSURE_EQ(context, tensors.OutputTensor()->dims->data[2], + size_info.state_dimension); + return kTfLiteOk; +} + +// Wrapper function to create gate parameters for the four internal LSTM gates +TfLiteStatus CreateGateParams( + TfLiteContext* context, + /*Input tensors*/ + const TfLiteTensor* input, const TfLiteTensor* input_weight, + const TfLiteTensor* input_bias, + /*Hidden state tensors*/ + const TfLiteTensor* hidden_state, const TfLiteTensor* hidden_state_weight, + const TfLiteTensor* hidden_state_bias, + /*Scale of the fc output (input to non-linear activation)*/ + const float nonlinear_activation_input_scale, const TfLiteType cell_type, + tflite::GateParameters& gate_params) { + // A temp tflite 
tensor to represent the output of fc operation. Only the data + // type and quantization parameters are set since it is only used for + // parameter calculations + TfLiteTensor fc_output_temp; + fc_output_temp.type = cell_type; + fc_output_temp.params.scale = nonlinear_activation_input_scale; + fc_output_temp.params.zero_point = 0; // symmetrical quantized + + // A temp fc opdata to reuse the helper function on creating fc parameters + tflite::OpDataFullyConnected fc_data_temp; + // TODO(b/265853320): due to the lack of precision for the float scale, + // scale_diff / output_scale <= 0.02 (potentially requires 1e-8 precision) can + // not be satisified for the bias. Here we rely on the correctiveness of the + // conversion process (set input_bias=nullptr to avoid checking) for + // tensor scales + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, kTfLiteActNone, input->type, input, input_weight, + /*input_bias=*/nullptr, &fc_output_temp, &fc_data_temp)); + gate_params.input_fc_params = FullyConnectedParamsQuantized(fc_data_temp); + double real_multiplier = 0.0; + GetQuantizedConvolutionMultipler(context, input, input_weight, nullptr, + &fc_output_temp, &real_multiplier); + + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, kTfLiteActNone, hidden_state->type, hidden_state, + hidden_state_weight, hidden_state_bias, &fc_output_temp, &fc_data_temp)); + gate_params.recurrent_fc_params = FullyConnectedParamsQuantized(fc_data_temp); + return kTfLiteOk; +} + +// Create parameters for element wise multiplication that happens in a) cell +// state update ; b) hidden state update +// Note that all the output of gates are symmetrically quantized so only scales +// are required for input. However, during the hidden state update phase, the +// output is the updated hidden state, which is asymmetrically quantized. 
Thus +// output may require zero point +tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale, + const float input2_scale, + const float output_scale, + const TfLiteType output_type, + const int output_zp) { + tflite::ArithmeticParams op_params = {}; + if (output_type == kTfLiteInt16) { + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + } else if (output_type == kTfLiteInt8) { + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + } + + op_params.input1_offset = 0; // symmetric + op_params.input2_offset = 0; // symmetric + op_params.output_offset = output_zp; + + const double input_product_scale = + static_cast(input1_scale) * static_cast(input2_scale); + double effective_scale = + input_product_scale / static_cast(output_scale); + + QuantizeMultiplier(effective_scale, &op_params.output_multiplier, + &op_params.output_shift); + return op_params; +} + +// Create the additional information about the cell state, which include: +// cell_state_scale_power: used in integer nonlinear function (e.g., tanh) +// quantized_cell_clip: quantized cell clip range +CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale, + const float cell_clip) { + CellStateInfo cell_state_info; + // cell_state_scale_power: 2^-cell_state_scale_power = cell state scale + int buffer; + tflite::CheckedLog2(cell_state_scale, &buffer); + cell_state_info.cell_state_scale_power = buffer; + // Cell state specifics + cell_state_info.cell_clip = cell_clip; + cell_state_info.quantized_cell_clip = static_cast( + std::min(std::max(static_cast(cell_clip) / + static_cast(cell_state_scale), + -32768.0), + 32767.0)); + + return cell_state_info; +} + +CellStateInfo CreateLstmCellStateInfoFloat(const float cell_clip) { + CellStateInfo cell_state_info; + cell_state_info.cell_clip = cell_clip; + 
cell_state_info.cell_state_scale_power = 0; // no quantization + cell_state_info.quantized_cell_clip = 0; // no quantization + return cell_state_info; +} + +tflite::FullyConnectedParams CreateFCParamsFloat() { + FullyConnectedParams op_params; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +tflite::GateParameters CreateGateParamsFloat() { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParamsFloat(); + gate_params.recurrent_fc_params = CreateFCParamsFloat(); + return gate_params; +} + +tflite::ArithmeticParams CreateInterGateMulParamsFloat() { + tflite::ArithmeticParams op_params = {}; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +TfLiteStatus PrepareGateParametersFloat(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data_lstm) { + // Gate Parameters + op_data_lstm->forget_gate_parameters = CreateGateParamsFloat(); + op_data_lstm->input_gate_parameters = CreateGateParamsFloat(); + op_data_lstm->cell_gate_parameters = CreateGateParamsFloat(); + op_data_lstm->output_gate_parameters = CreateGateParamsFloat(); + // Inter gate multiplication parameters + op_data_lstm->inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParamsFloat(); + op_data_lstm->inter_gate_parameters.input_mul_params = + CreateInterGateMulParamsFloat(); + op_data_lstm->inter_gate_parameters.output_mul_params = + CreateInterGateMulParamsFloat(); + return kTfLiteOk; +} + +TfLiteStatus PrepareGateParametersInteger(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data_lstm) { + float nonlinear_input_scale = 0.000244140625; // 2^-12 Q3.12 -> Q0.15 + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + 
lstm_tensors.GetInternalTensor(kLstmInputToForgetWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmForgetGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToForgetWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data_lstm->forget_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToInputWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmInputGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToInputWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data_lstm->input_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToCellWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmCellGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToCellWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data_lstm->cell_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToOutputWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmOutputGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToOutputWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data_lstm->output_gate_parameters)); + + // Inter gate multiplication parameters + float nonlinear_output_scale = 0.000030517578125; // 2^-15 Q3.12 -> Q0.15 + float cell_state_scale = lstm_tensors.CellStateTensor()->params.scale; + // 
forget gate output (nonlinear output) x cell state -> cell state + op_data_lstm->inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParams(nonlinear_output_scale, cell_state_scale, + cell_state_scale, kTfLiteInt16); + // input gate output x cell gate output -> cell state + op_data_lstm->inter_gate_parameters.input_mul_params = + CreateInterGateMulParams(nonlinear_output_scale, nonlinear_output_scale, + cell_state_scale, kTfLiteInt16); + // tanh output x output gate output -> hidden state (potentially asymmetric) + op_data_lstm->inter_gate_parameters.output_mul_params = + CreateInterGateMulParams( + nonlinear_output_scale, nonlinear_output_scale, + lstm_tensors.HiddenStateTensor()->params.scale, + lstm_tensors.HiddenStateTensor()->type, + lstm_tensors.HiddenStateTensor()->params.zero_point); + return kTfLiteOk; +} + +LSTMKernelContents CreateLSTMKernelContent(TfLiteContext* context, + TfLiteNode* node) { + LSTMKernelContents kernel_content; + // Point to correct tensors + for (size_t i = 0; i < 24; i++) { + kernel_content.internal_tensors[i] = + tflite::micro::GetMutableEvalInput(context, node, i); + } + // Output tensor + kernel_content.output_tensor = tflite::micro::GetEvalOutput(context, node, 0); + return kernel_content; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/lstm_eval_test.h b/src/tensorflow/lite/micro/kernels/lstm_eval_test.h new file mode 100644 index 0000000..aee12cf --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/lstm_eval_test.h @@ -0,0 +1,817 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ + +#include +#include + +#include "tensorflow/lite/micro/kernels/lstm_eval.h" +#include "tensorflow/lite/micro/kernels/testdata/lstm_test_data.h" +#include "tensorflow/lite/micro/test_helpers.h" +#include "tensorflow/lite/micro/testing/micro_test.h" + +namespace tflite { +namespace testing { + +/*Helper Functions (mainly about mimicking the kernel preparation)*/ + +// Create fully connected parameters using quantization settings of input and +// weight tensors. 
+// Since TfLiteContext is not available during the kernel test, here we mimic +// (put into stack memory) CalculateOpDataFullyConnected in +// tensorflow/lite/micro/kernels/fully_connected_common.cc +template +tflite::FullyConnectedParams CreateFCParams( + const TensorQuantizationParameters& input_quant_params, + const TensorQuantizationParameters& weight_quant_params, + const float nonlinear_activation_input_scale) { + OpDataFullyConnected data; + const double input_product_scale = + input_quant_params.scale * weight_quant_params.scale; + double effective_scale = + input_product_scale / + static_cast(nonlinear_activation_input_scale); + + QuantizeMultiplier(effective_scale, &data.output_multiplier, + &data.output_shift); + + data.input_zero_point = input_quant_params.zero_point; + + data.filter_zero_point = 0; // symmetrically quantized + data.output_zero_point = 0; // symmetrically quantized + + data.output_activation_min = std::numeric_limits::min(); + data.output_activation_max = std::numeric_limits::max(); + + return tflite::FullyConnectedParamsQuantized(data); +} + +inline tflite::FullyConnectedParams CreateFCParamsFloat() { + FullyConnectedParams op_params; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +// Wrapper function to create gate parameters for the four internal LSTM gates +template +tflite::GateParameters CreateGateParams( + const TensorQuantizationParameters& input_quant_params, + const TensorQuantizationParameters& hidden_state_quant_params, + const GateQuantizationParameters& gate_quantization_settings, + const float nonlinear_activation_input_scale) { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParams( + input_quant_params, gate_quantization_settings.activation_weight, + nonlinear_activation_input_scale); + gate_params.recurrent_fc_params = CreateFCParams( + hidden_state_quant_params, 
gate_quantization_settings.recurrent_weight, + nonlinear_activation_input_scale); + return gate_params; +} + +inline tflite::GateParameters CreateGateParamsFloat() { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParamsFloat(); + gate_params.recurrent_fc_params = CreateFCParamsFloat(); + return gate_params; +} +// Create parameters for element wise multiplication that happens in a) cell +// state update ; b) hidden state update +// Note that all the output of gates are symmetrically quantized so only scales +// are required for input. However, during the hidden state update phase, the +// output is the updated hidden state, which is asymmetrically quantized. Thus +// output may require zero point +template +tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale, + const float input2_scale, + const float output_scale, + const int output_zp = 0) { + tflite::ArithmeticParams op_params = {}; + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + op_params.input1_offset = 0; + op_params.input2_offset = 0; + op_params.output_offset = output_zp; + + const double input_product_scale = + static_cast(input1_scale) * static_cast(input2_scale); + double effective_scale = + input_product_scale / static_cast(output_scale); + + QuantizeMultiplier(effective_scale, &op_params.output_multiplier, + &op_params.output_shift); + return op_params; +} + +inline tflite::ArithmeticParams CreateInterGateMulParamsFloat() { + tflite::ArithmeticParams op_params = {}; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +// Create the additional information about the cell state, which include: +// cell_state_scale_power: used in integer nonlinear function (e.g., tanh) +// quantized_cell_clip: quantized cell clip range +CellStateInfo CreateLstmCellStateInfo(const float 
cell_state_scale, + const float cell_clip) { + CellStateInfo cell_state_info; + // cell_state_scale_power: 2^-cell_state_scale_power = cell state scale + int buffer; + tflite::CheckedLog2(cell_state_scale, &buffer); + cell_state_info.cell_state_scale_power = buffer; + // Cell state specifics + cell_state_info.cell_clip = cell_clip; + cell_state_info.quantized_cell_clip = static_cast( + std::min(std::max(static_cast(cell_clip) / + static_cast(cell_state_scale), + -32768.0), + 32767.0)); + return cell_state_info; +} + +// Create LSTMKernelContents from LstmNodeContent by copying TfLiteEvalTensor +// pointers +template +LSTMKernelContents CreateLSTMKernelContent( + LstmNodeContent& + node_contents) { + LSTMKernelContents kernel_content; + // Point to correct tensors + kernel_content.internal_tensors[kLstmInputTensor] = + node_contents.GetEvalTensor(kLstmInputTensor); + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToInputWeightsTensor); + kernel_content.internal_tensors[kLstmInputToForgetWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToForgetWeightsTensor); + kernel_content.internal_tensors[kLstmInputToCellWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToCellWeightsTensor); + kernel_content.internal_tensors[kLstmInputToOutputWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToOutputWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToInputWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToInputWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToForgetWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToForgetWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToCellWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToCellWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToOutputWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToOutputWeightsTensor); + 
kernel_content.internal_tensors[kLstmInputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmInputGateBiasTensor); + kernel_content.internal_tensors[kLstmForgetGateBiasTensor] = + node_contents.GetEvalTensor(kLstmForgetGateBiasTensor); + kernel_content.internal_tensors[kLstmCellGateBiasTensor] = + node_contents.GetEvalTensor(kLstmCellGateBiasTensor); + kernel_content.internal_tensors[kLstmOutputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmOutputGateBiasTensor); + kernel_content.internal_tensors[kLstmOutputStateTensor] = + node_contents.GetEvalTensor(kLstmOutputStateTensor); + kernel_content.internal_tensors[kLstmOutputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmOutputGateBiasTensor); + kernel_content.internal_tensors[kLstmCellStateTensor] = + node_contents.GetEvalTensor(kLstmCellStateTensor); + // Not used internal tensors + kernel_content.internal_tensors[kLstmCellToInputWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmCellToForgetWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmCellToOutputWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmProjectionWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmProjectionBiasTensor] = nullptr; + kernel_content.internal_tensors[kLstmInputLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmForgetLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmInputLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmCellLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmOutputLayerNormCoefficientsTensor] = + nullptr; + // Output tensor + kernel_content.output_tensor = node_contents.OutputEvalTensor(); + return kernel_content; +} + +// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I), +// State dimension (S)) that defines the LSTM using the input and hidden state +// tensor +LstmSizeInfo CreateLstmSizeInfo( + const bool 
time_major, const TfLiteIntArray* input_tensor_shape, + const TfLiteIntArray* hidden_state_tensor_shape) { + LstmSizeInfo size_info; + size_info.time_major = time_major; + size_info.batch_size = + time_major ? input_tensor_shape->data[1] : input_tensor_shape->data[0]; + size_info.time_steps = + time_major ? input_tensor_shape->data[0] : input_tensor_shape->data[1]; + size_info.input_dimension = input_tensor_shape->data[2]; + size_info.state_dimension = hidden_state_tensor_shape->data[1]; + return size_info; +} + +// Create the LstmOpData using the LstmNodeContent and +// NodeQuantizationParameters (defined in test_data/lstm_test_data) During the +// actual inference phase, OpDataLSTM is created using information from the +// flatbuffer file. The test divide the complete LSTM node information into +// LstmNodeContent and NodeQuantizationParameters for easy construction +// purposes +template +OpDataLSTM CreateLstmOpData( + LstmNodeContent& + node_contents) { + const auto& builtin_data = node_contents.BuiltinData(); + const auto& quantization_settings = node_contents.QuantizationSettings(); + OpDataLSTM op_data; + + op_data.cell_gate_nonlinear_type = builtin_data.activation; + op_data.size_info = + CreateLstmSizeInfo(builtin_data.time_major, + node_contents.GetEvalTensor(kLstmInputTensor)->dims, + node_contents.HiddenStateEvalTensor()->dims); + + op_data.cell_state_info = CreateLstmCellStateInfo( + quantization_settings.cell_state.scale, builtin_data.cell_clip); + + // Gate Parameters + op_data.forget_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.forget_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.input_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.input_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.cell_gate_parameters = CreateGateParams( + 
quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.cell_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.output_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.output_gate, + quantization_settings.nonlinear_activation_input_scale); + // Inter gate multiplication parameters + op_data.inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.scale); + op_data.inter_gate_parameters.input_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale); + op_data.inter_gate_parameters.output_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point); + return op_data; +} + +template +OpDataLSTM CreateLstmOpDataFloat( + LstmNodeContent& node_contents) { + const auto& builtin_data = node_contents.BuiltinData(); + OpDataLSTM op_data; + + op_data.cell_gate_nonlinear_type = builtin_data.activation; + op_data.size_info = + CreateLstmSizeInfo(builtin_data.time_major, + node_contents.GetEvalTensor(kLstmInputTensor)->dims, + node_contents.HiddenStateEvalTensor()->dims); + op_data.cell_state_info.cell_clip = builtin_data.cell_clip; + op_data.cell_state_info.quantized_cell_clip = 0; // No quantization + op_data.cell_state_info.cell_state_scale_power = 0; // No quantization + + // Gate Parameters + op_data.forget_gate_parameters = CreateGateParamsFloat(); + op_data.input_gate_parameters = CreateGateParamsFloat(); + op_data.cell_gate_parameters = 
CreateGateParamsFloat(); + op_data.output_gate_parameters = CreateGateParamsFloat(); + // Inter gate multiplication parameters + op_data.inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParamsFloat(); + op_data.inter_gate_parameters.input_mul_params = + CreateInterGateMulParamsFloat(); + op_data.inter_gate_parameters.output_mul_params = + CreateInterGateMulParamsFloat(); + return op_data; +} + +/*Test Functions Below Here*/ +template +void ValidateResultGoldens(const T* golden, const T* output_data, + const int output_len, const float tolerance) { + for (int i = 0; i < output_len; ++i) { + TF_LITE_MICRO_EXPECT_NEAR(golden[i], output_data[i], tolerance); + } +} + +template +void TestCalculateLstmGateFloat(const TfLiteEvalTensor* input, + const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, + const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Result comparison + TfLiteFusedActivation nonlinear_type, + const float* expected_vals, float tolerance) { + float gate_output[batch_size * state_dimension] = {}; + float fc_output_buffer[batch_size * state_dimension] = {}; + + tflite::GateParameters gate_params = CreateGateParamsFloat(); + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, input->dims, recurrent->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + tflite::lstm_internal::CalculateLstmGate( + step_info, gate_params, + // Input FC + input, input_weight, input_bias, + // Recurrent FC + recurrent, recurrent_weight, recurrent_bias, + // Output + gate_output, + // Scratch arrays + fc_output_buffer, nonlinear_type); + + ValidateResultGoldens(expected_vals, gate_output, + batch_size * state_dimension, tolerance); +} + +template +void 
TestCalculateLstmGateInteger( + const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Quantization settings + const NodeQuantizationParameters& node_quantization_settings, + const GateQuantizationParameters& gate_quantization_settings, + // Result comparison + TfLiteFusedActivation nonlinear_type, const float* expected_vals, + float tolerance) { + CellType gate_output[batch_size * state_dimension] = {}; + CellType fc_output_buffer[batch_size * state_dimension] = {}; + + tflite::GateParameters gate_params = CreateGateParams( + node_quantization_settings.input, node_quantization_settings.hidden_state, + gate_quantization_settings, + node_quantization_settings.nonlinear_activation_input_scale); + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, input->dims, recurrent->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // only int8 weight is supported now + tflite::lstm_internal::CalculateLstmGate( + step_info, gate_params, + // Input FC + input, input_weight, input_bias, + // Recurrent FC + recurrent, recurrent_weight, recurrent_bias, + // Output + gate_output, + // Scratch arrays + fc_output_buffer, nonlinear_type); + + float gate_output_float[batch_size * state_dimension] = {}; + Dequantize(gate_output, batch_size * state_dimension, + node_quantization_settings.nonlinear_activation_output_scale, 0, + gate_output_float); + + ValidateResultGoldens(expected_vals, gate_output_float, + batch_size * state_dimension, tolerance); +} + +template +void TestUpdateLstmCellFloat( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float 
tolerance) { + float buffer[batch_size * state_dimension] = {}; + + auto forget_cell_mul_params = CreateInterGateMulParamsFloat(); + auto input_mul_params = CreateInterGateMulParamsFloat(); + + auto cell_state = node_content.CellStateEvalTensor(); + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // copy the data since it will be updated + float forget_gate[batch_size * state_dimension] = {}; + std::memcpy(forget_gate, gate_output_data.expected_forget_gate_output, + batch_size * state_dimension * sizeof(float)); + + CellStateInfo cell_state_info; + cell_state_info.cell_clip = node_content.BuiltinData().cell_clip; + // Call the function to be tested + tflite::lstm_internal::UpdateLstmCell( + step_info, cell_state, forget_gate, + gate_output_data.expected_input_gate_output, + gate_output_data.expected_cell_gate_output, forget_cell_mul_params, + input_mul_params, cell_state_info, buffer); + + ValidateResultGoldens(gate_output_data.expected_updated_cell, + tflite::micro::GetTensorData(cell_state), + batch_size * state_dimension, tolerance); +} + +template +void TestUpdateLstmCellInteger( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + const auto& quantization_settings = node_content.QuantizationSettings(); + CellType quantized_forget_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_forget_gate_output, + quantized_forget_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType quantized_input_gate[batch_size * state_dimension] = {}; + 
tflite::Quantize(gate_output_data.expected_input_gate_output, + quantized_input_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType quantized_cell_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_cell_gate_output, + quantized_cell_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType buffer[batch_size * state_dimension] = {}; + + auto forget_cell_mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.scale); + auto input_mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale); + + auto cell_state_info = + CreateLstmCellStateInfo(quantization_settings.cell_state.scale, + node_content.BuiltinData().cell_clip); + + auto cell_state = node_content.CellStateEvalTensor(); + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // Call the function to be tested + tflite::lstm_internal::UpdateLstmCell( + step_info, cell_state, quantized_forget_gate, quantized_input_gate, + quantized_cell_gate, forget_cell_mul_params, input_mul_params, + cell_state_info, buffer); + + float cell_state_float[batch_size * state_dimension] = {}; + Dequantize(tflite::micro::GetTensorData(cell_state), + batch_size * state_dimension, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.zero_point, cell_state_float); 
+ + ValidateResultGoldens(gate_output_data.expected_updated_cell, + cell_state_float, batch_size * state_dimension, + tolerance); +} + +template +void TestUpdateLstmHiddenFloat( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + float buffer[batch_size * state_dimension] = {}; + + auto mul_params = CreateInterGateMulParamsFloat(); + + int32_t cell_state_scale_power = 0; + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + auto cell_state = node_content.CellStateEvalTensor(); + auto hidden_state = node_content.HiddenStateEvalTensor(); + + tflite::lstm_internal::UpdateLstmHidden( + step_info, cell_state, hidden_state, + gate_output_data.expected_output_gate_output, mul_params, + cell_state_scale_power, buffer); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + tflite::micro::GetTensorData(hidden_state), + batch_size * state_dimension, tolerance); +} + +template +void TestUpdateLstmHiddenInteger( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + const auto& quantization_settings = node_content.QuantizationSettings(); + CellType quantized_output_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_output_gate_output, + quantized_output_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType buffer[batch_size * state_dimension] = {}; + + auto mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + 
quantization_settings.nonlinear_activation_output_scale, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point); + + int cell_state_scale_power_buffer; + tflite::CheckedLog2(quantization_settings.cell_state.scale, + &cell_state_scale_power_buffer); + int32_t cell_state_scale_power = cell_state_scale_power_buffer; + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + auto cell_state = node_content.CellStateEvalTensor(); + auto hidden_state = node_content.HiddenStateEvalTensor(); + + tflite::lstm_internal::UpdateLstmHidden( + step_info, cell_state, hidden_state, quantized_output_gate, mul_params, + cell_state_scale_power, buffer); + + float hidden_state_float[batch_size * state_dimension] = {}; + Dequantize(tflite::micro::GetTensorData(hidden_state), + batch_size * state_dimension, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, hidden_state_float); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + hidden_state_float, batch_size * state_dimension, + tolerance); +} + +template +void TestLstmStepFloat( + const GateOutputCheckData& gate_output_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + /*can not be const, state will be updated*/ + LstmNodeContent& node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + LSTMBuffers buffers; + // Scratch buffers on the stack + float buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + float buffer1[batch_size * 
state_dimension] = {}; + buffers.buffer1 = buffer1; + float buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + float buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents); + // set time_major to true to test batch inference + op_data.size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info); + tflite::lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + + ValidateResultGoldens( + gate_output_data.expected_updated_hidden, + tflite::micro::GetTensorData(kernel_content.HiddenStateTensor()), + batch_size * state_dimension, hidden_state_tolerance); + ValidateResultGoldens( + gate_output_data.expected_updated_cell, + tflite::micro::GetTensorData(kernel_content.CellStateTensor()), + batch_size * state_dimension, cell_state_tolerance); +} + +template +void TestLstmStepInteger( + const GateOutputCheckData& gate_output_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + /*can not be const, state will be updated*/ + LstmNodeContent& + node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + LSTMBuffers buffers; + + // Scratch buffers on the stack + CellType buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + CellType buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + CellType buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + CellType buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpData(node_contents); + // set time_major to true to test batch inference + op_data.size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info); + tflite::lstm_internal::LstmStep(step_info, op_data, kernel_content, + 
buffers); + + const auto& quantization_settings = node_contents.QuantizationSettings(); + float dequantized_hidden_state[batch_size * state_dimension] = {}; + Dequantize( + tflite::micro::GetTensorData( + kernel_content.HiddenStateTensor()), + batch_size * state_dimension, quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, dequantized_hidden_state); + + float dequantized_cell_state[batch_size * state_dimension] = {}; + Dequantize( + tflite::micro::GetTensorData(kernel_content.CellStateTensor()), + batch_size * state_dimension, quantization_settings.cell_state.scale, + quantization_settings.cell_state.zero_point, dequantized_cell_state); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + dequantized_hidden_state, batch_size * state_dimension, + hidden_state_tolerance); + ValidateResultGoldens(gate_output_data.expected_updated_cell, + dequantized_cell_state, batch_size * state_dimension, + cell_state_tolerance); +} + +template +void TestEvalLstmFloat( + const LstmEvalCheckData< + batch_size * time_steps * input_dimension, batch_size * state_dimension, + batch_size * state_dimension * time_steps>& eval_check_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + LstmNodeContent& node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the node + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + // Scratch buffers on the stack + LSTMBuffers buffers; + float buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + float buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + float buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + float buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents); + + tflite::EvalLstm(op_data, kernel_content, + buffers); + + 
ValidateResultGoldens(eval_check_data.expected_hidden_state, + node_contents.GetHiddenStateData(), + batch_size * state_dimension, hidden_state_tolerance); + + ValidateResultGoldens(eval_check_data.expected_cell_state, + node_contents.GetCellStateData(), + batch_size * state_dimension, cell_state_tolerance); + + ValidateResultGoldens(eval_check_data.expected_output, + node_contents.GetOutputData(), + batch_size * state_dimension, hidden_state_tolerance); +} + +template +void TestEvalLstmInteger( + const LstmEvalCheckData< + batch_size * time_steps * input_dimension, batch_size * state_dimension, + batch_size * state_dimension * time_steps>& eval_check_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + LstmNodeContent& + node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the node + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + // Scratch buffers on the stack + LSTMBuffers buffers; + CellType buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + CellType buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + CellType buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + CellType buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpData(node_contents); + + tflite::EvalLstm( + op_data, kernel_content, buffers); + + const auto& quantization_settings = node_contents.QuantizationSettings(); + float dequantized_hidden_state[batch_size * state_dimension] = {}; + Dequantize(node_contents.GetHiddenStateData(), batch_size * state_dimension, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, + dequantized_hidden_state); + + ValidateResultGoldens(eval_check_data.expected_hidden_state, + dequantized_hidden_state, batch_size * state_dimension, + hidden_state_tolerance); + + float dequantized_cell_state[batch_size * 
state_dimension] = {}; + Dequantize(node_contents.GetCellStateData(), batch_size * state_dimension, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.zero_point, + dequantized_cell_state); + ValidateResultGoldens(eval_check_data.expected_cell_state, + dequantized_cell_state, batch_size * state_dimension, + cell_state_tolerance); + + float dequantized_output[batch_size * state_dimension * time_steps] = {}; + Dequantize(node_contents.GetOutputData(), + batch_size * state_dimension * time_steps, + quantization_settings.output.scale, + quantization_settings.output.zero_point, dequantized_output); + ValidateResultGoldens(eval_check_data.expected_output, dequantized_output, + batch_size * state_dimension, hidden_state_tolerance); +} + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ diff --git a/src/tensorflow/lite/micro/kernels/lstm_shared.h b/src/tensorflow/lite/micro/kernels/lstm_shared.h new file mode 100644 index 0000000..dbdc3c5 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/lstm_shared.h @@ -0,0 +1,150 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +// Input Tensors of size {n_batch, n_input} +constexpr int kLstmInputTensor = 0; + +// Input weight tensors of size: {n_cell, n_input} +constexpr int kLstmInputToInputWeightsTensor = 1; // Optional +constexpr int kLstmInputToForgetWeightsTensor = 2; +constexpr int kLstmInputToCellWeightsTensor = 3; +constexpr int kLstmInputToOutputWeightsTensor = 4; + +// Recurrent weight tensors of size {n_cell, n_output} +constexpr int kLstmRecurrentToInputWeightsTensor = 5; // Optional +constexpr int kLstmRecurrentToForgetWeightsTensor = 6; +constexpr int kLstmRecurrentToCellWeightsTensor = 7; +constexpr int kLstmRecurrentToOutputWeightsTensor = 8; + +// Peephole weights tensors of size {n_cell}, representing a diagonal matrix. +constexpr int kLstmCellToInputWeightsTensor = 9; // Optional +constexpr int kLstmCellToForgetWeightsTensor = 10; // Optional +constexpr int kLstmCellToOutputWeightsTensor = 11; // Optional + +// Gates bias tensors of size {n_cell} +constexpr int kLstmInputGateBiasTensor = 12; // Optional +constexpr int kLstmForgetGateBiasTensor = 13; +constexpr int kLstmCellGateBiasTensor = 14; +constexpr int kLstmOutputGateBiasTensor = 15; + +// Projection weight tensor of size {n_output, n_cell} +constexpr int kLstmProjectionWeightsTensor = 16; // Optional +// Projection bias tensor of size {n_output} +constexpr int kLstmProjectionBiasTensor = 17; // Optional + +// These state tensors are defined as variable tensors, and will be modified by +// this op. +constexpr int kLstmOutputStateTensor = 18; +constexpr int kLstmCellStateTensor = 19; + +// Layer norm coefficient tensors of size {n_cell}, representing a diagonal +// matrix. 
+constexpr int kLstmInputLayerNormCoefficientsTensor = 20; // Optional +constexpr int kLstmForgetLayerNormCoefficientsTensor = 21; // Optional +constexpr int kLstmCellLayerNormCoefficientsTensor = 22; // Optional +constexpr int kLstmOutputLayerNormCoefficientsTensor = 23; // Optional + +// Output tensors. +constexpr int kLstmOutputTensor = 0; + +// Parameters for the two fully conncted computation inside each gate +struct GateParameters { + FullyConnectedParams input_fc_params; + FullyConnectedParams recurrent_fc_params; +}; + +// Paramaters for the element wise multiplications between gate outputs +struct InterGateParameters { + ArithmeticParams forget_cell_mul_params; + ArithmeticParams input_mul_params; + ArithmeticParams output_mul_params; +}; + +// Size information about the LSTM kernel, which is deduced from tensors stored +// in the flat buffer file. +struct LstmSizeInfo { + bool time_major; + int batch_size; + int time_steps; + int input_dimension; + int state_dimension; +}; + +// Contains information about the cell state tensor +struct CellStateInfo { + float cell_clip; + // clipping range for cell state only 16 bits cell is supported (could be + // generalized through templatation) + int16_t quantized_cell_clip; + // 2^-cell_state_scale_power = cell state scale, required by integer tanh + // computation + int32_t cell_state_scale_power; +}; + +// Contains required computation information for LSTM kernel evaluation. +// Specifically, it includes shape and quantization settings for the LSTM +// internal operations. 
Formatted to support operations defined in the +// tensorflow/lite/kernels/internal/reference/integer_ops +// Should be constructed during the preparation phase +struct OpDataLSTM { + LstmSizeInfo size_info; + CellStateInfo cell_state_info; + TfLiteFusedActivation cell_gate_nonlinear_type; + GateParameters forget_gate_parameters; + GateParameters input_gate_parameters; + GateParameters cell_gate_parameters; + GateParameters output_gate_parameters; + InterGateParameters inter_gate_parameters; + int buffer_indices[4]; // TFLM only +}; + +// Provide an interface to access the internal tensors and buffers used for LSTM +// invocation. Constructed during the invocation phase +struct LSTMKernelContents { + public: + // Internal tensors, fixed (const). see lstm_shared.h for tensor names + const TfLiteEvalTensor* GetInternalTensor(const int tensor_index) const { + return internal_tensors[tensor_index]; + } + // Variable tensors (will be changed, can not be const) + TfLiteEvalTensor* HiddenStateTensor() { + return internal_tensors[kLstmOutputStateTensor]; + } + TfLiteEvalTensor* CellStateTensor() { + return internal_tensors[kLstmCellStateTensor]; + } + // Node internal tensors with indexes defined at the beginning of the file + TfLiteEvalTensor* internal_tensors[24]; + TfLiteEvalTensor* output_tensor; +}; + +template +struct LSTMBuffers { + // TFLM buffers requires buffer index from LstmOpData. + CellType* buffer0; + CellType* buffer1; + CellType* buffer2; + CellType* buffer3; +}; + +} // namespace tflite +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ diff --git a/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp b/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp index a7c343b..4dc87b4 100644 --- a/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp +++ b/src/tensorflow/lite/micro/kernels/maximum_minimum.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,11 +23,10 @@ limitations under the License. #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace maximum_minimum { + namespace { // This file has a reference implementation of TFMaximum/TFMinimum. @@ -64,8 +63,6 @@ struct MinimumOp { } }; -} // namespace - template void TFLiteOperation(TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) { @@ -88,12 +85,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: TFLiteOperation(context, node, op_context); break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; case kTfLiteInt8: TFLiteOperation(context, node, op_context); break; + case kTfLiteInt16: + TFLiteOperation(context, node, op_context); + break; case kTfLiteInt32: TFLiteOperation(context, node, op_context); break; @@ -101,48 +98,28 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TFLiteOperation(context, node, op_context); break; default: - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by Maximum/Minimum.", - TfLiteTypeGetName(op_context.output->type), - op_context.output->type); + MicroPrintf("Type %s (%d) is not supported by Maximum/Minimum.", + TfLiteTypeGetName(op_context.output->type), + op_context.output->type); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, - "Kernel type not supported by Maximum/Minimum."); + MicroPrintf("Kernel type not supported by Maximum/Minimum."); return kTfLiteError; } return kTfLiteOk; } -} // namespace maximum_minimum - -TfLiteRegistration Register_MAXIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, 
- /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_MAXIMUM() { + return tflite::micro::RegisterOp(nullptr, nullptr, + Eval); } -TfLiteRegistration Register_MINIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_MINIMUM() { + return tflite::micro::RegisterOp(nullptr, nullptr, + Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/micro_ops.h b/src/tensorflow/lite/micro/kernels/micro_ops.h index a65fc4f..2e33a67 100644 --- a/src/tensorflow/lite/micro/kernels/micro_ops.h +++ b/src/tensorflow/lite/micro/kernels/micro_ops.h @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ +#include "signal/micro/kernels/irfft.h" +#include "signal/micro/kernels/rfft.h" #include "tensorflow/lite/c/common.h" // Forward declaration of all micro op kernel registration methods. These @@ -31,70 +33,126 @@ namespace tflite { // (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops should // have their Register function declarations in the tflite namespace. 
-TfLiteRegistration Register_CONV_2D(); -TfLiteRegistration Register_DEPTHWISE_CONV_2D(); -TfLiteRegistration Register_QUANTIZE(); -TfLiteRegistration Register_SHAPE(); -TfLiteRegistration Register_SOFTMAX(); -TfLiteRegistration Register_SVDF(); +TFLMRegistration Register_ABS(); +TFLMRegistration Register_ADD(); +TFLMRegistration Register_ADD_N(); +TFLMRegistration Register_ARG_MAX(); +TFLMRegistration Register_ARG_MIN(); +TFLMRegistration Register_ASSIGN_VARIABLE(); +TFLMRegistration Register_AVERAGE_POOL_2D(); +TFLMRegistration Register_BATCH_MATMUL(); +TFLMRegistration Register_BATCH_TO_SPACE_ND(); +TFLMRegistration Register_BROADCAST_ARGS(); +TFLMRegistration Register_BROADCAST_TO(); +TFLMRegistration Register_CALL_ONCE(); +TFLMRegistration Register_CAST(); +TFLMRegistration Register_CEIL(); +// TODO(b/160234179): Change custom OPs to also return by value. +TFLMRegistration* Register_CIRCULAR_BUFFER(); +TFLMRegistration Register_CONCATENATION(); +TFLMRegistration Register_CONV_2D(); +TFLMRegistration Register_COS(); +TFLMRegistration Register_CUMSUM(); +TFLMRegistration Register_DEPTH_TO_SPACE(); +TFLMRegistration Register_DEPTHWISE_CONV_2D(); +TFLMRegistration Register_DEQUANTIZE(); +TFLMRegistration Register_DIV(); +TFLMRegistration Register_ELU(); +TFLMRegistration Register_EMBEDDING_LOOKUP(); +TFLMRegistration Register_EQUAL(); +TFLMRegistration* Register_ETHOSU(); +TFLMRegistration Register_EXP(); +TFLMRegistration Register_EXPAND_DIMS(); +TFLMRegistration Register_FILL(); +TFLMRegistration Register_FLOOR(); +TFLMRegistration Register_FLOOR_DIV(); +TFLMRegistration Register_FLOOR_MOD(); +TFLMRegistration Register_FULLY_CONNECTED(); +TFLMRegistration Register_GATHER(); +TFLMRegistration Register_GATHER_ND(); +TFLMRegistration Register_GREATER(); +TFLMRegistration Register_GREATER_EQUAL(); +TFLMRegistration Register_HARD_SWISH(); +TFLMRegistration Register_IF(); +TFLMRegistration Register_L2_NORMALIZATION(); +TFLMRegistration Register_L2_POOL_2D(); 
+TFLMRegistration Register_LEAKY_RELU(); +TFLMRegistration Register_LESS(); +TFLMRegistration Register_LESS_EQUAL(); +TFLMRegistration Register_LOG(); +TFLMRegistration Register_LOG_SOFTMAX(); +TFLMRegistration Register_LOGICAL_AND(); +TFLMRegistration Register_LOGICAL_NOT(); +TFLMRegistration Register_LOGICAL_OR(); +TFLMRegistration Register_LOGISTIC(); +TFLMRegistration Register_MAX_POOL_2D(); +TFLMRegistration Register_MAXIMUM(); +TFLMRegistration Register_MEAN(); +TFLMRegistration Register_MINIMUM(); +TFLMRegistration Register_MIRROR_PAD(); +TFLMRegistration Register_MUL(); +TFLMRegistration Register_NEG(); +TFLMRegistration Register_NOT_EQUAL(); +TFLMRegistration Register_PACK(); +TFLMRegistration Register_PAD(); +TFLMRegistration Register_PADV2(); +TFLMRegistration Register_PRELU(); +TFLMRegistration Register_QUANTIZE(); +TFLMRegistration Register_READ_VARIABLE(); +TFLMRegistration Register_REDUCE_MAX(); +TFLMRegistration Register_RELU(); +TFLMRegistration Register_RELU6(); +TFLMRegistration Register_RESHAPE(); +TFLMRegistration Register_RESIZE_BILINEAR(); +TFLMRegistration Register_RESIZE_NEAREST_NEIGHBOR(); +TFLMRegistration Register_ROUND(); +TFLMRegistration Register_RSQRT(); +TFLMRegistration Register_SELECT_V2(); +TFLMRegistration Register_SHAPE(); +TFLMRegistration Register_SIN(); +TFLMRegistration Register_SLICE(); +TFLMRegistration Register_SOFTMAX(); +TFLMRegistration Register_SPACE_TO_BATCH_ND(); +TFLMRegistration Register_SPACE_TO_DEPTH(); +TFLMRegistration Register_SPLIT(); +TFLMRegistration Register_SPLIT_V(); +TFLMRegistration Register_SQRT(); +TFLMRegistration Register_SQUARE(); +TFLMRegistration Register_SQUARED_DIFFERENCE(); +TFLMRegistration Register_SQUEEZE(); +TFLMRegistration Register_STRIDED_SLICE(); +TFLMRegistration Register_SUB(); +TFLMRegistration Register_SUM(); +TFLMRegistration Register_SVDF(); +TFLMRegistration Register_TANH(); +TFLMRegistration Register_TRANSPOSE(); +TFLMRegistration Register_TRANSPOSE_CONV(); +// 
TODO(b/230666079): resolve conflict with xtensa implementation +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM(); +TFLMRegistration Register_UNPACK(); +TFLMRegistration Register_VAR_HANDLE(); +TFLMRegistration Register_WHILE(); +TFLMRegistration Register_ZEROS_LIKE(); -namespace ops { -namespace micro { +// TODO(b/295174388): Add the rest of inference only registration functions. +TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED(); -TfLiteRegistration Register_ABS(); -TfLiteRegistration Register_ADD(); -TfLiteRegistration Register_ARG_MAX(); -TfLiteRegistration Register_ARG_MIN(); -TfLiteRegistration Register_AVERAGE_POOL_2D(); -TfLiteRegistration Register_CEIL(); // TODO(b/160234179): Change custom OPs to also return by value. -TfLiteRegistration* Register_CIRCULAR_BUFFER(); -TfLiteRegistration Register_CONCATENATION(); -TfLiteRegistration Register_COS(); -TfLiteRegistration Register_DEQUANTIZE(); -TfLiteRegistration Register_EQUAL(); -TfLiteRegistration Register_FLOOR(); -TfLiteRegistration Register_GREATER(); -TfLiteRegistration Register_GREATER_EQUAL(); -TfLiteRegistration Register_HARD_SWISH(); -TfLiteRegistration Register_LESS(); -TfLiteRegistration Register_LESS_EQUAL(); -TfLiteRegistration Register_LOG(); -TfLiteRegistration Register_LOGICAL_AND(); -TfLiteRegistration Register_LOGICAL_NOT(); -TfLiteRegistration Register_LOGICAL_OR(); -TfLiteRegistration Register_LOGISTIC(); -TfLiteRegistration Register_MAXIMUM(); -TfLiteRegistration Register_MAX_POOL_2D(); -TfLiteRegistration Register_MEAN(); -TfLiteRegistration Register_MINIMUM(); -TfLiteRegistration Register_MUL(); -TfLiteRegistration Register_NEG(); -TfLiteRegistration Register_NOT_EQUAL(); -TfLiteRegistration Register_PACK(); -TfLiteRegistration Register_PAD(); -TfLiteRegistration Register_PADV2(); -TfLiteRegistration Register_PRELU(); -TfLiteRegistration Register_REDUCE_MAX(); -TfLiteRegistration Register_RELU(); -TfLiteRegistration Register_RELU6(); -TfLiteRegistration 
Register_RESHAPE(); -TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR(); -TfLiteRegistration Register_ROUND(); -TfLiteRegistration Register_RSQRT(); -TfLiteRegistration Register_SIN(); -TfLiteRegistration Register_SPLIT(); -TfLiteRegistration Register_SPLIT_V(); -TfLiteRegistration Register_SQRT(); -TfLiteRegistration Register_SQUARE(); -TfLiteRegistration Register_STRIDED_SLICE(); -TfLiteRegistration Register_SUB(); -TfLiteRegistration Register_UNPACK(); -TfLiteRegistration Register_L2_NORMALIZATION(); -TfLiteRegistration Register_TANH(); +namespace tflm_signal { +TFLMRegistration* Register_DELAY(); +TFLMRegistration* Register_FFT_AUTO_SCALE(); +TFLMRegistration* Register_FILTER_BANK(); +TFLMRegistration* Register_FILTER_BANK_LOG(); +TFLMRegistration* Register_FILTER_BANK_SPECTRAL_SUBTRACTION(); +TFLMRegistration* Register_FILTER_BANK_SQUARE_ROOT(); +TFLMRegistration* Register_ENERGY(); +TFLMRegistration* Register_FRAMER(); +TFLMRegistration* Register_OVERLAP_ADD(); +TFLMRegistration* Register_PCAN(); +TFLMRegistration* Register_STACKER(); +TFLMRegistration* Register_WINDOW(); +} // namespace tflm_signal -} // namespace micro -} // namespace ops } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ diff --git a/src/tensorflow/lite/micro/kernels/micro_tensor_utils.cpp b/src/tensorflow/lite/micro/kernels/micro_tensor_utils.cpp new file mode 100644 index 0000000..628be9b --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/micro_tensor_utils.cpp @@ -0,0 +1,67 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/micro/kernels/micro_tensor_utils.h" + +#include +#include +#include +#include +#include +#include + +#include "third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/op_macros.h" + +namespace tflite { + +// Apply sigmoid to elements of a vector. +void PortableApplySigmoidToVector(const float* vector, int v_size, + float* result) { + for (int v = 0; v < v_size; v++) { + result[v] = 1.0f / (1.0f + std::exp(-vector[v])); + } +} + +void PortableApplyTanhToVector(const float* vector, int v_size, float* result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::tanh(vector[v]); + } +} + +void PortableApplyActivationToVector(const float* vector, int v_size, + TfLiteFusedActivation activation, + float* result) { + switch (activation) { + case kTfLiteActNone: + return; + case kTfLiteActRelu: + return tflite::tensor_utils::ApplyReluToVector(vector, v_size, result); + case kTfLiteActReluN1To1: + return tflite::tensor_utils::ApplyRelu1ToVector(vector, v_size, result); + case kTfLiteActRelu6: + return tflite::tensor_utils::ApplyRelu6ToVector(vector, v_size, result); + case kTfLiteActTanh: + return PortableApplyTanhToVector(vector, v_size, result); + case kTfLiteActSignBit: + return tflite::tensor_utils::ApplySignbitToVector(vector, v_size, result); + case kTfLiteActSigmoid: + 
return PortableApplySigmoidToVector(vector, v_size, result); + } +} + +} // namespace tflite \ No newline at end of file diff --git a/src/tensorflow/lite/micro/kernels/micro_tensor_utils.h b/src/tensorflow/lite/micro/kernels/micro_tensor_utils.h new file mode 100644 index 0000000..0b87f0a --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/micro_tensor_utils.h @@ -0,0 +1,56 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file and the associated .cc file is branched from +// tensorflow/lite/kernels/internal/reference/portable_tensor_utils* +// TFLM needs to create its own because the original files are coupled with +// the tensor_utils module, which we cannot reuse due to its use of the +// Eigen library. + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ + +#include +#include +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. 
+// TODO(b/230666277): consider removing this since micro does not utilize it +class CpuBackendContext; + +// Apply sigmoid to elements of a vector. +void PortableApplySigmoidToVector(const float* vector, int v_size, + float* result); +// Apply tanh to elements of a vector +void PortableApplyTanhToVector(const float* vector, int v_size, float* result); +// Apply appropriate activation function to elements of a vector. +void PortableApplyActivationToVector(const float* vector, int v_size, + TfLiteFusedActivation activation, + float* result); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ \ No newline at end of file diff --git a/src/tensorflow/lite/micro/kernels/micro_utils.h b/src/tensorflow/lite/micro/kernels/micro_utils.h deleted file mode 100644 index 85db263..0000000 --- a/src/tensorflow/lite/micro/kernels/micro_utils.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ -namespace tflite { -namespace ops { -namespace micro { - -// Same as gtl::Greater but defined here to reduce dependencies and -// binary size for micro environment. 
-struct Greater { - template - bool operator()(const T& x, const T& y) const { - return x > y; - } -}; - -struct Less { - template - bool operator()(const T& x, const T& y) const { - return x < y; - } -}; - -} // namespace micro -} // namespace ops -} // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ diff --git a/src/tensorflow/lite/micro/kernels/mirror_pad.cpp b/src/tensorflow/lite/micro/kernels/mirror_pad.cpp new file mode 100644 index 0000000..aa94e1b --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/mirror_pad.cpp @@ -0,0 +1,216 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +struct OpDataMirrorPad { + int input_dims; + int output_size; + int offset; + int output_dims_num_elements_buffer_index; + int input_dims_num_elements_buffer_index; +}; + +// Helper method that fills the left and right pads. +template +inline void GetPadding(const T* data, int offset, int64_t* left_pad, + int64_t* right_pad) { + *left_pad = static_cast(*(data + offset * 2)); + *right_pad = static_cast(*(data + offset * 2 + 1)); +} + +// Given dimension index and the left/right padding. 
+// Returns the corresponding dimension in the input array. +inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad, + int input_dim_size, int offset) { + if (padded_dimension < left_pad) { + const int original_ind = left_pad + offset - 1; + return original_ind - (std::min(padded_dimension, original_ind - offset)); + } + padded_dimension -= left_pad; + if (padded_dimension >= input_dim_size) { + padded_dimension -= input_dim_size; + const int original_ind = input_dim_size - (1 + offset); + return original_ind - std::min(padded_dimension, original_ind); + } + return padded_dimension; +} + +// Given and index in output array, returns the index of the value +// in input array. +int GetFlatIndex(int index, int num_dims, + const TfLiteEvalTensor* padding_matrix, + const TfLiteIntArray* input_dims, + int* output_dims_num_elements, int* input_dims_num_elements, + const int offset) { + int flat_index = 0; + int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input; + + for (int i = 0; i < num_dims; ++i) { + switch (padding_matrix->type) { + case kTfLiteInt32: + GetPadding(padding_matrix->data.i32, i, &left_pad, &right_pad); + break; + case kTfLiteInt64: + GetPadding(padding_matrix->data.i64, i, &left_pad, &right_pad); + break; + default: + break; + } + dimension_index = index / output_dims_num_elements[i]; + + index_in_input = GetInputDimension(dimension_index, left_pad, right_pad, + input_dims->data[i], offset); + + flat_index += index_in_input * (input_dims_num_elements)[i]; + index %= output_dims_num_elements[i]; + } + + return flat_index; +} + +template +void MirrorPad(const TfLiteEvalTensor* padding_matrix, + const TfLiteIntArray* input_dims, int* output_dims_num_elements, + int* input_dims_num_elements, const T* input_data, + T* output_data, const int offset, const int num_dims, + const int output_size) { + for (int i = 0; i < output_size; ++i) { + output_data[i] = input_data[GetFlatIndex( + i, num_dims, padding_matrix, input_dims, 
output_dims_num_elements, + input_dims_num_elements, offset)]; + } +} + +TfLiteStatus MirrorPadEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TfLiteStatus status = kTfLiteOk; + const OpDataMirrorPad* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input_tensor = + tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* padding_matrix = + tflite::micro::GetEvalInput(context, node, 1); + + TfLiteEvalTensor* output_tensor = + tflite::micro::GetEvalOutput(context, node, 0); + const int input_dims = data->input_dims; + const int output_size = data->output_size; + + int* input_dims_num_elements = (int*)context->GetScratchBuffer( + context, data->input_dims_num_elements_buffer_index); + int* output_dims_num_elements = (int*)context->GetScratchBuffer( + context, data->output_dims_num_elements_buffer_index); + + for (int i = 0; i < input_dims; i++) { + output_dims_num_elements[i] = 1; + input_dims_num_elements[i] = 1; + } + + for (int i = input_dims - 2; i >= 0; i--) { + output_dims_num_elements[i] = + output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1]; + + input_dims_num_elements[i] = + input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1]; + } + + switch (output_tensor->type) { + case kTfLiteFloat32: { + MirrorPad(padding_matrix, input_tensor->dims, output_dims_num_elements, + input_dims_num_elements, + tflite::micro::GetTensorData(input_tensor), + tflite::micro::GetTensorData(output_tensor), + data->offset, input_dims, output_size); + break; + } + case kTfLiteInt8: { + MirrorPad(padding_matrix, input_tensor->dims, output_dims_num_elements, + input_dims_num_elements, + tflite::micro::GetTensorData(input_tensor), + tflite::micro::GetTensorData(output_tensor), + data->offset, input_dims, output_size); + break; + } + default: + status = kTfLiteError; + break; + } + +#undef TF_LITE_MIRROR_PAD + + return status; +} + +void* MirrorPadInit(TfLiteContext* context, const 
char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataMirrorPad)); +} + +TfLiteStatus MirrorPadPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataMirrorPad* data = static_cast(node->user_data); + + TfLiteTensor* input_tensor = micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* padding_matrix = + micro_context->AllocateTempInputTensor(node, 1); + TfLiteTensor* output_tensor = + micro_context->AllocateTempOutputTensor(node, 0); + + TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); + TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), + NumDimensions(input_tensor)); + auto* params = + reinterpret_cast(node->builtin_data); + if (params == nullptr) { + return kTfLiteError; + } + + data->offset = + params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 
0 + : 1; + data->input_dims = NumDimensions(input_tensor); + data->output_size = NumElements(output_tensor); + + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, data->input_dims * sizeof(int), + &data->output_dims_num_elements_buffer_index)); + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, data->input_dims * sizeof(int), + &data->input_dims_num_elements_buffer_index)); + + micro_context->DeallocateTempTfLiteTensor(input_tensor); + micro_context->DeallocateTempTfLiteTensor(padding_matrix); + micro_context->DeallocateTempTfLiteTensor(output_tensor); + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_MIRROR_PAD() { + return tflite::micro::RegisterOp(MirrorPadInit, MirrorPadPrepare, + MirrorPadEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/mul.h b/src/tensorflow/lite/micro/kernels/mul.h new file mode 100644 index 0000000..9ae2d89 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/mul.h @@ -0,0 +1,74 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +extern const int kMulInput1Tensor; +extern const int kMulInput2Tensor; +extern const int kMulOutputTensor; + +struct OpDataMul { + int32_t input1_zero_point; + int32_t input2_zero_point; + + int32_t output_activation_min; + int32_t output_activation_max; + int32_t output_zero_point; + int32_t output_multiplier; + int output_shift; + + float output_activation_min_f32; + float output_activation_max_f32; +}; + +void* MulInit(TfLiteContext* context, const char* buffer, size_t length); + +TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, OpDataMul* data); + +TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output); + +void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output); + +// Generic must define registration function. 
+TFLMRegistration Register_MUL(); + +#if defined(ARDUINO) +TFLMRegistration Register_MUL_INT8(); +#else +// Fallback registration +inline TFLMRegistration Register_MUL_INT8() { return Register_MUL(); } +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ diff --git a/src/tensorflow/lite/micro/kernels/mul_common.cpp b/src/tensorflow/lite/micro/kernels/mul_common.cpp new file mode 100644 index 0000000..45e7c1e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/mul_common.cpp @@ -0,0 +1,213 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "tensorflow/lite/kernels/internal/reference/mul.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/mul.h" +#include "tensorflow/lite/micro/memory_helpers.h" + +namespace tflite { + +const int kMulInput1Tensor = 0; +const int kMulInput2Tensor = 1; +const int kMulOutputTensor = 0; + +void* MulInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataMul)); +} + +TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, OpDataMul* data) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kMulInput1Tensor); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kMulInput2Tensor); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kMulOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + 
&data->output_activation_max)); + + double real_multiplier = static_cast(input1->params.scale) * + static_cast(input2->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + + data->input1_zero_point = input1->params.zero_point; + data->input2_zero_point = input2->params.zero_point; + data->output_zero_point = output->params.zero_point; + + if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, data->input1_zero_point, 0); + TF_LITE_ENSURE_EQ(context, data->input2_zero_point, 0); + TF_LITE_ENSURE_EQ(context, data->output_zero_point, 0); + } + } else if (output->type == kTfLiteInt32) { + CalculateActivationRange(params->activation, &data->output_activation_min, + &data->output_activation_max); + } else { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataMul* data = static_cast(node->user_data); + + return CalculateOpDataMul(context, node, params, data); +} + +TfLiteStatus EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + op_params.quantized_activation_min = data->output_activation_min; + op_params.quantized_activation_max = data->output_activation_max; + op_params.float_activation_max = data->output_activation_max_f32; + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = 
-data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (input1->type == kTfLiteInt8) { + if (need_broadcast) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Mul(op_params, + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } else if (input1->type == kTfLiteInt32) { + if (need_broadcast) { + reference_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } else if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, op_params.input1_offset, 0); + TF_LITE_ENSURE_EQ(context, op_params.input2_offset, 0); + TF_LITE_ENSURE_EQ(context, op_params.output_offset, 0); + + if (need_broadcast) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + 
tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Mul(op_params, + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } + return kTfLiteOk; +} + +void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + op_params.float_activation_min = data->output_activation_min_f32; + op_params.float_activation_max = data->output_activation_max_f32; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/neg.cpp b/src/tensorflow/lite/micro/kernels/neg.cpp index 74a95ca..a76ac01 100644 --- a/src/tensorflow/lite/micro/kernels/neg.cpp +++ b/src/tensorflow/lite/micro/kernels/neg.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow 
Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,16 +18,16 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace neg { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus NegEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); TfLiteEvalTensor* output = @@ -41,26 +41,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace neg +} // namespace -TfLiteRegistration Register_NEG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/neg::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_NEG() { + return tflite::micro::RegisterOp(nullptr, nullptr, NegEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/pack.cpp b/src/tensorflow/lite/micro/kernels/pack.cpp index d332fc6..f254329 100644 --- a/src/tensorflow/lite/micro/kernels/pack.cpp +++ b/src/tensorflow/lite/micro/kernels/pack.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,11 +17,10 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace pack { + namespace { constexpr int kOutputTensor = 0; @@ -70,7 +69,7 @@ TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus PackEval(TfLiteContext* context, TfLiteNode* node) { const TfLitePackParams* data = reinterpret_cast(node->builtin_data); @@ -82,10 +81,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return PackImpl(context, node, output, data->values_count, data->axis); } - case kTfLiteUInt8: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } case kTfLiteInt8: { return PackImpl(context, node, output, data->values_count, data->axis); @@ -99,8 +94,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { data->axis); } default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Type '%s' is not supported by pack.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } } @@ -109,19 +104,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } } // namespace -} // namespace pack - -TfLiteRegistration Register_PACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/pack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + +TFLMRegistration Register_PACK() { + return tflite::micro::RegisterOp(nullptr, nullptr, PackEval); } 
-} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/pad.cpp b/src/tensorflow/lite/micro/kernels/pad.cpp index 5d9d436..29f08fa 100644 --- a/src/tensorflow/lite/micro/kernels/pad.cpp +++ b/src/tensorflow/lite/micro/kernels/pad.cpp @@ -18,16 +18,13 @@ limitations under the License. #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace pad { namespace { struct OpData { @@ -35,27 +32,115 @@ struct OpData { int32_t output_zero_point; }; -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* PadInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus PadEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, /*index=*/0); + const TfLiteEvalTensor* constant_values = + NumInputs(node) == 3 + ? tflite::micro::GetEvalInput(context, node, /*index=*/2) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, /*index=*/0); + + switch (input->type) { + case kTfLiteFloat32: { + float pad_value = + constant_values == nullptr + ? 
0.f + : *tflite::micro::GetTensorData(constant_values); + if (data->params.resizing_category == ResizingCategory::kImageStyle) { + reference_ops::PadImageStyle( + data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), &pad_value, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + case kTfLiteInt8: { + int8_t pad_value; + if (constant_values == nullptr) { + pad_value = static_cast(data->output_zero_point); + } else { + pad_value = *tflite::micro::GetTensorData(constant_values); + } + if (data->params.resizing_category == ResizingCategory::kImageStyle) { + reference_ops::PadImageStyle( + data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), &pad_value, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + case kTfLiteInt16: { + int16_t pad_value = + constant_values == nullptr + ? 0 + : *tflite::micro::GetTensorData(constant_values); + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + case kTfLiteInt32: { + int32_t pad_value = + constant_values == nullptr + ? 
0 + : *tflite::micro::GetTensorData(constant_values); + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + default: + + MicroPrintf("Type %s not currently supported by Pad.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TFLITE_DCHECK(node->user_data != nullptr); OpData* data = static_cast(node->user_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, /*index=*/0); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, /*index=*/0); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* paddings = GetInput(context, node, /*index=*/1); + TfLiteTensor* paddings = + micro_context->AllocateTempInputTensor(node, /*index=*/1); TF_LITE_ENSURE(context, paddings != nullptr); - const TfLiteTensor* constant_values = - NumInputs(node) == 3 ? GetInput(context, node, /*index=*/2) : nullptr; - TfLiteTensor* output = GetOutput(context, node, /*index=*/0); + TfLiteTensor* constant_values = + NumInputs(node) == 3 + ? 
micro_context->AllocateTempInputTensor(node, /*index=*/2) + : nullptr; + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, /*index=*/0); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, input->type, output->type); @@ -103,21 +188,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->params.right_padding[idx] = paddings_data[idx * 2 + 1]; } - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { + if (input->type == kTfLiteInt8) { if (constant_values == nullptr) { // Quantized Pad requires that 0 is represented in the quantized // range. - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } else { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } + TF_LITE_ENSURE(context, output->params.zero_point >= + std::numeric_limits::min()); + TF_LITE_ENSURE(context, output->params.zero_point <= + std::numeric_limits::max()); } else { // Quantized Pad requires that 'constant_values' is represented in the // same quantized range as the input and output tensors. @@ -129,126 +207,23 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->output_zero_point = output->params.zero_point; } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, /*index=*/0); - const TfLiteEvalTensor* constant_values = - NumInputs(node) == 3 - ? 
tflite::micro::GetEvalInput(context, node, /*index=*/2) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, /*index=*/0); - - switch (input->type) { - case kTfLiteFloat32: { - float pad_value = - constant_values == nullptr - ? 0.f - : *tflite::micro::GetTensorData(constant_values); - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteUInt8: { - uint8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt8: { - int8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - 
tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt32: { - int32_t pad_value = - constant_values == nullptr - ? 0 - : *tflite::micro::GetTensorData(constant_values); - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: - - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported by Pad.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(paddings); + if (constant_values != nullptr) { + micro_context->DeallocateTempTfLiteTensor(constant_values); } -#undef TF_LITE_PAD + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } -} // namespace pad - -TfLiteRegistration Register_PAD() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_PAD() { + return tflite::micro::RegisterOp(PadInit, PadPrepare, PadEval); } // Also register Pad as PadV2. 
-TfLiteRegistration Register_PADV2() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_PADV2() { + return tflite::micro::RegisterOp(PadInit, PadPrepare, PadEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/pad.h b/src/tensorflow/lite/micro/kernels/pad.h new file mode 100644 index 0000000..ad90890 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/pad.h @@ -0,0 +1,27 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ diff --git a/src/tensorflow/lite/micro/kernels/pooling.h b/src/tensorflow/lite/micro/kernels/pooling.h new file mode 100644 index 0000000..255fd47 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/pooling.h @@ -0,0 +1,142 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" +#include "tensorflow/lite/kernels/internal/reference/pooling.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/micro_ops.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +extern const int kPoolingInputTensor; +extern const int kPoolingOutputTensor; + +struct OpDataPooling { + TfLitePaddingValues padding; + int32_t activation_min; + int32_t activation_max; + float activation_min_f32; + float activation_max_f32; +}; + +TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context, + const TfLitePoolParams* params, + const TfLiteTensor* input, + const TfLiteTensor* output, + OpDataPooling* data); + +TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node); + +void AveragePoolingEvalFloat(const TfLiteContext* context, + const TfLiteNode* node, + const TfLitePoolParams* params, + const OpDataPooling* data, + const TfLiteEvalTensor* input, + 
TfLiteEvalTensor* output); + +template +void AveragePoolingEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, + const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16); + + PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + op_params.quantized_activation_min = data->activation_min; + op_params.quantized_activation_max = data->activation_max; + + reference_integer_ops::AveragePool(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output); + +template +void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, + const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16); + + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + op_params.quantized_activation_min = data->activation_min; + op_params.quantized_activation_max = data->activation_max; + + reference_integer_ops::MaxPool(op_params, + 
tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +#if defined(ARDUINO) || defined(XTENSA) +TFLMRegistration Register_AVERAGE_POOL_2D_INT8(); + +TFLMRegistration Register_MAX_POOL_2D_INT8(); + +TFLMRegistration Register_AVERAGE_POOL_2D_INT16(); + +TFLMRegistration Register_MAX_POOL_2D_INT16(); +#else +inline TFLMRegistration Register_AVERAGE_POOL_2D_INT8() { + return tflite::Register_AVERAGE_POOL_2D(); +} + +inline TFLMRegistration Register_MAX_POOL_2D_INT8() { + return tflite::Register_MAX_POOL_2D(); +} + +inline TFLMRegistration Register_AVERAGE_POOL_2D_INT16() { + return tflite::Register_AVERAGE_POOL_2D(); +} + +inline TFLMRegistration Register_MAX_POOL_2D_INT16() { + return tflite::Register_MAX_POOL_2D(); +} +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ diff --git a/src/tensorflow/lite/micro/kernels/pooling_common.cpp b/src/tensorflow/lite/micro/kernels/pooling_common.cpp new file mode 100644 index 0000000..b39e9d8 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/pooling_common.cpp @@ -0,0 +1,128 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" +#include "tensorflow/lite/kernels/internal/reference/pooling.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/pooling.h" + +namespace tflite { + +const int kPoolingInputTensor = 0; +const int kPoolingOutputTensor = 0; + +TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context, + const TfLitePoolParams* params, + const TfLiteTensor* input, + const TfLiteTensor* output, + OpDataPooling* data) { + // input: batch, height, width, channel + int height = SizeOfDimension(input, 1); + int width = SizeOfDimension(input, 2); + + int out_height, out_width; + + data->padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + /*dilation_rate_height=*/1, + /*dilation_rate_width=*/1, height, width, params->filter_height, + params->filter_width, params->padding, &out_height, &out_width); + + return kTfLiteOk; +} + +TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataPooling* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kPoolingInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kPoolingOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataPooling(context, params, input, output, data)); + + if (input->type == 
kTfLiteFloat32) { + CalculateActivationRange(params->activation, &data->activation_min_f32, + &data->activation_max_f32); + } else if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + CalculateActivationRangeQuantized(context, params->activation, output, + &data->activation_min, + &data->activation_max); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +void AveragePoolingEvalFloat(const TfLiteContext* context, + const TfLiteNode* node, + const TfLitePoolParams* params, + const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + 
op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/prelu.cpp b/src/tensorflow/lite/micro/kernels/prelu.cpp index b48491d..66a017b 100644 --- a/src/tensorflow/lite/micro/kernels/prelu.cpp +++ b/src/tensorflow/lite/micro/kernels/prelu.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,88 +22,16 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/prelu.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { - -TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, - const TfLiteTensor* alpha, - TfLiteTensor* output, PreluParams* params) { - if (output->type == kTfLiteInt8 || output->type == kTfLiteUInt8 || - output->type == kTfLiteInt16) { - double real_multiplier_1 = static_cast(input->params.scale) / - static_cast(output->params.scale); - double real_multiplier_2 = static_cast(input->params.scale) * - static_cast(alpha->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier_1, ¶ms->output_multiplier_1, - ¶ms->output_shift_1); - QuantizeMultiplier(real_multiplier_2, ¶ms->output_multiplier_2, - ¶ms->output_shift_2); - - params->input_offset = -input->params.zero_point; - params->alpha_offset = 
-alpha->params.zero_point; - params->output_offset = output->params.zero_point; - } - - return kTfLiteOk; -} - -} // namespace - -inline void BroadcastPrelu4DSlowFloat( - const RuntimeShape& unextended_input1_shape, const float* input1_data, - const RuntimeShape& unextended_input2_shape, const float* input2_data, - const RuntimeShape& unextended_output_shape, float* output_data) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - auto out_idx = Offset(output_shape, b, y, x, c); - auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); - auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); - auto in1_val = input1_data[in1_idx]; - auto in2_val = input2_data[in2_idx]; - output_data[out_idx] = in1_val >= 0.0f ? 
in1_val : in1_val * in2_val; - } - } - } - } -} void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(PreluParams)); } -TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - PreluParams* params = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* alpha = GetInput(context, node, 1); - TF_LITE_ENSURE(context, alpha != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - return CalculatePreluParams(input, alpha, output, params); -} - TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); const PreluParams& params = @@ -123,16 +51,6 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); return kTfLiteOk; } break; - case kTfLiteUInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; case kTfLiteInt8: { reference_ops::BroadcastPrelu4DSlow( params, tflite::micro::GetTensorShape(input), @@ -144,26 +62,14 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } break; default: - TF_LITE_KERNEL_LOG( - context, "Only float32 and uint8_t are supported currently, got %d.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Only float32 and uint8_t are supported currently, got %d.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } -} // namespace activations - -TfLiteRegistration Register_PRELU() { - 
return {/*init=*/activations::PreluInit, - /*free=*/nullptr, - /*prepare=*/activations::PreluPrepare, - /*invoke=*/activations::PreluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_PRELU() { + return tflite::micro::RegisterOp(PreluInit, PreluPrepare, PreluEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/prelu.h b/src/tensorflow/lite/micro/kernels/prelu.h new file mode 100644 index 0000000..571d1e8 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/prelu.h @@ -0,0 +1,39 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, + const TfLiteTensor* alpha, + TfLiteTensor* output, PreluParams* params); + +void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape, + const float* input1_data, + const RuntimeShape& unextended_input2_shape, + const float* input2_data, + const RuntimeShape& unextended_output_shape, + float* output_data); + +TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ diff --git a/src/tensorflow/lite/micro/kernels/prelu_common.cpp b/src/tensorflow/lite/micro/kernels/prelu_common.cpp new file mode 100644 index 0000000..1a89cad --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/prelu_common.cpp @@ -0,0 +1,105 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/prelu.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/prelu.h" + +namespace tflite { + +TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, + const TfLiteTensor* alpha, + TfLiteTensor* output, PreluParams* params) { + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + double real_multiplier_1 = static_cast(input->params.scale) / + static_cast(output->params.scale); + double real_multiplier_2 = static_cast(input->params.scale) * + static_cast(alpha->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier_1, ¶ms->output_multiplier_1, + ¶ms->output_shift_1); + QuantizeMultiplier(real_multiplier_2, ¶ms->output_multiplier_2, + ¶ms->output_shift_2); + + params->input_offset = -input->params.zero_point; + params->alpha_offset = -alpha->params.zero_point; + params->output_offset = output->params.zero_point; + } + + return kTfLiteOk; +} + +void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape, + const float* input1_data, + const RuntimeShape& unextended_input2_shape, + const float* input2_data, + const RuntimeShape& unextended_output_shape, + float* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + NdArrayDesc<4> desc1; + NdArrayDesc<4> desc2; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + 
unextended_input2_shape, &desc1, &desc2); + + for (int b = 0; b < output_shape.Dims(0); ++b) { + for (int y = 0; y < output_shape.Dims(1); ++y) { + for (int x = 0; x < output_shape.Dims(2); ++x) { + for (int c = 0; c < output_shape.Dims(3); ++c) { + auto out_idx = Offset(output_shape, b, y, x, c); + auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); + auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); + auto in1_val = input1_data[in1_idx]; + auto in2_val = input2_data[in2_idx]; + output_data[out_idx] = in1_val >= 0.0f ? in1_val : in1_val * in2_val; + } + } + } + } +} + +TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + PreluParams* params = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* alpha = micro_context->AllocateTempInputTensor(node, 1); + TF_LITE_ENSURE(context, alpha != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_OK(context, + CalculatePreluParams(input, alpha, output, params)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(alpha); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/quantize.cpp b/src/tensorflow/lite/micro/kernels/quantize.cpp index 8b9bf7e..ba11f19 100644 --- a/src/tensorflow/lite/micro/kernels/quantize.cpp +++ b/src/tensorflow/lite/micro/kernels/quantize.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/quantize.h" + +#include "tensorflow/lite/micro/kernels/quantize.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/requantize.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" @@ -25,174 +25,18 @@ limitations under the License. namespace tflite { namespace { -struct OpData { - tflite::QuantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - int32_t input_zero_point; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* InitQuantizeReference(TfLiteContext* context, const char* buffer, + size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - // TODO(b/128934713): Add support for fixed-point per-channel quantization. - // Currently this only support affine per-layer quantization. 
- TF_LITE_ENSURE_EQ(context, output->quantization.type, - kTfLiteAffineQuantization); - const auto* affine_quantization = - reinterpret_cast(output->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); - - TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 || - input->type == kTfLiteInt16 || - input->type == kTfLiteInt8); - TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8 || - output->type == kTfLiteInt16 || - output->type == kTfLiteInt32); - - if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) || - (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt32)) { - double effective_scale = static_cast(input->params.scale) / - static_cast(output->params.scale); - - QuantizeMultiplier(effective_scale, &data->output_multiplier, - &data->output_shift); - } - - data->quantization_params.zero_point = output->params.zero_point; - data->quantization_params.scale = static_cast(output->params.scale); - - data->input_zero_point = input->params.zero_point; - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - if (input->type == kTfLiteFloat32) { - switch (output->type) { - case kTfLiteInt8: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteUInt8: - reference_ops::AffineQuantize( - 
data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt16) { - size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize(tflite::micro::GetTensorData(input), - size, data->output_multiplier, - data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->output_multiplier, data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - case kTfLiteInt32: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->output_multiplier, data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt8) { - // Int8 to Int8 requantization, required if the input and output tensors - // have different scales and/or zero points. 
- size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize(tflite::micro::GetTensorData(input), - size, data->output_multiplier, - data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; + return context->AllocatePersistentBuffer(context, + sizeof(OpDataQuantizeReference)); } } // namespace -TfLiteRegistration Register_QUANTIZE() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_QUANTIZE() { + return tflite::micro::RegisterOp( + InitQuantizeReference, PrepareQuantizeReference, EvalQuantizeReference); } } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/quantize.h b/src/tensorflow/lite/micro/kernels/quantize.h new file mode 100644 index 0000000..ba93809 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/quantize.h @@ -0,0 +1,37 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +struct OpDataQuantizeReference { + tflite::QuantizationParams quantization_params; + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t requantize_output_multiplier; + int requantize_output_shift; + + int32_t input_zero_point; +}; + +TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node); +TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TfLiteNode* node); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ diff --git a/src/tensorflow/lite/micro/kernels/quantize_common.cpp b/src/tensorflow/lite/micro/kernels/quantize_common.cpp new file mode 100644 index 0000000..cb04eaf --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/quantize_common.cpp @@ -0,0 +1,239 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/quantize.h" +#include "tensorflow/lite/kernels/internal/reference/requantize.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/quantize.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + // TODO(b/128934713): Add support for fixed-point per-channel quantization. + // Currently this only support affine per-layer quantization. 
+ TF_LITE_ENSURE_EQ(context, output->quantization.type, + kTfLiteAffineQuantization); + const auto* affine_quantization = + reinterpret_cast(output->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); + + TF_LITE_ENSURE( + context, input->type == kTfLiteFloat32 || input->type == kTfLiteInt32 || + input->type == kTfLiteInt16 || input->type == kTfLiteInt8 || + input->type == kTfLiteUInt8); + TF_LITE_ENSURE(context, output->type == kTfLiteInt8 || + output->type == kTfLiteInt16 || + output->type == kTfLiteInt32 || + output->type == kTfLiteUInt8); + + if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteUInt8) || + (input->type == kTfLiteUInt8 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt16) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt32) || + (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) || + (input->type == kTfLiteInt16 && output->type == kTfLiteInt32) || + (input->type == kTfLiteInt32 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt32 && output->type == kTfLiteInt16)) { + double effective_scale = static_cast(input->params.scale) / + static_cast(output->params.scale); + + QuantizeMultiplier(effective_scale, &data->requantize_output_multiplier, + &data->requantize_output_shift); + } + + data->quantization_params.zero_point = output->params.zero_point; + data->quantization_params.scale = static_cast(output->params.scale); + + data->input_zero_point = input->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { + 
TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + if (input->type == kTfLiteFloat32) { + switch (output->type) { + case kTfLiteInt8: + reference_ops::AffineQuantize( + data->quantization_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::AffineQuantize( + data->quantization_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt32) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt16) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + 
tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + case kTfLiteInt32: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt8) { + // Int8 to Int8 requantization, required if the input and output tensors + // have different scales and/or zero points. 
+ size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteUInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt32: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteUInt8) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else { + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + 
TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/read_variable.cpp b/src/tensorflow/lite/micro/kernels/read_variable.cpp new file mode 100644 index 0000000..87e720d --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/read_variable.cpp @@ -0,0 +1,87 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +constexpr int kInputVariableId = 0; +constexpr int kOutputValue = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(NumInputs(node) == 1); + TFLITE_DCHECK(NumOutputs(node) == 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input_resource_id_tensor = + micro_context->AllocateTempInputTensor(node, 
kInputVariableId); + + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + TFLITE_DCHECK(input_resource_id_tensor->type == kTfLiteResource); + TFLITE_DCHECK(NumElements(input_resource_id_tensor) == 1); + + micro_context->DeallocateTempTfLiteTensor(input_resource_id_tensor); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input_resource_id_tensor = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + + TfLiteEvalTensor* output_value = + tflite::micro::GetEvalOutput(context, node, kOutputValue); + TFLITE_DCHECK(output_value != nullptr); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "READ_VARIABLE requires resource variables. Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + TF_LITE_ENSURE_OK( + context, + resources->Read(input_resource_id_tensor->data.i32[0], output_value)); + return kTfLiteOk; +} + +} // namespace. + +TFLMRegistration Register_READ_VARIABLE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/reduce.cpp b/src/tensorflow/lite/micro/kernels/reduce.cpp index 8c60269..a689d3b 100644 --- a/src/tensorflow/lite/micro/kernels/reduce.cpp +++ b/src/tensorflow/lite/micro/kernels/reduce.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,320 +23,52 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/reduce.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace reduce { - -constexpr int kMaxNumberOfAxis = 4; -constexpr int kMaxNumberOfReducedAxis = 2; - -struct OpData { - int32_t multiplier; - int shift; - int temp_buffer_idx; - int resolved_axis_idx; - int input_zp; - float input_scale; - int output_zp; - float output_scale; - int num_output_elements; -}; void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) { - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { - // Inputs Tensor (dtype depends on quantization): - // [0] = Input - // [1] = Axis - const TfLiteTensor* input = GetInput(context, node, 0); - - // Outputs Tensor (dtype depends on quantization): - // [0] = Output - - // Validate number of inputs and outputs - TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Validate axis type - const TfLiteTensor* axis = GetInput(context, node, 1); - TF_LITE_ENSURE(context, axis != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); - - if (input->type == kTfLiteInt8) { - OpData* data = static_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->multiplier, &data->shift); - } - - return kTfLiteOk; + void* op_data = + context->AllocatePersistentBuffer(context, sizeof(OpDataReduce)); + return new (op_data) OpDataReduce(); } TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_OK(context, PrepareSimple(context, 
node)); - - OpData* op_data = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, 0); - const TfLiteTensor* output = GetOutput(context, node, 0); - const TfLiteTensor* axis = GetInput(context, node, 1); - - op_data->input_scale = input->params.scale; - op_data->output_scale = output->params.scale; - op_data->num_output_elements = NumElements(output); - - context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, - &op_data->temp_buffer_idx); - context->RequestScratchBufferInArena( - context, sizeof(int) * static_cast(ElementCount(*axis->dims)), - &op_data->resolved_axis_idx); - - return kTfLiteOk; + return PrepareMaxHelper(context, node, + static_cast(node->user_data)); } TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, 0); - OpData* op_data = reinterpret_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - if (input->type == kTfLiteInt8) { - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); - } - - int output_size = NumElements(output); - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), - &op_data->temp_buffer_idx); - op_data->input_zp = input->params.zero_point; - op_data->input_scale = input->params.scale; - op_data->output_zp = output->params.zero_point; - op_data->output_scale = output->params.scale; - } - - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); - // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) - return kTfLiteOk; -} - -void ResolveAxis(const int* axis_data, int axis_count, - tflite::MeanParams* op_params) { - int i = 0; - for (; i < axis_count; ++i) { - op_params->axis[i] = static_cast(axis_data[i]); - } - for (; i < 4; ++i) { - 
op_params->axis[i] = 1; - } - op_params->axis_count = axis_count; + return PrepareMeanOrSumHelper(context, node, + static_cast(node->user_data)); } TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TfLiteReducerParams* params = - reinterpret_cast(node->builtin_data); - OpData* op_data = reinterpret_cast(node->user_data); - - int num_axis = static_cast(ElementCount(*axis->dims)); - int temp_index[kMaxNumberOfAxis]; - int resolved_axis[kMaxNumberOfReducedAxis]; - - tflite::MeanParams op_params; - ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, &op_params); - - // Special case mean implementation exists for 4D mean across axes 1 and 2. - bool special_case_4d_axes_1_and_2 = - input->dims->size == 4 && op_params.axis_count == 2 && - ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - - switch (input->type) { - case kTfLiteFloat32: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, - tflite::micro::GetTensorData(output))); - } - } break; - case kTfLiteInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
- if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_integer_ops::Mean( - op_params, op_data->multiplier, op_data->shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), op_data->input_zp, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), op_data->output_zp); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer)); - } else { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - case kTfLiteUInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
- if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - op_data->input_zp, op_data->input_scale, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean(tflite::micro::GetTensorData(input), - input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, - resolved_axis, temp_buffer)); - } else { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - default: - TF_LITE_ENSURE_MSG(context, false, - "Currently, only float32, int8 or uint8 input type " - "is supported."); - } - return kTfLiteOk; + return EvalMeanHelper(context, node, + static_cast(node->user_data)); } TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, 
input->type, output->type); - TfLiteReducerParams* params = - static_cast(node->builtin_data); - OpData* op_data = static_cast(node->user_data); + OpDataReduce* op_data = static_cast(node->user_data); + return EvalMaxHelper(context, node, op_data); +} - // Interpret an axis tensor with null dimensions as a scalar - int num_axis = static_cast(ElementCount(*axis->dims)); - int* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - int* resolved_axis = static_cast( - context->GetScratchBuffer(context, op_data->resolved_axis_idx)); - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const float current, const float in) -> float { - return (in > current) ? in : current; - })); - break; - case kTfLiteInt8: - TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), - static_cast(op_data->output_scale)); - TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const int8_t current, const int8_t in) -> int8_t { - return (in > current) ? 
in : current; - })); - break; - default: - TF_LITE_KERNEL_LOG(context, - "Only float32 and int8 types are supported.\n"); - return kTfLiteError; - } - return kTfLiteOk; +TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) { + return EvalSumHelper(context, node, + static_cast(node->user_data)); } -} // namespace reduce +TFLMRegistration Register_MEAN() { + return tflite::micro::RegisterOp(InitReduce, PrepareMeanOrSum, EvalMean); +} -TfLiteRegistration Register_MEAN() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMeanOrSum, - /*invoke=*/reduce::EvalMean, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_REDUCE_MAX() { + return tflite::micro::RegisterOp(InitReduce, PrepareMax, EvalMax); } -TfLiteRegistration Register_REDUCE_MAX() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMax, - /*invoke=*/reduce::EvalMax, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_SUM() { + return tflite::micro::RegisterOp(InitReduce, PrepareMeanOrSum, EvalSum); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/reduce.h b/src/tensorflow/lite/micro/kernels/reduce.h new file mode 100644 index 0000000..2daeef5 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/reduce.h @@ -0,0 +1,65 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +extern const int kMaxNumberOfAxis; +extern const int kMaxNumberOfReducedAxis; + +struct OpDataReduce { + int32_t multiplier; + int shift; + int temp_buffer_idx; + int resolved_axis_idx; + int input_zp; + float input_scale; + int output_zp; + float output_scale; + int num_output_elements; + int num_axis; +}; + +TfLiteStatus PrepareMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); +TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); +TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +void ReduceResolveAxis(const int* axis_data, int axis_count, + MeanParams* op_params); + +TFLMRegistration Register_MEAN(); +TFLMRegistration Register_REDUCE_MAX(); +TFLMRegistration Register_SUM(); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ diff --git a/src/tensorflow/lite/micro/kernels/reduce_common.cpp b/src/tensorflow/lite/micro/kernels/reduce_common.cpp new file mode 100644 index 0000000..2c1a92a --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/reduce_common.cpp @@ -0,0 +1,340 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" +#include "tensorflow/lite/kernels/internal/reference/reduce.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/reduce.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +const int kMaxNumberOfAxis = 5; +const int kMaxNumberOfReducedAxis = 2; + +TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node, + int32_t* multiplier, int* shift) { + MicroContext* micro_context = GetMicroContext(context); + + // Inputs Tensor (dtype depends on quantization): + // [0] = Input + // [1] = Axis + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + + // Outputs Tensor (dtype depends on quantization): + // [0] = Output + + // Validate number of inputs and outputs + TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Validate axis type + TfLiteTensor* axis = 
micro_context->AllocateTempInputTensor(node, 1); + TF_LITE_ENSURE(context, axis != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); + + if (input->type == kTfLiteInt8) { + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + const double real_multiplier = static_cast(input->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, multiplier, shift); + micro_context->DeallocateTempTfLiteTensor(output); + } + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} + +TfLiteStatus PrepareMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + TF_LITE_ENSURE_OK(context, PrepareSimple(context, node, &op_data->multiplier, + &op_data->shift)); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); + + op_data->input_zp = input->params.zero_point; + op_data->input_scale = input->params.scale; + op_data->output_zp = output->params.zero_point; + op_data->output_scale = output->params.scale; + op_data->num_output_elements = NumElements(output); + + context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, + &op_data->temp_buffer_idx); + context->RequestScratchBufferInArena( + context, sizeof(int) * static_cast(ElementCount(*axis->dims)), + &op_data->resolved_axis_idx); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(axis); + return kTfLiteOk; +} + +TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = 
micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + const double real_multiplier = static_cast(input->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); + } + + int output_size = NumElements(output); + op_data->num_axis = NumElements(axis); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), + &op_data->temp_buffer_idx); + op_data->input_zp = input->params.zero_point; + op_data->input_scale = input->params.scale; + op_data->output_zp = output->params.zero_point; + op_data->output_scale = output->params.scale; + } + + TF_LITE_ENSURE_OK( + context, + PrepareSimple(context, node, &(op_data->multiplier), &(op_data->shift))); + // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(axis); + return kTfLiteOk; +} + +void ResolveAxis(const int* axis_data, int axis_count, + tflite::MeanParams* op_params) { + int i = 0; + for (; i < axis_count; ++i) { + op_params->axis[i] = static_cast(axis_data[i]); + } + for (; i < 4; ++i) { + op_params->axis[i] = 1; + } + op_params->axis_count = axis_count; +} + +template +TfLiteStatus QuantizedMeanOrSum(TfLiteContext* context, TfLiteNode* node, + int* temp_index, int* resolved_axis, + int32_t* temp_sum, OpDataReduce* op_data, + bool compute_sum) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + 
TfLiteReducerParams* params = + static_cast(node->builtin_data); + + bool result = reference_ops::QuantizedMeanOrSumExtraArgs( + tflite::micro::GetTensorData(input), op_data->input_zp, + op_data->input_scale, &input->dims->data[0], input->dims->size, + tflite::micro::GetTensorData(output), op_data->output_scale, + op_data->multiplier, op_data->shift, op_data->output_zp, + &output->dims->data[0], output->dims->size, + tflite::micro::GetTensorData(axis), op_data->num_axis, + params->keep_dims, temp_index, resolved_axis, temp_sum, compute_sum); + TF_LITE_ENSURE(context, result); + + return kTfLiteOk; +} + +template +TfLiteStatus EvalIntegerMean(TfLiteContext* context, TfLiteNode* node, + int num_axis, OpDataReduce* op_data, + int* temp_index, int* resolved_axis) { + int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/false); + + return kTfLiteOk; +} + +TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TfLiteReducerParams* params = + reinterpret_cast(node->builtin_data); + + int num_axis = static_cast(ElementCount(*axis->dims)); + int temp_index[kMaxNumberOfAxis]; + int resolved_axis[kMaxNumberOfReducedAxis]; + + switch (input->type) { + case kTfLiteFloat32: { + tflite::MeanParams op_params; + ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, + &op_params); + + // Special case mean implementation exists for 4D mean across axes 1 + // and 2. 
+ bool special_case_4d_axes_1_and_2 = + input->dims->size == 4 && op_params.axis_count == 2 && + ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || + (op_params.axis[0] == 2 && op_params.axis[1] == 1)); + + // Defer to specialized implementation for 4D Mean across axes 1 & 2. + if (params->keep_dims && special_case_4d_axes_1_and_2) { + reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + TF_LITE_ENSURE( + context, + reference_ops::Mean( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_index, resolved_axis, + tflite::micro::GetTensorData(output))); + } + } break; + case kTfLiteInt8: { + TF_LITE_ENSURE_OK( + context, EvalIntegerMean(context, node, num_axis, op_data, + temp_index, resolved_axis)); + } break; + case kTfLiteInt16: { + TF_LITE_ENSURE_OK( + context, EvalIntegerMean(context, node, num_axis, op_data, + temp_index, resolved_axis)); + } break; + default: + TF_LITE_ENSURE_MSG(context, false, + "Currently, only float32, int8 or int16 input type " + "is supported."); + } + return kTfLiteOk; +} + +TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + // Interpret an axis tensor with null dimensions as a scalar + int num_axis = static_cast(ElementCount(*axis->dims)); + int* temp_buffer = static_cast( + 
context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + int* resolved_axis = static_cast( + context->GetScratchBuffer(context, op_data->resolved_axis_idx)); + switch (input->type) { + case kTfLiteFloat32: + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::lowest(), + [](const float current, const float in) -> float { + return (in > current) ? in : current; + })); + break; + case kTfLiteInt8: + TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), + static_cast(op_data->output_scale)); + TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::lowest(), + [](const int8_t current, const int8_t in) -> int8_t { + return (in > current) ? in : current; + })); + break; + default: + MicroPrintf("Only float32 and int8 types are supported."); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + // Interpret an axis tensor with null dimensions as a scalar. 
+ int num_axis = static_cast(ElementCount(*axis->dims)); + int temp_index[kMaxNumberOfAxis]; + int resolved_axis[kMaxNumberOfReducedAxis]; + + switch (input->type) { + case kTfLiteFloat32: { + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_index, resolved_axis, /*init_value=*/0.f, + [](const float current, const float in) -> float { + return in + current; + })); + } break; + case kTfLiteInt8: { + int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/true); + } break; + case kTfLiteInt16: { + int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/true); + } break; + default: + MicroPrintf("Only float32, int8, and int16 types are supported."); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/reshape.cpp b/src/tensorflow/lite/micro/kernels/reshape.cpp index 8e47e2a..5527798 100644 --- a/src/tensorflow/lite/micro/kernels/reshape.cpp +++ b/src/tensorflow/lite/micro/kernels/reshape.cpp @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,6 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include "tensorflow/lite/micro/kernels/reshape.h" + +#include + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" @@ -23,66 +27,13 @@ limitations under the License. #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace reshape { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - // Tensorflow's Reshape allows one of the shape components to have the - // special -1 value, meaning it will be calculated automatically based on the - // input. Here we calculate what that dimension should be so that the number - // of output elements in the same as the number of input elements. - int num_input_elements = NumElements(input); - TfLiteIntArray* output_shape = output->dims; - - if (NumInputs(node) == 1 && // Legacy scalar supported with params. - output_shape->size == 1 && output_shape->data[0] == 0) { - // Legacy tflite models use a shape parameter of [0] to indicate scalars, - // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during - // toco conversion. 
- output_shape->size = 0; - } - - int num_output_elements = 1; - int stretch_dim = -1; - for (int i = 0; i < output_shape->size; ++i) { - int value = output_shape->data[i]; - if (value == -1) { - TF_LITE_ENSURE_EQ(context, stretch_dim, -1); - stretch_dim = i; - } else { - num_output_elements *= value; - } - } - if (stretch_dim != -1) { - output_shape->data[stretch_dim] = num_input_elements / num_output_elements; - num_output_elements *= output_shape->data[stretch_dim]; - } - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TF_LITE_ENSURE_EQ(context, ReshapeOutput(context, node), kTfLiteOk); - return kTfLiteOk; -} +namespace { -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus EvalReshapeReference(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kReshapeInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kReshapeOutputTensor); // TODO(b/162522304): storing input bytes in OpData increases some models // significantly, possibly due to alignment issues. @@ -93,26 +44,16 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Do nothing for in-place reshape. if (input->data.raw != output->data.raw) { // Otherwise perform reshape with copy. 
- for (size_t i = 0; i < input_bytes; ++i) { - output->data.raw[i] = input->data.raw[i]; - } + memcpy(output->data.raw, input->data.raw, input_bytes); } return kTfLiteOk; } -} // namespace reshape +} // namespace -TfLiteRegistration Register_RESHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/reshape::Prepare, - /*invoke=*/reshape::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_RESHAPE() { + return tflite::micro::RegisterOp(nullptr, PrepareReshapeReference, + EvalReshapeReference); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/reshape.h b/src/tensorflow/lite/micro/kernels/reshape.h new file mode 100644 index 0000000..02bda32 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/reshape.h @@ -0,0 +1,26 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +constexpr int kReshapeInputTensor = 0; +constexpr int kReshapeOutputTensor = 0; + +TfLiteStatus PrepareReshapeReference(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/reshape_common.cpp b/src/tensorflow/lite/micro/kernels/reshape_common.cpp new file mode 100644 index 0000000..b86e2be --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/reshape_common.cpp @@ -0,0 +1,94 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/reshape.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kReshapeInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kReshapeOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + // Tensorflow's Reshape allows one of the shape components to have the + // special -1 value, meaning it will be calculated automatically based on the + // input. Here we calculate what that dimension should be so that the number + // of output elements in the same as the number of input elements. + int num_input_elements = NumElements(input); + TfLiteIntArray* output_shape = output->dims; + + if (NumInputs(node) == 1 && // Legacy scalar supported with params. + output_shape->size == 1 && output_shape->data[0] == 0) { + // Legacy tflite models use a shape parameter of [0] to indicate scalars, + // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during + // toco conversion. 
+ output_shape->size = 0; + } + + int num_output_elements = 1; + int stretch_dim = -1; + for (int i = 0; i < output_shape->size; ++i) { + int value = output_shape->data[i]; + if (value == -1) { + TF_LITE_ENSURE_EQ(context, stretch_dim, -1); + stretch_dim = i; + } else { + num_output_elements *= value; + } + } + if (stretch_dim != -1) { + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kReshapeOutputTensor); + TF_LITE_ENSURE_STATUS(tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + output_shape = output->dims; // output tensor dims were moved + output_shape->data[stretch_dim] = num_input_elements / num_output_elements; + num_output_elements *= output_shape->data[stretch_dim]; + } + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace + +TfLiteStatus PrepareReshapeReference(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TF_LITE_ENSURE_EQ(context, ReshapeOutput(context, node), kTfLiteOk); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/resize_bilinear.cpp b/src/tensorflow/lite/micro/kernels/resize_bilinear.cpp new file mode 100644 index 0000000..ab54e81 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/resize_bilinear.cpp @@ -0,0 +1,117 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/resize_bilinear.h" + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kSizeTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus ResizeBilinearPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); + TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); + + TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); + output->type = input->type; + + TF_LITE_ENSURE_MSG(context, IsConstantTensor(size), + "Non constant size tensor not supported"); + + // Ensure params are valid. 
+ auto* params = + reinterpret_cast(node->builtin_data); + if (params->half_pixel_centers && params->align_corners) { + MicroPrintf("If half_pixel_centers is True, align_corners must be False."); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus ResizeBilinearEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* size = + tflite::micro::GetEvalInput(context, node, kSizeTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + tflite::ResizeBilinearParams op_params; + op_params.align_corners = params->align_corners; + op_params.half_pixel_centers = params->half_pixel_centers; + reference_ops::ResizeBilinear(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(size), + tflite::micro::GetTensorData(size), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else if (output->type == kTfLiteInt8) { + tflite::ResizeBilinearParams op_params; + op_params.align_corners = params->align_corners; + op_params.half_pixel_centers = params->half_pixel_centers; + reference_ops::ResizeBilinearInteger( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(size), + tflite::micro::GetTensorData(size), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + MicroPrintf("Output type is %d, requires float or int8.", output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_RESIZE_BILINEAR() { + return 
tflite::micro::RegisterOp(nullptr, ResizeBilinearPrepare, + ResizeBilinearEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp b/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp index 971de83..ef2d35d 100644 --- a/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp +++ b/src/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,23 +21,29 @@ limitations under the License. #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace resize_nearest_neighbor { + +namespace { constexpr int kInputTensor = 0; constexpr int kSizeTensor = 1; constexpr int kOutputTensor = 0; -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ResizeNearestNeighborPrepare(TfLiteContext* context, + TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* size = GetInput(context, node, kSizeTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); // Our current implementations rely on the input being 4D, // and the size being 1D 
tensor with exactly 2 elements. @@ -49,13 +55,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { output->type = input->type; if (!IsConstantTensor(size)) { - TF_LITE_KERNEL_LOG(context, "Dynamic tensors are unsupported in tfmicro."); + MicroPrintf("Dynamic tensors are unsupported in tfmicro."); return kTfLiteError; } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ResizeNearestNeighborEval(TfLiteContext* context, + TfLiteNode* node) { auto* params = reinterpret_cast(node->builtin_data); @@ -78,44 +90,37 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteUInt8) { + } else if (output->type == kTfLiteInt8) { reference_ops::ResizeNearestNeighbor( op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(size), tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { + tflite::micro::GetTensorData(output)); + } else if (output->type == kTfLiteInt16) { reference_ops::ResizeNearestNeighbor( op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(size), tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, - "Output type is %d, requires float, uint8_t or int8_t.", - output->type); + MicroPrintf("Output tensor type %s (%d) not supported.", + 
TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; } return kTfLiteOk; } -} // namespace resize_nearest_neighbor - -TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/resize_nearest_neighbor::Prepare, - /*invoke=*/resize_nearest_neighbor::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + +} // namespace + +TFLMRegistration Register_RESIZE_NEAREST_NEIGHBOR() { + return tflite::micro::RegisterOp(nullptr, ResizeNearestNeighborPrepare, + ResizeNearestNeighborEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/round.cpp b/src/tensorflow/lite/micro/kernels/round.cpp index 5804016..ae8e354 100644 --- a/src/tensorflow/lite/micro/kernels/round.cpp +++ b/src/tensorflow/lite/micro/kernels/round.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,17 +21,19 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace round { +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); +TfLiteStatus RoundPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); @@ -42,10 +44,13 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < output->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus RoundEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); TfLiteEvalTensor* output = @@ -58,19 +63,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } -} // namespace round +} // namespace -TfLiteRegistration Register_ROUND() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/round::Prepare, - /*invoke=*/round::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_ROUND() { + return tflite::micro::RegisterOp(nullptr, RoundPrepare, RoundEval); } -} // namespace micro -} // 
namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/select.cpp b/src/tensorflow/lite/micro/kernels/select.cpp new file mode 100644 index 0000000..90e6413 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/select.cpp @@ -0,0 +1,196 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/select.h" + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensorCondition = 0; +constexpr int kInputTensorX = 1; +constexpr int kInputTensorY = 2; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; +}; + +void* SelectInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + auto* data = static_cast( + context->AllocatePersistentBuffer(context, sizeof(OpData))); + data->requires_broadcast = false; + return data; +} + +TfLiteStatus CheckBroadcastShape(TfLiteContext* context, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + const TfLiteTensor* input3, + const TfLiteIntArray* output_shape) { + 
const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int dims3 = NumDimensions(input3); + const int out_dims = std::max(std::max(dims1, dims2), dims3); + TF_LITE_ENSURE_EQ(context, out_dims, output_shape->size); + + for (int i = 0; i < out_dims; ++i) { + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); + const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int min_value = std::min(std::min(d1, d2), d3); + int max_value = std::max(std::max(d1, d2), d3); + // If one dimension is 0, others must be 0 or 1. + if (min_value == 0) max_value = 0; + if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || + !(d3 == 1 || d3 == max_value)) { + MicroPrintf("Given shapes are not broadcastable."); + return kTfLiteError; + } + TF_LITE_ENSURE_EQ(context, output_shape->data[out_dims - i - 1], max_value); + } + return kTfLiteOk; +} + +TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input_condition = + micro_context->AllocateTempInputTensor(node, kInputTensorCondition); + + TfLiteTensor* input_x = + micro_context->AllocateTempInputTensor(node, kInputTensorX); + + TfLiteTensor* input_y = + micro_context->AllocateTempInputTensor(node, kInputTensorY); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + // Input must be bool. + TF_LITE_ENSURE_TYPES_EQ(context, input_condition->type, kTfLiteBool); + TF_LITE_ENSURE_TYPES_EQ(context, input_x->type, input_y->type); + output->type = input_x->type; + + // Respect the original output shape when there are mixed shapes to represent + // a scalar data. 
+ bool possible_mixed_scaler = + GetTensorShape(input_condition).FlatSize() == 1 && + GetTensorShape(input_x).FlatSize() == 1 && + GetTensorShape(input_y).FlatSize() == 1 && + GetTensorShape(output).FlatSize() == 1; + + bool same_shape = HaveSameShapes(input_condition, input_x) && + HaveSameShapes(input_x, input_y); + if (!same_shape && !possible_mixed_scaler) { + TF_LITE_ENSURE_OK( + context, CheckBroadcastShape(context, input_condition, input_x, input_y, + output->dims)); + data->requires_broadcast = true; + } + + micro_context->DeallocateTempTfLiteTensor(input_condition); + micro_context->DeallocateTempTfLiteTensor(input_x); + micro_context->DeallocateTempTfLiteTensor(input_y); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +template +void CallSelect(const TfLiteEvalTensor* input_condition, + const TfLiteEvalTensor* input_x, + const TfLiteEvalTensor* input_y, TfLiteEvalTensor* output, + bool need_broadcast) { + using Func = decltype(reference_ops::Select)*; + Func select_func; + if (need_broadcast) { + select_func = reference_ops::BroadcastSelect5DSlow; + } else { + select_func = reference_ops::Select; + } + + select_func(tflite::micro::GetTensorShape(input_condition), + tflite::micro::GetTensorData(input_condition), + tflite::micro::GetTensorShape(input_x), + tflite::micro::GetTensorData(input_x), + tflite::micro::GetTensorShape(input_y), + tflite::micro::GetTensorData(input_y), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) { + OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input_condition = + tflite::micro::GetEvalInput(context, node, kInputTensorCondition); + + const TfLiteEvalTensor* input_x = + tflite::micro::GetEvalInput(context, node, kInputTensorX); + + const TfLiteEvalTensor* input_y = + tflite::micro::GetEvalInput(context, node, kInputTensorY); + + TfLiteEvalTensor* output = + 
tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input_x->type) { + case kTfLiteFloat32: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + case kTfLiteInt8: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + case kTfLiteInt16: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + default: + MicroPrintf("Does not support type other than %s, but got %s", + "int8|int16|float32", TfLiteTypeGetName(input_x->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +// SelectV2 op selects values of 'x' if the corresponding value of 'condition' +// is true or the value of 'y' if false. There are valid condition input sizes: +// +// 1. Either the same shape (in which case the select is elementwise), or +// 2. Broadcastable shapes between 'condition', 'x' and 'y'. +TFLMRegistration Register_SELECT_V2() { + return tflite::micro::RegisterOp(tflite::SelectInit, tflite::SelectPrepare, + tflite::SelectEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/shape.cpp b/src/tensorflow/lite/micro/kernels/shape.cpp old mode 100755 new mode 100644 index df962f6..d95e450 --- a/src/tensorflow/lite/micro/kernels/shape.cpp +++ b/src/tensorflow/lite/micro/kernels/shape.cpp @@ -20,6 +20,7 @@ limitations under the License. 
#include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" #include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -34,21 +35,21 @@ void ExtractShape(const TfLiteEvalTensor* input, int32_t* output_data) { } } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ShapePrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus ShapeEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); if (output->type != kTfLiteInt32) { - TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); + MicroPrintf("Output type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); return kTfLiteError; } else { ExtractShape(input, tflite::micro::GetTensorData(output)); @@ -59,15 +60,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace -TfLiteRegistration Register_SHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_SHAPE() { + return tflite::micro::RegisterOp(nullptr, ShapePrepare, ShapeEval); } } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/slice.cpp b/src/tensorflow/lite/micro/kernels/slice.cpp new file mode 100644 index 0000000..7963425 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/slice.cpp @@ -0,0 +1,164 @@ +/* Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/slice.h" + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBeginTensor = 1; +constexpr int kSizeTensor = 2; +constexpr int kOutputTensor = 0; + +const int kMaxDim = 5; + +template +void GetBeginAndSizeVectors(int dimensions, const TfLiteEvalTensor* begin, + const TfLiteEvalTensor* size, int32_t* begins, + int32_t* sizes) { + int offset = kMaxDim - dimensions; + for (int idx = 0; idx < dimensions; ++idx) { + begins[offset + idx] = tflite::micro::GetTensorData(begin)[idx]; + sizes[offset + idx] = tflite::micro::GetTensorData(size)[idx]; + } +} + +TfLiteStatus SlicePrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TFLITE_DCHECK(input != nullptr); + TfLiteTensor* begin = + 
micro_context->AllocateTempInputTensor(node, kBeginTensor); + TFLITE_DCHECK(begin != nullptr); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TFLITE_DCHECK(size != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TFLITE_DCHECK(output != nullptr); + + // Ensure validity of input tensor and its dimension. + TFLITE_DCHECK(input->type == output->type); + TFLITE_DCHECK(begin->type == size->type); + TFLITE_DCHECK(begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64); + TFLITE_DCHECK(size->type == kTfLiteInt32 || size->type == kTfLiteInt64); + TFLITE_DCHECK(NumDimensions(begin) == 1); + TFLITE_DCHECK(NumDimensions(size) == 1); + TFLITE_DCHECK(NumElements(begin) == NumElements(size)); + TFLITE_DCHECK(NumDimensions(input) <= kMaxDim); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(begin); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus SliceEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* begin = + tflite::micro::GetEvalInput(context, node, kBeginTensor); + const TfLiteEvalTensor* size = + tflite::micro::GetEvalInput(context, node, kSizeTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + tflite::SliceParams op_params; + op_params.begin_count = kMaxDim; + op_params.size_count = kMaxDim; + for (int i = 0; i < kMaxDim; ++i) { + op_params.begin[i] = 0; + op_params.size[i] = 1; + } + + if (begin->type == kTfLiteInt32) { + GetBeginAndSizeVectors(input->dims->size, begin, size, + op_params.begin, op_params.size); + } else if (begin->type == kTfLiteInt64) { + GetBeginAndSizeVectors(input->dims->size, begin, size, + op_params.begin, op_params.size); + } else { + 
MicroPrintf("Begin tensor type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + switch (input->type) { + case kTfLiteFloat32: + reference_ops::Slice(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt32: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteBool: + reference_ops::Slice(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input tensor type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_SLICE() { + return tflite::micro::RegisterOp(nullptr, SlicePrepare, SliceEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/softmax.h b/src/tensorflow/lite/micro/kernels/softmax.h new file mode 100644 index 0000000..02e032e --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/softmax.h @@ -0,0 +1,67 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length); + +// Common helper function to SoftmaxPrepare. +TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteTensor* output, + const TfLiteSoftmaxParams* params, + SoftmaxParams* op_data); + +TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node); + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. +TFLMRegistration Register_SOFTMAX(); + +#if defined(XTENSA) || defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 input and int16 output. 
+TFLMRegistration Register_SOFTMAX_INT8_INT16(); +#else +inline TFLMRegistration Register_SOFTMAX_INT8_INT16() { + return Register_SOFTMAX(); +} +#endif + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 input/output and uses the latency optimized implementations. +TFLMRegistration Register_SOFTMAX_INT8(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16 input/output and uses the latency optimized implementations. +TFLMRegistration Register_SOFTMAX_INT16(); + +#else +inline TFLMRegistration Register_SOFTMAX_INT8() { return Register_SOFTMAX(); } + +inline TFLMRegistration Register_SOFTMAX_INT16() { return Register_SOFTMAX(); } +#endif + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ diff --git a/src/tensorflow/lite/micro/kernels/softmax_common.cpp b/src/tensorflow/lite/micro/kernels/softmax_common.cpp new file mode 100644 index 0000000..62b8b29 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/softmax_common.cpp @@ -0,0 +1,168 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/softmax.h" +#include "tensorflow/lite/micro/micro_context.h" + +namespace tflite { + +namespace { +// Softmax parameter data that persists in user_data +const int kInt16LUTArraySize = LUTSize<int16_t>(); + +TfLiteStatus InitializeLutForInt16(TfLiteContext* context, +                                   const TfLiteTensor* input, +                                   TfLiteTensor* output, +                                   SoftmaxParams* op_data) { +   // Only allocate LUTs for KTfLiteInt16 data type +   if (input->type == kTfLiteInt16) { +     void* raw_exp_lut = context->AllocatePersistentBuffer( +         context, sizeof(int16_t) * kInt16LUTArraySize); +     TF_LITE_ENSURE(context, raw_exp_lut != nullptr); +     op_data->exp_lut = reinterpret_cast<int16_t*>(raw_exp_lut); +     void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( +         context, sizeof(int16_t) * kInt16LUTArraySize); +     TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); +     op_data->one_over_one_plus_x_lut = +         reinterpret_cast<int16_t*>(one_over_one_plus_x_lut); +   } + +   if (output->type == kTfLiteInt16) { +     TF_LITE_ENSURE(context, +                    input->type == kTfLiteInt8 || input->type == kTfLiteInt16); +   } else { +     TF_LITE_ENSURE_EQ(context, input->type, output->type); +   } + +   // Populate LUT if required +   if (input->type == kTfLiteInt16) { +     TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); +     // exp LUT only used on negative values +     // we consider exp(-10.0) is insignificant to accumulation +     const int32_t range = std::numeric_limits<int16_t>::max() - +                           std::numeric_limits<int16_t>::min(); +     LUTPopulate<int16_t>( +         10.0f / range, std::numeric_limits<int16_t>::max(), 2.0f / range, 0, +         [](float value) { return std::exp(value); }, op_data->exp_lut); + + 
LUTPopulate<int16_t>( +         1.0f / range, std::numeric_limits<int16_t>::min(), 2.0f / range, 0, +         [](float value) { return 1.0f / (1.0f + value); }, +         op_data->one_over_one_plus_x_lut); + +     op_data->zero_point = output->params.zero_point; +     op_data->scale = output->params.scale; +   } + +   return kTfLiteOk; +} + +}  // namespace + +TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, +                                    const TfLiteTensor* input, +                                    TfLiteTensor* output, +                                    const TfLiteSoftmaxParams* params, +                                    SoftmaxParams* op_data) { +   if (InitializeLutForInt16(context, input, output, op_data) != kTfLiteOk) { +     return kTfLiteError; +   } + +   if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { +     if (input->type == kTfLiteInt16) { +       TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); +       TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); +       TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, +                           (0.001f * 1.f / 32768)); +     } else {  // input->type == kTfLiteInt8 +       TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); +       if (output->type == kTfLiteInt16) { +         TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); +         TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 65536, +                             (0.001f * 1.f / 65536)); +       } else {  // output->type == kTfLiteint8 +         TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); +         TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); +         TF_LITE_ENSURE(context, output->params.scale == 1.f / 256); +       } +     } + +     static const int kScaledDiffIntegerBits = 5; + +     // Calculate input_multiplier and input_left_shift +     if (input->type == kTfLiteInt16) { +       int input_left_shift; +       double input_scale_beta_rescale = +           static_cast<double>(input->params.scale) * +           static_cast<double>(params->beta) / +           (10.0 / 65535.0);  // scale the input_diff such that [-65535, 0] +                              // correspond to [-10.0, 0.0] +       QuantizeMultiplier(input_scale_beta_rescale, &op_data->input_multiplier, +                          &input_left_shift); +       op_data->input_left_shift = input_left_shift; +     } else { +       int input_left_shift; + 
tflite::PreprocessSoftmaxScaling( +           static_cast<double>(params->beta), +           static_cast<double>(input->params.scale), kScaledDiffIntegerBits, +           &op_data->input_multiplier, &input_left_shift); +       op_data->input_left_shift = input_left_shift; +       op_data->diff_min = +           -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, +                                               op_data->input_left_shift); +     } +   } else { +     TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); +     TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); +     op_data->beta = static_cast<double>(params->beta); +   } +   return kTfLiteOk; +} + +void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { +   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); +   return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams)); +} + +TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { +   MicroContext* micro_context = GetMicroContext(context); + +   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); +   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); +   TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); +   TF_LITE_ENSURE(context, input != nullptr); +   TF_LITE_ENSURE(context, NumDimensions(input) >= 1); +   TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); +   TF_LITE_ENSURE(context, output != nullptr); + +   TF_LITE_ENSURE(context, node->user_data != nullptr); +   SoftmaxParams* op_data = static_cast<SoftmaxParams*>(node->user_data); + +   auto* params = static_cast<TfLiteSoftmaxParams*>(node->builtin_data); +   auto ret_val = +       CalculateSoftmaxParams(context, input, output, params, op_data); + +   micro_context->DeallocateTempTfLiteTensor(input); +   micro_context->DeallocateTempTfLiteTensor(output); +   return ret_val; +} + +}  // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp b/src/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp new file mode 100644 index 0000000..f8df149 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp @@ -0,0 +1,123 @@ +/* Copyright 2021 The 
TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBlockShapeTensor = 1; +constexpr int kCropsTensor = 2; +constexpr int kOutputTensor = 0; + +// Currently, only 3D NHC and 4D NHWC input/output op_context are supported. +// In case of 3D input, it will be extended to 3D NHWC by adding W=1. +// The 4D array need to have exactly 2 spatial dimensions. +// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND. 
+const int kInputOutputMinDimensionNum = 3; +const int kInputOutputMaxDimensionNum = 4; + +void* SpaceToBatchNDInit(TfLiteContext* context, const char* buffer, +                          size_t length) { +   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); +   return context->AllocatePersistentBuffer(context, sizeof(SpaceToBatchParams)); +} + +TfLiteStatus SpaceToBatchNDPrepare(TfLiteContext* context, TfLiteNode* node) { +   MicroContext* micro_context = GetMicroContext(context); + +   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); +   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + +   TfLiteTensor* input = +       micro_context->AllocateTempInputTensor(node, kInputTensor); +   TfLiteTensor* output = +       micro_context->AllocateTempOutputTensor(node, kOutputTensor); +   TF_LITE_ENSURE(context, input != nullptr && output != nullptr); + +   TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); +   TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum); +   TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum); +   TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); +   TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + +   micro_context->DeallocateTempTfLiteTensor(input); +   micro_context->DeallocateTempTfLiteTensor(output); +   return kTfLiteOk; +} + +TfLiteStatus SpaceToBatchNDEval(TfLiteContext* context, TfLiteNode* node) { +   TFLITE_DCHECK(node->user_data != nullptr); +   const SpaceToBatchParams& params = +       *(static_cast<const SpaceToBatchParams*>(node->user_data)); + +   const TfLiteEvalTensor* input = +       tflite::micro::GetEvalInput(context, node, kInputTensor); +   const TfLiteEvalTensor* block_shape = +       tflite::micro::GetEvalInput(context, node, kBlockShapeTensor); +   const TfLiteEvalTensor* crops = +       tflite::micro::GetEvalInput(context, node, kCropsTensor); +   TfLiteEvalTensor* output = +       tflite::micro::GetEvalOutput(context, node, kOutputTensor); + +   switch (input->type) {  // Already know in/out types are same. 
+     case kTfLiteFloat32: +       reference_ops::SpaceToBatchND( +           params, tflite::micro::GetTensorShape(input), +           tflite::micro::GetTensorData<float>(input), +           tflite::micro::GetTensorShape(block_shape), +           tflite::micro::GetTensorData<int32_t>(block_shape), +           tflite::micro::GetTensorShape(crops), +           tflite::micro::GetTensorData<int32_t>(crops), +           tflite::micro::GetTensorShape(output), +           tflite::micro::GetTensorData<float>(output)); +       break; +     case kTfLiteInt8: +       reference_ops::SpaceToBatchND( +           params, tflite::micro::GetTensorShape(input), +           tflite::micro::GetTensorData<int8_t>(input), +           tflite::micro::GetTensorShape(block_shape), +           tflite::micro::GetTensorData<int32_t>(block_shape), +           tflite::micro::GetTensorShape(crops), +           tflite::micro::GetTensorData<int32_t>(crops), +           tflite::micro::GetTensorShape(output), +           tflite::micro::GetTensorData<int8_t>(output)); +       break; +     default: +       MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), +                   input->type); +       return kTfLiteError; +   } +   return kTfLiteOk; +} + +}  // namespace. + +TFLMRegistration Register_SPACE_TO_BATCH_ND() { +   return tflite::micro::RegisterOp(SpaceToBatchNDInit, SpaceToBatchNDPrepare, +                                    SpaceToBatchNDEval); +} + +}  // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/space_to_depth.cpp b/src/tensorflow/lite/micro/kernels/space_to_depth.cpp new file mode 100644 index 0000000..eac6613 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/space_to_depth.cpp @@ -0,0 +1,127 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +     http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/space_to_depth.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kBatchRank = 0; +constexpr int kHeightRank = 1; +constexpr int kWidthRank = 2; +constexpr int kDepthRank = 3; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); + + auto data_type = output->type; + TF_LITE_ENSURE(context, + data_type == kTfLiteFloat32 || data_type == kTfLiteInt8); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + const int block_size = params->block_size; + const int input_height = input->dims->data[kHeightRank]; + const int input_width = input->dims->data[kWidthRank]; + int output_height = input_height / block_size; + int output_width = input_width / block_size; + + TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size); + TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size); + + // Relocate dims to the persistent storage arena 
before changing them, + // otherwise we'd be modifying temporary copies made by the interpreters each + // time they process the layer. + TfLiteEvalTensor* output_eval = + micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + output->dims->data[kBatchRank] = input->dims->data[kBatchRank]; + output->dims->data[kHeightRank] = output_height; + output->dims->data[kWidthRank] = output_width; + output->dims->data[kDepthRank] = + input->dims->data[kDepthRank] * block_size * block_size; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + SpaceToDepthParams op_params; + op_params.block_size = params->block_size; + + switch (input->type) { // Already know in/out types are same. 
+     case kTfLiteFloat32: +       reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input), +                                   micro::GetTensorData<float>(input), +                                   micro::GetTensorShape(output), +                                   micro::GetTensorData<float>(output)); +       break; +     case kTfLiteInt8: +       reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input), +                                   micro::GetTensorData<int8_t>(input), +                                   micro::GetTensorShape(output), +                                   micro::GetTensorData<int8_t>(output)); +       break; +     default: +       MicroPrintf("SPACE_TO_DEPTH only supports FLOAT32 and INT8, got %s.", +                   TfLiteTypeGetName(input->type)); +       return kTfLiteError; +   } + +   return kTfLiteOk; +} + +}  // namespace + +TFLMRegistration Register_SPACE_TO_DEPTH() { +   return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +}  // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/split.cpp b/src/tensorflow/lite/micro/kernels/split.cpp index a1236d7..cae7074 100644 --- a/src/tensorflow/lite/micro/kernels/split.cpp +++ b/src/tensorflow/lite/micro/kernels/split.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.   Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,11 +18,11 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace split { + +namespace { template TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, @@ -67,8 +67,9 @@ TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* axis = GetInput(context, node, 0); +TfLiteStatus SplitPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, axis != nullptr); // Dynamic output tensors are needed if axis tensor is not constant. @@ -76,10 +77,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // constant axis tensor for now. 
TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), "Non constant axis tensor not supported"); + + micro_context->DeallocateTempTfLiteTensor(axis); return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus SplitEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 0); const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 1); @@ -95,9 +98,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: { return SplitImpl(context, node, input, axis_value); } - case kTfLiteUInt8: { - return SplitImpl(context, node, input, axis_value); - } case kTfLiteInt8: { return SplitImpl(context, node, input, axis_value); } @@ -108,28 +108,18 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return SplitImpl(context, node, input, axis_value); } default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Type %s currently not supported.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } -#undef TF_LITE_SPLIT return kTfLiteOk; } -} // namespace split - -TfLiteRegistration Register_SPLIT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split::Prepare, - /*invoke=*/split::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_SPLIT() { + return tflite::micro::RegisterOp(nullptr, SplitPrepare, SplitEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/split_v.cpp b/src/tensorflow/lite/micro/kernels/split_v.cpp old mode 100755 new mode 100644 index c2a0114..c9cc3e7 --- a/src/tensorflow/lite/micro/kernels/split_v.cpp +++ b/src/tensorflow/lite/micro/kernels/split_v.cpp @@ -1,135 +1,127 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace split_v { - -template -TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input, int axis_value) { - const TfLiteIntArray* input_dims = input->dims; - const TfLiteEvalTensor* output0 = - tflite::micro::GetEvalOutput(context, node, 0); - - const int split_dimensions = input_dims->size; - - TFLITE_DCHECK_LT(axis_value, split_dimensions); - TFLITE_DCHECK_EQ(output0->dims->size, split_dimensions); - - int64_t split_size = 0; - const int output_count = NumOutputs(node); - for (int i = 0; i < output_count; i++) { - split_size += - tflite::micro::GetEvalOutput(context, node, i)->dims->data[axis_value]; - } - TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]); - int64_t outer_size = 1; - for (int i = 0; i < axis_value; ++i) { - outer_size *= input_dims->data[i]; - } - - int64_t base_inner_size = 1; - for (int i = axis_value + 1; i < split_dimensions; ++i) { - base_inner_size *= input_dims->data[i]; - } - - const T* input_ptr = tflite::micro::GetTensorData(input); - for (int k = 0; k < outer_size; ++k) { - for (int i = 0; i < output_count; ++i) { - TfLiteEvalTensor* output_tensor = - tflite::micro::GetEvalOutput(context, node, i); - T* output_data = tflite::micro::GetTensorData(output_tensor); - const int copy_size = - output_tensor->dims->data[axis_value] * base_inner_size; - T* output_ptr = output_data + k * copy_size; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - input_ptr += copy_size; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* 
context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - - // Dynamic output tensors are needed if axis tensor is not constant. - // But Micro doesn't support dynamic memory allocation, so we only support - // constant axis tensor for now. - const TfLiteTensor* axis = GetInput(context, node, 2); - TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), - "Non constant axis tensor not supported"); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2); - - int axis_value = tflite::micro::GetTensorData(axis)[0]; - if (axis_value < 0) { - axis_value += input->dims->size; - } - - TF_LITE_ENSURE(context, axis_value >= 0); - TF_LITE_ENSURE(context, axis_value < input->dims->size); - - switch (input->type) { - case kTfLiteFloat32: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt8: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt16: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt32: { - return SplitImpl(context, node, input, axis_value); - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace split_v - -TfLiteRegistration Register_SPLIT_V() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split_v::Prepare, - /*invoke=*/split_v::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +template +TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input, int axis_value) { + const TfLiteIntArray* 
input_dims = input->dims; + const TfLiteEvalTensor* output0 = + tflite::micro::GetEvalOutput(context, node, 0); + + const int split_dimensions = input_dims->size; + + TFLITE_DCHECK_LT(axis_value, split_dimensions); + TFLITE_DCHECK_EQ(output0->dims->size, split_dimensions); + + int64_t split_size = 0; + const int output_count = NumOutputs(node); + for (int i = 0; i < output_count; i++) { + split_size += + tflite::micro::GetEvalOutput(context, node, i)->dims->data[axis_value]; + } + TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]); + int64_t outer_size = 1; + for (int i = 0; i < axis_value; ++i) { + outer_size *= input_dims->data[i]; + } + + int64_t base_inner_size = 1; + for (int i = axis_value + 1; i < split_dimensions; ++i) { + base_inner_size *= input_dims->data[i]; + } + + const T* input_ptr = tflite::micro::GetTensorData(input); + for (int k = 0; k < outer_size; ++k) { + for (int i = 0; i < output_count; ++i) { + TfLiteEvalTensor* output_tensor = + tflite::micro::GetEvalOutput(context, node, i); + T* output_data = tflite::micro::GetTensorData(output_tensor); + const int copy_size = + output_tensor->dims->data[axis_value] * base_inner_size; + T* output_ptr = output_data + k * copy_size; + for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; + input_ptr += copy_size; + } + } + + return kTfLiteOk; +} + +TfLiteStatus SplitVPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + + MicroContext* micro_context = GetMicroContext(context); + // Dynamic output tensors are needed if axis tensor is not constant. + // But Micro doesn't support dynamic memory allocation, so we only support + // constant axis tensor for now. 
+   TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 2); +   TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), +                      "Non constant axis tensor not supported"); +   micro_context->DeallocateTempTfLiteTensor(axis); +   return kTfLiteOk; +} + +TfLiteStatus SplitVEval(TfLiteContext* context, TfLiteNode* node) { +   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); +   const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2); + +   int axis_value = tflite::micro::GetTensorData<int32_t>(axis)[0]; +   if (axis_value < 0) { +     axis_value += input->dims->size; +   } + +   TF_LITE_ENSURE(context, axis_value >= 0); +   TF_LITE_ENSURE(context, axis_value < input->dims->size); + +   switch (input->type) { +     case kTfLiteFloat32: { +       return SplitImpl<float>(context, node, input, axis_value); +     } +     case kTfLiteInt8: { +       return SplitImpl<int8_t>(context, node, input, axis_value); +     } +     case kTfLiteInt16: { +       return SplitImpl<int16_t>(context, node, input, axis_value); +     } +     case kTfLiteInt32: { +       return SplitImpl<int32_t>(context, node, input, axis_value); +     } +     default: +       MicroPrintf("Type %s currently not supported.", +                   TfLiteTypeGetName(input->type)); +       return kTfLiteError; +   } +   return kTfLiteOk; +} + +}  // namespace + +TFLMRegistration Register_SPLIT_V() { +   return tflite::micro::RegisterOp(nullptr, SplitVPrepare, SplitVEval); +} + +}  // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/squared_difference.cpp b/src/tensorflow/lite/micro/kernels/squared_difference.cpp new file mode 100644 index 0000000..0194d0c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/squared_difference.cpp @@ -0,0 +1,270 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + ArithmeticParams arithmetic_params; +}; + +template +T SquaredDifference(T input1, T input2) { + const T difference = input1 - input2; + return difference * difference; +} + +void* SquaredDifferenceInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +void PrepareQuantized( + const TfLiteQuantizationParams& input1_quantization_params, + const TfLiteQuantizationParams& input2_quantization_params, + const TfLiteQuantizationParams& output_quantization_params, + const int left_shift, const int32_t quantized_activation_min, + const int32_t quantized_activation_max, OpData* data) { + data->arithmetic_params.input1_offset = + -input1_quantization_params.zero_point; + data->arithmetic_params.input2_offset = + 
-input2_quantization_params.zero_point; + data->arithmetic_params.output_offset = output_quantization_params.zero_point; + data->arithmetic_params.left_shift = left_shift; + const double twice_max_input_scale = + 2.0 * static_cast(std::max(input1_quantization_params.scale, + input2_quantization_params.scale)); + const double real_input1_multiplier = + static_cast(input1_quantization_params.scale) / + twice_max_input_scale; + double real_input2_multiplier = + static_cast(input2_quantization_params.scale) / + twice_max_input_scale; + const double real_output_multiplier = + (twice_max_input_scale * twice_max_input_scale) / + static_cast((1 << data->arithmetic_params.left_shift * 2) * + output_quantization_params.scale); + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->arithmetic_params.input1_multiplier, + &data->arithmetic_params.input1_shift); + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->arithmetic_params.input2_multiplier, + &data->arithmetic_params.input2_shift); + QuantizeMultiplier(real_output_multiplier, + &data->arithmetic_params.output_multiplier, + &data->arithmetic_params.output_shift); + data->arithmetic_params.quantized_activation_min = quantized_activation_min; + data->arithmetic_params.quantized_activation_max = quantized_activation_max; +} + +TfLiteStatus SquaredDifferencePrepare(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = reinterpret_cast(node->user_data); + data->requires_broadcast = false; + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output 
= + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + output->type = input2->type; + + const TfLiteQuantizationParams& input1_quantization_params = input1->params; + const TfLiteQuantizationParams& input2_quantization_params = input2->params; + const TfLiteQuantizationParams& output_quantization_params = output->params; + if (input1->type == kTfLiteInt8) { + const int32_t integer_type_min = std::numeric_limits::min(); + const int32_t integer_type_max = std::numeric_limits::max(); + TF_LITE_ENSURE(context, + input1_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + input1_quantization_params.zero_point <= integer_type_max); + TF_LITE_ENSURE(context, + input2_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + input2_quantization_params.zero_point <= integer_type_max); + TF_LITE_ENSURE(context, + output_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + output_quantization_params.zero_point <= integer_type_max); + // leftshift = 7 is selected so that maximum shifted result 255^2 * (1 << (7 + // * 2 )) does not overflow signed 32-bit integer + PrepareQuantized(input1_quantization_params, input2_quantization_params, + output_quantization_params, /*left_shift=*/7, + /*quantized_activation_min*/ integer_type_min, + /*quantized_activation_max*/ integer_type_max, data); + } else if (input1->type == kTfLiteInt16) { + const int32_t integer_type_min = std::numeric_limits::min(); + const int32_t integer_type_max = std::numeric_limits::max(); + TF_LITE_ENSURE(context, input1_quantization_params.zero_point == 0); + TF_LITE_ENSURE(context, input2_quantization_params.zero_point == 0); + TF_LITE_ENSURE(context, output_quantization_params.zero_point == 0); + + // leftshift = 0 as number is already 16-bit. 
so that maximum shifted result + // 32767^2 * (1 << (0 * 2 )) + PrepareQuantized(input1_quantization_params, input2_quantization_params, + output_quantization_params, /*left_shift=*/0, + /*quantized_activation_min*/ integer_type_min, + /*quantized_activation_max*/ integer_type_max, data); + } + + data->requires_broadcast = !HaveSameShapes(input1, input2); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +T SquaredDifference(T x, T y, const ArithmeticParams& params) { + const int32_t input1_val = params.input1_offset + x; + const int32_t input2_val = params.input2_offset + y; + const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); + const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); + const int32_t scaled_input1_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input1_val, params.input1_multiplier, params.input1_shift); + const int32_t scaled_input2_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input2_val, params.input2_multiplier, params.input2_shift); + const int32_t raw_diff = scaled_input1_val - scaled_input2_val; + + // Max of this is 32767^2 * (1 << 0), so won't overflow 32 bits. 
+ const int32_t squared_raw_diff = raw_diff * raw_diff; + const int32_t raw_output = + MultiplyByQuantizedMultiplier(squared_raw_diff, params.output_multiplier, + params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + return static_cast(clamped_output); +} + +template +void EvalQuantizedSquaredDifference(TfLiteContext* context, TfLiteNode* node, + const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + const auto* op_data = static_cast(node->user_data); + if (data->requires_broadcast) { + reference_integer_ops::BroadcastBinaryFunction4DSlow( + op_data->arithmetic_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + reference_integer_ops::CheckArithmeticParams, SquaredDifference); + } else { + const int flat_size = tflite::micro::GetTensorShape(input1).FlatSize(); + reference_integer_ops::ElementWise( + flat_size, op_data->arithmetic_params, + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorData(output), + reference_integer_ops::CheckArithmeticParams, SquaredDifference); + } +} + +template +void EvalSquaredDifference(TfLiteContext* context, TfLiteNode* node, + const OpData* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + if (data->requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), SquaredDifference); + } else { + 
reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), SquaredDifference); + } +} + +TfLiteStatus SquaredDifferenceEval(TfLiteContext* context, TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalSquaredDifference(context, node, data, input1, input2, output); + } else if (output->type == kTfLiteInt32) { + EvalSquaredDifference(context, node, data, input1, input2, output); + } else if (output->type == kTfLiteInt8) { + EvalQuantizedSquaredDifference(context, node, data, input1, input2, + output); + } else if (output->type == kTfLiteInt16) { + EvalQuantizedSquaredDifference(context, node, data, input1, input2, + output); + } else { + MicroPrintf( + "SquaredDifference only supports FLOAT32, INT32 , INT16 and INT8 now, " + "got %d.", + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_SQUARED_DIFFERENCE() { + return tflite::micro::RegisterOp( + SquaredDifferenceInit, SquaredDifferencePrepare, SquaredDifferenceEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/squeeze.cpp b/src/tensorflow/lite/micro/kernels/squeeze.cpp new file mode 100644 index 0000000..3df1363 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/squeeze.cpp @@ -0,0 +1,118 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct SqueezeContext { + SqueezeContext(TfLiteContext* context, TfLiteNode* node) { + params = reinterpret_cast(node->builtin_data); + micro_context = GetMicroContext(context); + input = micro_context->AllocateTempInputTensor(node, 0); + output = micro_context->AllocateTempOutputTensor(node, 0); + } + ~SqueezeContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + } + MicroContext* micro_context; + TfLiteSqueezeParams* params; + TfLiteTensor* input; + TfLiteTensor* output; +}; + +TfLiteStatus SqueezePrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + SqueezeContext op_context(context, node); + const int input_num_dims = 
NumDimensions(op_context.input); + const int num_squeeze_dims = op_context.params->num_squeeze_dims; + + // Determines number of dimensions of output tensor after squeeze. + const TfLiteIntArray* input_dims = op_context.input->dims; + const TfLiteIntArray* output_dims = op_context.output->dims; + const int32_t* squeeze_dims = op_context.params->squeeze_dims; + + constexpr int max_squeeze_dims = 8; + TF_LITE_ENSURE(context, input_num_dims <= max_squeeze_dims); + bool should_squeeze[max_squeeze_dims] = {}; + + if (num_squeeze_dims == 0) { + for (int idx = 0; idx < input_num_dims; ++idx) { + if (input_dims->data[idx] == 1) { + should_squeeze[idx] = true; + } + } + } else { + for (int idx = 0; idx < num_squeeze_dims; ++idx) { + int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + input_num_dims + : squeeze_dims[idx]; + TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims && + input_dims->data[current] == 1); + should_squeeze[current] = true; + } + } + + // Ensure output dimensions are big enough. 
+ for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx) { + if (!should_squeeze[in_idx]) { + TFLITE_CHECK_GE(output_dims->data[out_idx++], input_dims->data[in_idx]); + } + } + + return kTfLiteOk; +} + +TfLiteStatus SqueezeEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + + if (input->type == kTfLiteString) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + size_t input_byte_size; + size_t output_byte_size; + TF_LITE_ENSURE_OK(context, + TfLiteEvalTensorByteLength(input, &input_byte_size)); + TF_LITE_ENSURE_OK(context, + TfLiteEvalTensorByteLength(output, &output_byte_size)); + + TF_LITE_ENSURE_EQ(context, input_byte_size, output_byte_size); + memcpy(output->data.raw, input->data.raw, input_byte_size); + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_SQUEEZE() { + return tflite::micro::RegisterOp(nullptr, SqueezePrepare, SqueezeEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/strided_slice.cpp b/src/tensorflow/lite/micro/kernels/strided_slice.cpp index 2dbe6e1..78507a7 100644 --- a/src/tensorflow/lite/micro/kernels/strided_slice.cpp +++ b/src/tensorflow/lite/micro/kernels/strided_slice.cpp @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,137 +14,30 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/strided_slice.h" -#include +#include #include -#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/strided_slice.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace strided_slice { -constexpr int kInputTensor = 0; -constexpr int kBeginTensor = 1; -constexpr int kEndTensor = 2; -constexpr int kStridesTensor = 3; -constexpr int kOutputTensor = 0; +namespace { -struct StridedSliceContext { - StridedSliceContext(TfLiteContext* context, TfLiteNode* node) { - params = reinterpret_cast(node->builtin_data); - input = GetInput(context, node, kInputTensor); - begin = GetInput(context, node, kBeginTensor); - end = GetInput(context, node, kEndTensor); - strides = GetInput(context, node, kStridesTensor); - output = GetOutput(context, node, kOutputTensor); - dims = NumDimensions(input); - } - const TfLiteStridedSliceParams* params; - const TfLiteTensor* input; - const TfLiteTensor* begin; - const TfLiteTensor* end; - const TfLiteTensor* strides; - TfLiteTensor* output; - int dims; -}; - -// This Op only supports 1-4D cases and since we use the reference 4D -// implementation, the 1-3D tensors are mapped to 4D. 
-const int kMaxDim = 4; - -tflite::StridedSliceParams BuildStridedSliceParams( - StridedSliceContext* op_context) { - tflite::StridedSliceParams op_params; - op_params.start_indices_count = op_context->dims; - op_params.stop_indices_count = op_context->dims; - op_params.strides_count = op_context->dims; - - for (int i = 0; i < op_context->dims; ++i) { - op_params.start_indices[i] = GetTensorData(op_context->begin)[i]; - op_params.stop_indices[i] = GetTensorData(op_context->end)[i]; - op_params.strides[i] = GetTensorData(op_context->strides)[i]; - } - - op_params.begin_mask = op_context->params->begin_mask; - op_params.ellipsis_mask = 0; - op_params.end_mask = op_context->params->end_mask; - op_params.new_axis_mask = 0; - op_params.shrink_axis_mask = op_context->params->shrink_axis_mask; - return op_params; -} - -// Processes the indexing tensors (begin, end and strides) to resize the -// output tensor. This function is callable from both Prepare() and Eval() as -// long as the caller ensures the indexing tensors are present. -TfLiteStatus CheckOutputSize(TfLiteContext* context, - StridedSliceContext* op_context) { - using ::tflite::strided_slice::StartForAxis; - using ::tflite::strided_slice::StopForAxis; - TfLiteIntArray* output_shape = op_context->output->dims; - int shape_size = 0; - auto op_params = BuildStridedSliceParams(op_context); - auto input_shape = GetTensorShape(op_context->input); - for (int idx = 0; idx < op_context->dims; ++idx) { - int32_t stride = GetTensorData(op_context->strides)[idx]; - TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero"); - int32_t begin = StartForAxis(op_params, input_shape, idx); - int32_t end = StopForAxis(op_params, input_shape, idx, begin); - - // When shrinking an axis, the end position does not matter (and can be - // incorrect when negative indexing is used, see Issue #19260). 
Always use - // begin + 1 to generate a length 1 slice, since begin has - // already been adjusted for negative indices by StartForAxis. - const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx); - if (shrink_axis) { - end = begin + 1; - } - - // This is valid for both positive and negative strides - int32_t dim_shape = std::ceil((end - begin) / static_cast(stride)); - dim_shape = dim_shape < 0 ? 0 : dim_shape; - if (!shrink_axis) { - TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape); - shape_size++; - } - } - TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size); - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - StridedSliceParams* op_params = - static_cast(node->user_data); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - StridedSliceContext op_context(context, node); - TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim, - "input dim should not exceed 4"); - auto params = BuildStridedSliceParams(&op_context); - memcpy(op_params, ¶ms, sizeof(StridedSliceParams)); - return CheckOutputSize(context, &op_context); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus StridedSliceEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); const StridedSliceParams& op_params = *(static_cast(node->user_data)); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kStridedSliceInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, 
kStridedSliceOutputTensor); switch (output->type) { case kTfLiteFloat32: reference_ops::StridedSlice(op_params, @@ -153,40 +46,47 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); break; - case kTfLiteUInt8: + case kTfLiteInt8: + reference_ops::StridedSlice(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: reference_ops::StridedSlice( op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); break; - case kTfLiteInt8: + case kTfLiteInt32: + reference_ops::StridedSlice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteBool: reference_ops::StridedSlice(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace strided_slice -TfLiteRegistration Register_STRIDED_SLICE() { - return {/*init=*/strided_slice::Init, - /*free=*/nullptr, - /*prepare=*/strided_slice::Prepare, - /*invoke=*/strided_slice::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration 
Register_STRIDED_SLICE() { + return tflite::micro::RegisterOp(StridedSliceInit, StridedSlicePrepare, + StridedSliceEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/strided_slice.h b/src/tensorflow/lite/micro/kernels/strided_slice.h new file mode 100644 index 0000000..ea9413f --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/strided_slice.h @@ -0,0 +1,40 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +constexpr int kStridedSliceInputTensor = 0; +constexpr int kStridedSliceBeginTensor = 1; +constexpr int kStridedSliceEndTensor = 2; +constexpr int kStridedSliceStridesTensor = 3; +constexpr int kStridedSliceOutputTensor = 0; + +void* StridedSliceInit(TfLiteContext* context, const char* buffer, + size_t length); + +TfLiteStatus StridedSlicePrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_ diff --git a/src/tensorflow/lite/micro/kernels/strided_slice_common.cpp b/src/tensorflow/lite/micro/kernels/strided_slice_common.cpp new file mode 100644 index 0000000..165e1f3 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/strided_slice_common.cpp @@ -0,0 +1,149 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/strided_slice.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/strided_slice.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +struct StridedSliceContext { + StridedSliceContext(TfLiteContext* context, TfLiteNode* node) { + params = reinterpret_cast(node->builtin_data); + micro_context = GetMicroContext(context); + input = + micro_context->AllocateTempInputTensor(node, kStridedSliceInputTensor); + begin = + micro_context->AllocateTempInputTensor(node, kStridedSliceBeginTensor); + end = micro_context->AllocateTempInputTensor(node, kStridedSliceEndTensor); + strides = micro_context->AllocateTempInputTensor( + node, kStridedSliceStridesTensor); + output = micro_context->AllocateTempOutputTensor(node, + kStridedSliceOutputTensor); + dims = NumDimensions(input); + } + ~StridedSliceContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(begin); + micro_context->DeallocateTempTfLiteTensor(end); + micro_context->DeallocateTempTfLiteTensor(strides); + micro_context->DeallocateTempTfLiteTensor(output); + } + const TfLiteStridedSliceParams* params; + MicroContext* micro_context; + TfLiteTensor* input; + TfLiteTensor* begin; + TfLiteTensor* end; + TfLiteTensor* strides; + TfLiteTensor* output; + int dims; +}; + +// This Op only supports 1-4D cases and since we use the reference 4D +// implementation, the 1-3D tensors are mapped to 4D. 
+const int kMaxDim = 4; + +tflite::StridedSliceParams BuildStridedSliceParams( + StridedSliceContext* op_context) { + tflite::StridedSliceParams op_params{}; + op_params.start_indices_count = op_context->dims; + op_params.stop_indices_count = op_context->dims; + op_params.strides_count = op_context->dims; + + for (int i = 0; i < op_context->dims; ++i) { + op_params.start_indices[i] = GetTensorData(op_context->begin)[i]; + op_params.stop_indices[i] = GetTensorData(op_context->end)[i]; + op_params.strides[i] = GetTensorData(op_context->strides)[i]; + } + + op_params.begin_mask = op_context->params->begin_mask; + op_params.ellipsis_mask = 0; + op_params.end_mask = op_context->params->end_mask; + op_params.new_axis_mask = 0; + op_params.shrink_axis_mask = op_context->params->shrink_axis_mask; + return op_params; +} + +// Processes the indexing tensors (begin, end and strides) to resize the +// output tensor. This function is callable from both Prepare() and Eval() as +// long as the caller ensures the indexing tensors are present. +TfLiteStatus CheckOutputSize(TfLiteContext* context, + StridedSliceContext* op_context) { + using ::tflite::strided_slice::StartForAxis; + using ::tflite::strided_slice::StopForAxis; + TfLiteIntArray* output_shape = op_context->output->dims; + int shape_size = 0; + auto op_params = BuildStridedSliceParams(op_context); + auto input_shape = GetTensorShape(op_context->input); + for (int idx = 0; idx < op_context->dims; ++idx) { + int32_t stride = GetTensorData(op_context->strides)[idx]; + TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero"); + int32_t begin = StartForAxis(op_params, input_shape, idx); + int32_t end = StopForAxis(op_params, input_shape, idx, begin); + + // When shrinking an axis, the end position does not matter (and can be + // incorrect when negative indexing is used, see Issue #19260). 
Always use + // begin + 1 to generate a length 1 slice, since begin has + // already been adjusted for negative indices by StartForAxis. + const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx); + if (shrink_axis) { + end = begin + 1; + } + + // This is valid for both positive and negative strides + int32_t dim_shape = std::ceil((end - begin) / static_cast(stride)); + dim_shape = dim_shape < 0 ? 0 : dim_shape; + if (!shrink_axis) { + TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape); + shape_size++; + } + } + TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size); + return kTfLiteOk; +} + +} // namespace + +void* StridedSliceInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams)); +} + +TfLiteStatus StridedSlicePrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + StridedSliceParams* op_params = + static_cast(node->user_data); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + StridedSliceContext op_context(context, node); + TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim, + "input dim should not exceed 4"); + auto params = BuildStridedSliceParams(&op_context); + memcpy(op_params, ¶ms, sizeof(StridedSliceParams)); + return CheckOutputSize(context, &op_context); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/sub.cpp b/src/tensorflow/lite/micro/kernels/sub.cpp index 2cc61a9..930bc0b 100644 --- a/src/tensorflow/lite/micro/kernels/sub.cpp +++ b/src/tensorflow/lite/micro/kernels/sub.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,114 +13,31 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/sub.h" +#include "tensorflow/lite/micro/kernels/sub.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/add.h" #include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/reference/sub.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace sub { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params, - const TfLiteTensor* 
input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const float twice_max_input_scale = - 2 * std::max(input1->params.scale, input2->params.scale); - const double real_input1_multiplier = - static_cast(input1->params.scale / twice_max_input_scale); - const double real_input2_multiplier = - static_cast(input2->params.scale / twice_max_input_scale); - const double real_output_multiplier = - static_cast(twice_max_input_scale / - ((1 << data->left_shift) * output->params.scale)); - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* SubInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* 
input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - return kTfLiteOk; + return context->AllocatePersistentBuffer(context, sizeof(OpDataSub)); } void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, - const OpData* data, const TfLiteEvalTensor* input1, + const OpDataSub* data, const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, @@ -147,31 +64,31 @@ void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, } TfLiteStatus EvalSubQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteSubParams* params, const OpData* data, + TfLiteSubParams* params, const OpDataSub* data, const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = 
reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (output->type == kTfLiteInt8) { + tflite::ArithmeticParams op_params; + op_params.left_shift = data->left_shift; + op_params.input1_offset = data->input1_offset; + op_params.input1_multiplier = data->input1_multiplier; + op_params.input1_shift = data->input1_shift; + op_params.input2_offset = data->input2_offset; + op_params.input2_multiplier = data->input2_multiplier; + op_params.input2_shift = data->input2_shift; + op_params.output_offset = data->output_offset; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + &op_params); + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + switch (output->type) { + case kTfLiteInt8: { if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( + tflite::reference_ops::BroadcastQuantSubSlow( op_params, tflite::micro::GetTensorShape(input1), tflite::micro::GetTensorData(input1), tflite::micro::GetTensorShape(input2), @@ -187,70 +104,65 @@ TfLiteStatus EvalSubQuantized(TfLiteContext* context, TfLiteNode* node, tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); } - } else { + break; + } + case kTfLiteInt16: { if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( + tflite::reference_ops::BroadcastQuantSubSlow( op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input1), tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorData(input2), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); } else { tflite::reference_ops::Sub( 
op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input1), tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorData(input2), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); } + break; } + default: + MicroPrintf("Quantized type %s not currently supported.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; } - return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus SubEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast(node->builtin_data); const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); + tflite::micro::GetEvalInput(context, node, kSubInputTensor1); const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); + tflite::micro::GetEvalInput(context, node, kSubInputTensor2); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kSubOutputTensor); TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); + const OpDataSub& data = *(static_cast(node->user_data)); if (output->type == kTfLiteFloat32) { EvalSub(context, node, params, &data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK(context, EvalSubQuantized(context, node, params, &data, input1, input2, output)); } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace sub - -TfLiteRegistration 
Register_SUB() { - return {/*init=*/sub::Init, - /*free=*/nullptr, - /*prepare=*/sub::Prepare, - /*invoke=*/sub::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +TFLMRegistration Register_SUB() { + return tflite::micro::RegisterOp(SubInit, SubPrepare, SubEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/sub.h b/src/tensorflow/lite/micro/kernels/sub.h new file mode 100644 index 0000000..2990022 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/sub.h @@ -0,0 +1,60 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ + +#include <cstdint> + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kSubInputTensor1; +extern const int kSubInputTensor2; +extern const int kSubOutputTensor; + +struct OpDataSub { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; +}; + +TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataSub* data); + +TfLiteStatus SubPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ diff --git a/src/tensorflow/lite/micro/kernels/sub_common.cpp b/src/tensorflow/lite/micro/kernels/sub_common.cpp new file mode 100644 index 0000000..d664746 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/sub_common.cpp @@ -0,0 +1,107 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/add.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/reference/sub.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/sub.h" + +namespace tflite { + +const int kSubInputTensor1 = 0; +const int kSubInputTensor2 = 1; +const int kSubOutputTensor = 0; + +TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataSub* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + + // The shift is set to 15 in case of 16-bit and 20 in case of 8-bit, + // accordingly. 
In case of 16-bit we have 65535 << 15 which is less than 1 + // << 31, therefore the addition will still fit in a 32 bit accumulator. + data->left_shift = output->type == kTfLiteInt16 ? 15 : 20; + const float twice_max_input_scale = + 2 * std::max(input1->params.scale, input2->params.scale); + const double real_input1_multiplier = + static_cast(input1->params.scale / twice_max_input_scale); + const double real_input2_multiplier = + static_cast(input2->params.scale / twice_max_input_scale); + const double real_output_multiplier = + static_cast(twice_max_input_scale / + ((1 << data->left_shift) * output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } + + return kTfLiteOk; +} + +TfLiteStatus SubPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpDataSub* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kSubInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kSubInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kSubOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataSub(context, params, input1, input2, output, 
data)); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/svdf.h b/src/tensorflow/lite/micro/kernels/svdf.h new file mode 100644 index 0000000..46ece91 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/svdf.h @@ -0,0 +1,100 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +struct OpDataSvdf { + int32_t effective_scale_1_a; + int32_t effective_scale_2_a; + // b versions of each scale are kept at int since the numbers are just the + // shift value - typically between [-32, 32]. + int effective_scale_1_b; + int effective_scale_2_b; + int scratch_tensor_index; + int scratch_output_tensor_index; + + // Cached tensor zero point values for quantized operations. + int input_zero_point; + int output_zero_point; + int activation_state_zero_point; +}; + +// Input tensors. 
+extern const int kSvdfInputTensor; +extern const int kSvdfWeightsFeatureTensor; +extern const int kSvdfWeightsTimeTensor; +extern const int kSvdfBiasTensor; +// This is a variable tensor, and will be modified by this op. +extern const int kSvdfInputActivationStateTensor; + +// Output tensor. +extern const int kSvdfOutputTensor; + +void EvalInt8SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data); + +// TODO(#523): remove 16-bit code when no longer needed. +void EvalInt16SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data); + +void EvalFloatSvdfReference( + TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* weights_feature, + const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, + const TfLiteSVDFParams* params, int scratch_tensor_index, + TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output); + +TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node); + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. 
+TFLMRegistration Register_SVDF(); + +#if defined(HEXAGON) || defined(ARDUINO) || defined(XTENSA) + +TFLMRegistration Register_SVDF_INT8(); + +#else +// Note that while this block gets used for both reference and optimized kernels +// that do not have any specialized implementations, the only goal here is to +// define fallback implementation that allow reference kernels to still be used +// from applications that call a more specific kernel variant. + +inline TFLMRegistration Register_SVDF_INT8() { return Register_SVDF(); } + +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ diff --git a/src/tensorflow/lite/micro/kernels/svdf_common.cpp b/src/tensorflow/lite/micro/kernels/svdf_common.cpp new file mode 100644 index 0000000..d7dd963 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/svdf_common.cpp @@ -0,0 +1,517 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include <cmath> + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/activation_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/svdf.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +/** + * This version of SVDF is specific to TFLite Micro. It contains the following + * differences between the TFLite version: + * + * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time + * for the Micro interpreter. + * 2.) Output dimensions - the TFLite version determines output size and runtime + * and resizes the output tensor. Micro runtime does not support tensor + * resizing. + */ + +const int kSvdfInputTensor = 0; +const int kSvdfWeightsFeatureTensor = 1; +const int kSvdfWeightsTimeTensor = 2; +const int kSvdfBiasTensor = 3; +const int kSvdfInputActivationStateTensor = + 4; // This is a variable tensor, and will be modified by this op. 
+const int kSvdfOutputTensor = 0; + +template <typename T> +void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + const int n_rank = params->rank; + const int n_batch = input_tensor->dims->data[0]; + const int n_input = input_tensor->dims->data[1]; + const int n_filter = weights_feature_tensor->dims->data[0]; + const int n_unit = n_filter / n_rank; + const int n_memory = weights_time_tensor->dims->data[1]; + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + int32_t* scratch_tensor = static_cast<int32_t*>( + context->GetScratchBuffer(context, data.scratch_tensor_index)); + int32_t* scratch_output_tensor = static_cast<int32_t*>( + context->GetScratchBuffer(context, data.scratch_output_tensor_index)); + + // Shift states. + T* const state_ptr = tflite::micro::GetTensorData<T>(activation_state_tensor); + + // Left shift the activation_state. + { + T* new_state_start = state_ptr; + const T* old_state_start = state_ptr + 1; + const T* old_state_end = state_ptr + n_batch * n_filter * n_memory; + while (old_state_start != old_state_end) { + *new_state_start++ = *old_state_start++; + } + } + + // Note: no need to clear the latest activation, matmul is not accumulative. + + // Feature matmul. 
+ { + T* state = tflite::micro::GetTensorData(activation_state_tensor); + const int8_t* input = tflite::micro::GetTensorData(input_tensor); + const int8_t* weight_feature = + tflite::micro::GetTensorData(weights_feature_tensor); + const int32_t output_max = std::numeric_limits::max(); + const int32_t output_min = std::numeric_limits::min(); + T* result_in_batch = state + (n_memory - 1); + for (int b = 0; b < n_batch; b++) { + const int8_t* matrix_ptr = weight_feature; + for (int r = 0; r < n_filter; r++) { + int32_t dot_prod = 0; + const int8_t* vector_in_batch = input + b * n_input; + for (int c = 0; c < n_input; c++) { + dot_prod += + *matrix_ptr++ * (*vector_in_batch++ - data.input_zero_point); + } + dot_prod = MultiplyByQuantizedMultiplier( + dot_prod, data.effective_scale_1_a, data.effective_scale_1_b); + dot_prod = std::min(std::max(output_min, dot_prod), output_max); + // The int16 version of the op assumes a zero_point of 0. This + // code accounts for the potentially non-zero zero_point for the int8 + // version of the op. + *result_in_batch = data.activation_state_zero_point + dot_prod; + result_in_batch += n_memory; + } + } + } + + // Time. + { + for (int b = 0; b < n_batch; ++b) { + int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; + + // Perform batched vector dot product: + const T* vector1_ptr = + tflite::micro::GetTensorData(weights_time_tensor); + const T* vector2_ptr = + tflite::micro::GetTensorData(activation_state_tensor) + + b * n_memory * n_filter; + + for (int i = 0; i < n_filter; i++) { + *scratch_ptr_batch = 0; + for (int j = 0; j < n_memory; j++) { + *scratch_ptr_batch += + *vector1_ptr++ * + (*vector2_ptr++ - data.activation_state_zero_point); + } + scratch_ptr_batch++; + } + } + } + + // Reduce, add bias, rescale, activation. + { + // Add bias. 
+ if (bias_tensor) { + // Vector batch assign: + const int32_t* bias_data = + tflite::micro::GetTensorData(bias_tensor); + for (int i = 0; i < n_batch; ++i) { + int32_t* output_ptr = scratch_output_tensor + i * n_unit; + const int32_t* bias_ptr = bias_data; + for (int j = 0; j < n_unit; ++j) { + *output_ptr++ = *bias_ptr++; + } + } + } else { + int32_t* output_ptr = scratch_output_tensor; + for (int i = 0; i < n_batch * n_unit; ++i) { + *output_ptr++ = 0; + } + } + + // Reduce. + for (int b = 0; b < n_batch; ++b) { + int32_t* output_temp_ptr = scratch_output_tensor + b * n_unit; + int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; + + // Reduction sum vector + for (int i = 0; i < n_unit; ++i) { + for (int j = 0; j < n_rank; ++j) { + output_temp_ptr[i] += *scratch_ptr_batch++; + } + } + } + + // Rescale. + const int32_t output_max = std::numeric_limits::max(); + const int32_t output_min = std::numeric_limits::min(); + for (int i = 0; i < n_batch * n_unit; ++i) { + int32_t x1 = scratch_output_tensor[i]; + int32_t x2 = MultiplyByQuantizedMultiplier(x1, data.effective_scale_2_a, + data.effective_scale_2_b); + int32_t x3 = x2 + data.output_zero_point; + int32_t x4 = std::min(std::max(output_min, x3), output_max); + tflite::micro::GetTensorData(output_tensor)[i] = + static_cast(x4); + } + } +} + +/** + * Generate two versions of the integer code. One with int16_t type for the + * time weights and the activation state, and another one with int8_t for the + * same. 
+ */ + +void EvalInt16SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + EvalIntegerSvdfReference( + context, node, input_tensor, weights_feature_tensor, weights_time_tensor, + bias_tensor, params, activation_state_tensor, output_tensor, data); +} + +void EvalInt8SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + EvalIntegerSvdfReference( + context, node, input_tensor, weights_feature_tensor, weights_time_tensor, + bias_tensor, params, activation_state_tensor, output_tensor, data); +} + +static inline void ApplyTimeWeightsBiasAndActivation( + int batch_size, int memory_size, int num_filters, int num_units, int rank, + const float* const weights_time_ptr, const float* const bias_ptr, + TfLiteFusedActivation activation, float* const state_ptr, + float* const scratch_ptr, float* const output_ptr) { + // Compute matmul(activation_state, weights_time). 
+ for (int b = 0; b < batch_size; ++b) { + // Perform batched vector dot product: + float* scratch_ptr_batch = scratch_ptr + b * num_filters; + const float* vector1_ptr = weights_time_ptr; + const float* vector2_ptr = state_ptr + b * memory_size * num_filters; + for (int i = 0; i < num_filters; ++i) { + *scratch_ptr_batch = 0.f; + for (int j = 0; j < memory_size; ++j) { + *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; + } + scratch_ptr_batch++; + } + } + + // Initialize output with bias if provided. + if (bias_ptr) { + // VectorBatchVectorAssign + for (int i = 0; i < batch_size; ++i) { + float* output_data = output_ptr + i * num_units; + const float* bias_data = bias_ptr; + for (int j = 0; j < num_units; ++j) { + *output_data++ = *bias_data++; + } + } + } else { + float* output_data = output_ptr; + for (int i = 0; i < batch_size * num_units; ++i) { + *output_data++ = 0.0f; + } + } + + // Reduction sum. + for (int b = 0; b < batch_size; ++b) { + float* output_ptr_batch = output_ptr + b * num_units; + float* scratch_ptr_batch = scratch_ptr + b * num_filters; + + // Reduction sum vector + for (int i = 0; i < num_units; ++i) { + for (int j = 0; j < rank; j++) { + output_ptr_batch[i] += *scratch_ptr_batch++; + } + } + } + + // Apply activation. 
+ for (int b = 0; b < batch_size; ++b) { + float* output_ptr_batch = output_ptr + b * num_units; + for (int i = 0; i < num_units; ++i) { + *output_ptr_batch = + tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); + ++output_ptr_batch; + } + } +} + +void EvalFloatSvdfReference( + TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* weights_feature, + const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, + const TfLiteSVDFParams* params, int scratch_tensor_index, + TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { + const int rank = params->rank; + const int batch_size = input->dims->data[0]; + const int input_size = input->dims->data[1]; + const int num_filters = weights_feature->dims->data[0]; + const int num_units = num_filters / rank; + const int memory_size = weights_time->dims->data[1]; + + const float* weights_feature_ptr = + tflite::micro::GetTensorData(weights_feature); + const float* weights_time_ptr = + tflite::micro::GetTensorData(weights_time); + // TODO(#1751): account for optional bias tensor + const float* bias_ptr = tflite::micro::GetTensorData(bias); + const float* input_ptr = tflite::micro::GetTensorData(input); + + float* state_ptr = tflite::micro::GetTensorData(activation_state); + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + float* scratch_ptr = static_cast( + context->GetScratchBuffer(context, scratch_tensor_index)); + + float* output_ptr = tflite::micro::GetTensorData(output); + + // Left shift the activation_state. + { + float* new_state_start = state_ptr; + const float* old_state_start = state_ptr + 1; + const float* old_state_end = + state_ptr + batch_size * num_filters * memory_size; + while (old_state_start != old_state_end) { + *new_state_start++ = *old_state_start++; + } + } + + // Note: no need to clear the latest activation, matmul is not accumulative. 
+ + // Compute conv1d(inputs, weights_feature). + // The activation_state's rightmost column is used to save current cycle + // activation. This is achieved by starting at state_ptr[memory_size - 1] and + // having the stride equal to memory_size. + + // Perform batched matrix vector multiply operation: + { + const float* matrix = weights_feature_ptr; + const float* vector = input_ptr; + float* result = &state_ptr[memory_size - 1]; + float* result_in_batch = result; + for (int i = 0; i < batch_size; ++i) { + const float* matrix_ptr = matrix; + for (int j = 0; j < num_filters; ++j) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + i * input_size; + for (int k = 0; k < input_size; ++k) { + dot_prod += *matrix_ptr++ * *vector_in_batch++; + } + *result_in_batch = dot_prod; + result_in_batch += memory_size; + } + } + } + + ApplyTimeWeightsBiasAndActivation( + batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, + bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); +} + +TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto* params = static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + // Validate Tensor Inputs (dtype depends on quantization): + // [0] = Input, {2, batch_size, input_size} + // [1] = Weights Feature, {2, num_filters, input_size} + // [2] = Weights Time, {2, num_filters, memory_size} + // [3] = Bias (optional), {1, num_units} + // [4] = Activation State (variable), + // {2, batch_size, memory_size * num_filters} + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kSvdfInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* weights_feature = + micro_context->AllocateTempInputTensor(node, kSvdfWeightsFeatureTensor); + TF_LITE_ENSURE(context, weights_feature != nullptr); + TfLiteTensor* weights_time = + micro_context->AllocateTempInputTensor(node, 
kSvdfWeightsTimeTensor); + TF_LITE_ENSURE(context, weights_time != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kSvdfBiasTensor); + TfLiteTensor* activation_state = micro_context->AllocateTempInputTensor( + node, kSvdfInputActivationStateTensor); + TF_LITE_ENSURE(context, activation_state != nullptr); + + // Define input constants based on input tensor definition above: + const int rank = params->rank; + const int input_size = input->dims->data[1]; + const int batch_size = input->dims->data[0]; + const int num_filters = weights_feature->dims->data[0]; + TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); + const int num_units = num_filters / rank; + const int memory_size = weights_time->dims->data[1]; + + // Validate Input Tensor: + TF_LITE_ENSURE(context, + input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); + + // Validate Tensor Output: + // [0] = float/int8_t, {2, batch_size, num_units} + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kSvdfOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); + TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); + + // Validate Weights Feature Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); + TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); + + // Validate Weights Time Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); + + // Validate Optional Bias Input Tensor: + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); + } + + // Validate Activation State Input Tensor: + 
TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], + memory_size * num_filters); + // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. + TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); + + TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataSvdf* data = static_cast(node->user_data); + + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); + TF_LITE_ENSURE(context, (weights_time->type == kTfLiteInt16) || + (weights_time->type == kTfLiteInt8)); + TF_LITE_ENSURE(context, (activation_state->type == kTfLiteInt16) || + (activation_state->type == kTfLiteInt8)); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); + } + + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); + + const double effective_scale_1 = static_cast( + input->params.scale * weights_feature->params.scale / + activation_state->params.scale); + const double effective_scale_2 = + static_cast(activation_state->params.scale * + weights_time->params.scale / output->params.scale); + + // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. 
+ // TODO(#1751): account for optional bias tensor + TF_LITE_ENSURE( + context, + std::abs(static_cast(bias->params.scale) - + static_cast(activation_state->params.scale * + weights_time->params.scale)) < 1e-5); + + QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), + &(data->effective_scale_1_b)); + QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), + &(data->effective_scale_2_b)); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + data->activation_state_zero_point = activation_state->params.zero_point; + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(int32_t), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + + const TfLiteStatus scratch_output_status = + context->RequestScratchBufferInArena( + context, batch_size * num_units * sizeof(int32_t), + &(data->scratch_output_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_output_status); + } else { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); + } + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(float), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(weights_feature); + micro_context->DeallocateTempTfLiteTensor(weights_time); + 
micro_context->DeallocateTempTfLiteTensor(activation_state); + micro_context->DeallocateTempTfLiteTensor(output); + // TODO(#1751): account for optional bias tensor + micro_context->DeallocateTempTfLiteTensor(bias); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/tanh.cpp b/src/tensorflow/lite/micro/kernels/tanh.cpp index 7743a87..e695889 100644 --- a/src/tensorflow/lite/micro/kernels/tanh.cpp +++ b/src/tensorflow/lite/micro/kernels/tanh.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,13 +24,13 @@ limitations under the License. #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { + namespace { + constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; @@ -48,16 +48,19 @@ void* TanhInit(TfLiteContext* context, const char* buffer, size_t length) { TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, OpData* data) { + MicroContext* micro_context = GetMicroContext(context); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, 
input->type, output->type); - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { + if (input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = static_cast(input->params.scale) * @@ -69,6 +72,62 @@ TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, data->input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); } + + if (input->type == kTfLiteInt16) { + static constexpr int kInputIntegerBits = 3; + static constexpr int kOutputFractionalBits = 15; + + // These operators are implemented in fixed-point arithmetic, + // which intrinsically wants symmetric ranges (zero_point==0) + // and power-of-two scales (power-of-two is abbreviated below as POT). + // While more general support would be possible by means of rescaling, + // that would add some overhead and some loss of accuracy and wouldn't + // be used at the moment as current quantized LSTM applications are + // happy with symmetric, power-of-two-scales quantization. So we just + // implement that narrow case only for now. + + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + + int input_scale_log2_rounded; + bool param_scale_pot = + CheckedLog2(input->params.scale, &input_scale_log2_rounded); + + data->input_left_shift = + (15 - kInputIntegerBits) + input_scale_log2_rounded; + param_scale_pot &= + (data->input_left_shift == 0 || data->input_left_shift == 1); + + if (param_scale_pot) { + data->input_multiplier = 0; + } else { + // Calculate multiplier to change input scale to 1/(3*4096) + // as required by the table lookup. + // The number 3.0 in the multiplier comes from here, + // because the interval is [-10.7, 10.7] instead of [-8, 8]. + // So, in this scaling +/-2^17 represents +/-10.7. 
+ + double multiplier = + static_cast(input->params.scale) * 4096.0 * 3.0; + data->input_left_shift = 0; + + while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) { + data->input_left_shift++; + multiplier = multiplier * 2.0; + } + + data->input_multiplier = static_cast(multiplier); + } + TFLITE_DCHECK_LE(data->input_multiplier, 32767); + int output_scale_log2_rounded; + TF_LITE_ENSURE( + context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); + TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, + -kOutputFractionalBits); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -77,13 +136,16 @@ TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); data->input_zero_point = input->params.zero_point; - return CalculateArithmeticOpData(context, node, data); -} + TF_LITE_ENSURE_OK(context, CalculateArithmeticOpData(context, node, data)); -} // namespace + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = @@ -103,25 +165,12 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } break; case kTfLiteInt16: { - TanhParams params; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteUInt8: { - TanhParams params; - params.input_zero_point = data.input_zero_point; - 
params.input_range_radius = data.input_range_radius; - params.input_multiplier = data.input_multiplier; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - + reference_integer_ops::Tanh( + data.input_multiplier, data.input_left_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); return kTfLiteOk; } break; case kTfLiteInt8: { @@ -134,25 +183,17 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } break; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type), context); return kTfLiteError; } } -} // namespace activations - -TfLiteRegistration Register_TANH() { - return {/*init=*/activations::TanhInit, - /*free=*/nullptr, - /*prepare=*/activations::TanhPrepare, - /*invoke=*/activations::TanhEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; +} // namespace + +TFLMRegistration Register_TANH() { + return tflite::micro::RegisterOp(TanhInit, TanhPrepare, TanhEval); } -} // namespace micro -} // namespace ops + } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/transpose.cpp b/src/tensorflow/lite/micro/kernels/transpose.cpp new file mode 100644 index 0000000..fd17e89 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/transpose.cpp @@ -0,0 +1,122 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/kernels/internal/reference/transpose.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kPermTensor = 1; +constexpr int kOutputTensor = 0; + +struct TransposeContext { + TransposeContext(TfLiteContext* context, TfLiteNode* node) { + micro_context = GetMicroContext(context); + input = micro_context->AllocateTempInputTensor(node, kInputTensor); + perm = micro_context->AllocateTempInputTensor(node, kPermTensor); + output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); + } + ~TransposeContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(perm); + micro_context->DeallocateTempTfLiteTensor(output); + } + MicroContext* micro_context; + TfLiteTensor* input; + TfLiteTensor* perm; + TfLiteTensor* output; +}; + +TfLiteStatus TransposePrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TransposeContext op_context(context, node); + + // Ensure validity of input tensor. 
+ TF_LITE_ENSURE_MSG(context, NumDimensions(op_context.input) <= 5, + "Transpose op only supports 1D-5D input arrays."); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type, + op_context.output->type); + + int dims = NumDimensions(op_context.input); + const int32_t* perm_data = GetTensorData(op_context.perm); + + // Ensure validity of the permutations tensor as a 1D tensor. + TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.perm), 1); + TF_LITE_ENSURE_EQ(context, op_context.perm->dims->data[0], dims); + for (int idx = 0; idx < dims; ++idx) { + TF_LITE_ENSURE_MSG(context, (perm_data[idx] >= 0 && perm_data[idx] < dims), + "Transpose op permutations array is out of bounds."); + } + + return kTfLiteOk; +} + +TfLiteStatus TransposeEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* perm_tensor = + tflite::micro::GetEvalInput(context, node, kPermTensor); + const int32_t* perm_data = perm_tensor->data.i32; + const int size = perm_tensor->dims->data[0]; + TransposeParams params; + params.perm_count = size; + for (int i = 0; i < size; ++i) { + params.perm[i] = perm_data[i]; + } + + // Transpose kernel only does rearranging values not numeric evaluations + // on each cell. It's safe to implement per size of scalar type and this + // trick keeps the total code size in a reasonable range. 
+ const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: + reference_ops::Transpose(params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::Transpose(params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf( + "Type %s is currently not supported by Transpose. " + "Only float32 and int8 is supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TFLMRegistration Register_TRANSPOSE() { + return tflite::micro::RegisterOp(nullptr, TransposePrepare, TransposeEval); +} +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/transpose_conv.h b/src/tensorflow/lite/micro/kernels/transpose_conv.h new file mode 100644 index 0000000..b1d9b67 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/transpose_conv.h @@ -0,0 +1,63 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/micro_common.h" + +namespace tflite { + +// For the TfLite transpose_conv implementation, input tensor 0 corresponds to +// the OutputShapeTensor. However, since TFLM does not support dynamic tensors, +// the TFLM implementation ignores input tensor 0 and the only inputs we care +// about are kFilterTensor, kInputTensor and kBiasTensor. +constexpr int kTransposeConvFilterTensor = 1; +constexpr int kTransposeConvInputTensor = 2; +constexpr int kTransposeConvBiasTensor = 3; +constexpr int kTransposeConvOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kTransposeConvQuantizedDimension = 0; + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. +TFLMRegistration Register_TRANSPOSE_CONV(); + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8. +TFLMRegistration Register_TRANSPOSE_CONV_INT8(); + +#else +// Note that while this block gets used for both reference and optimized kernels +// that do not have any specialized implementations, the only goal here is to +// define fallback implementation that allow reference kernels to still be used +// from applications that call a more specific kernel variant. 
+ +inline TFLMRegistration Register_TRANSPOSE_CONV_INT8() { + return Register_TRANSPOSE_CONV(); +} + +#endif + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_TRANSPOSE_CONV_H_ diff --git a/src/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h b/src/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h new file mode 100644 index 0000000..06d7ef3 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h @@ -0,0 +1,56 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +// This is the most generic TFLMRegistration. The actual supported types +// may still be target dependent. The only requirement is that every +// implementation (reference or optimized) must define this function. 
+// TODO(b/230666079): resolve conflict with xtensa implementation +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM(); + +#if defined(ARDUINO) +// Returns a TFLMRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8(); + +// Returns a TFLMRegistration struct for kernel variant that only supports +// int16 activations and int8 weights and uses the latency optimized +// implementations. +TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16(); + +#else +inline TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8() { + return Register_UNIDIRECTIONAL_SEQUENCE_LSTM(); +} + +inline TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT16() { + return Register_UNIDIRECTIONAL_SEQUENCE_LSTM(); +} +#endif + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_ diff --git a/src/tensorflow/lite/micro/kernels/unpack.cpp b/src/tensorflow/lite/micro/kernels/unpack.cpp index 557cc57..9ce1683 100644 --- a/src/tensorflow/lite/micro/kernels/unpack.cpp +++ b/src/tensorflow/lite/micro/kernels/unpack.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,11 +18,10 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace unpack { + namespace { constexpr int kInputTensor = 0; @@ -73,7 +72,7 @@ TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node, return kTfLiteOk; } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus UnpackEval(TfLiteContext* context, TfLiteNode* node) { TfLiteUnpackParams* data = reinterpret_cast(node->builtin_data); @@ -87,35 +86,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteInt32: { return UnpackImpl(context, node, input, data->num, data->axis); } - case kTfLiteUInt8: { - return UnpackImpl(context, node, input, data->num, data->axis); - } case kTfLiteInt8: { return UnpackImpl(context, node, input, data->num, data->axis); } default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Type '%s' is not supported by unpack.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } return kTfLiteOk; } + } // namespace -} // namespace unpack - -TfLiteRegistration Register_UNPACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/unpack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + +TFLMRegistration Register_UNPACK() { + return tflite::micro::RegisterOp(nullptr, nullptr, UnpackEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/var_handle.cpp b/src/tensorflow/lite/micro/kernels/var_handle.cpp new file mode 100644 index 0000000..0efb28c --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/var_handle.cpp @@ -0,0 +1,94 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +struct OpData { + int32_t resource_id; +}; + +void* VarHandleInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus VarHandlePrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "VAR_HANDLE requires resource variables. 
Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + op_data->resource_id = + resources->CreateIdIfNoneFound(params->container, params->shared_name); + if (op_data->resource_id < 0) { + return kTfLiteError; + } + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TFLITE_DCHECK(output != nullptr); + + // Assign saved resource_id so this output tensor will always return the + // correct resource id. + output->data.i32 = &op_data->resource_id; + + return kTfLiteOk; +} + +TfLiteStatus VarHandleEval(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TFLITE_DCHECK(output != nullptr); + + // Assign saved resource_id so this output tensor will always return the + // correct resource id. + output->data.i32 = &op_data->resource_id; + return kTfLiteOk; +} + +} // namespace. + +TFLMRegistration Register_VAR_HANDLE() { + return tflite::micro::RegisterOp(VarHandleInit, VarHandlePrepare, + VarHandleEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/while.cpp b/src/tensorflow/lite/micro/kernels/while.cpp new file mode 100644 index 0000000..a11adeb --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/while.cpp @@ -0,0 +1,133 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +struct OpData { + int cond_subgraph_index; + int body_subgraph_index; +}; + +void* WhileInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus WhilePrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + + op_data->cond_subgraph_index = params->cond_subgraph_index; + op_data->body_subgraph_index = params->body_subgraph_index; + + // The first input is the condition. 
+ tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + + size_t num_inputs = node->inputs->size; + size_t num_outputs = node->outputs->size; + + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE(context, + op_data->cond_subgraph_index < graph_info.NumSubgraphs()); + TF_LITE_ENSURE(context, + op_data->body_subgraph_index < graph_info.NumSubgraphs()); + + TF_LITE_ENSURE_EQ(context, num_inputs, + graph_info.NumSubgraphInputs(op_data->cond_subgraph_index)); + TF_LITE_ENSURE_EQ(context, num_inputs, + graph_info.NumSubgraphInputs(op_data->body_subgraph_index)); + TF_LITE_ENSURE_EQ(context, num_inputs, num_outputs); + TF_LITE_ENSURE_EQ( + context, num_outputs, + graph_info.NumSubgraphOutputs(op_data->body_subgraph_index)); + + return kTfLiteOk; +} + +TfLiteStatus WhileEval(TfLiteContext* context, TfLiteNode* node) { + const OpData* op_data = reinterpret_cast(node->user_data); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph* graph_info = µ_context->graph(); + + TF_LITE_ENSURE_OK(context, + tflite::micro::CopyOpInputsToSubgraphInputs( + context, node, graph_info, op_data->cond_subgraph_index, + /*first_tensor_idx=*/0)); + + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(op_data->cond_subgraph_index)); + + TfLiteEvalTensor* cond_subgraph_output = graph_info->GetSubgraphOutput( + op_data->cond_subgraph_index, /*tensor_idx=*/0); + bool cond_value = cond_subgraph_output->data.b[0]; + + TF_LITE_ENSURE_OK(context, + tflite::micro::CopyOpInputsToSubgraphInputs( + context, node, graph_info, op_data->body_subgraph_index, + /*first_tensor_idx=*/0)); + TF_LITE_ENSURE_OK(context, + tflite::micro::CopyOpInputsToOpOutputs(context, node)); + + while (cond_value == true) { + // Copy output of this iteration back to the body input. 
+ TF_LITE_ENSURE_OK( + context, tflite::micro::CopyOpOutputsToSubgraphInputs( + context, node, graph_info, op_data->body_subgraph_index)); + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(op_data->body_subgraph_index)); + + TF_LITE_ENSURE_OK( + context, tflite::micro::CopySubgraphOutputsToOpOutputs( + context, node, graph_info, op_data->body_subgraph_index)); + TF_LITE_ENSURE_OK( + context, tflite::micro::CopyOpOutputsToSubgraphInputs( + context, node, graph_info, op_data->cond_subgraph_index)); + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(op_data->cond_subgraph_index)); + + cond_subgraph_output = graph_info->GetSubgraphOutput( + op_data->cond_subgraph_index, /*tensor_idx=*/0); + cond_value = cond_subgraph_output->data.b[0]; + } + + return kTfLiteOk; +} + +} // namespace. + +TFLMRegistration Register_WHILE() { + return tflite::micro::RegisterOp(WhileInit, WhilePrepare, WhileEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/kernels/zeros_like.cpp b/src/tensorflow/lite/micro/kernels/zeros_like.cpp new file mode 100644 index 0000000..eb1f9c6 --- /dev/null +++ b/src/tensorflow/lite/micro/kernels/zeros_like.cpp @@ -0,0 +1,88 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus ZerosLikePrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + output->type = input->type; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +void resetZeros(T* out, const int num_elements) { + for (int i = 0; i < num_elements; ++i) { + out[i] = static_cast(0); + } +} + +TfLiteStatus ZerosLikeEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + switch (input->type) { + case kTfLiteInt64: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteInt32: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteInt8: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteFloat32: + resetZeros(tflite::micro::GetTensorData(output), 
flat_size); + break; + default: + MicroPrintf( + "ZerosLike only currently supports int64, int32, " + "and float32, got %d.", + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TFLMRegistration Register_ZEROS_LIKE() { + return tflite::micro::RegisterOp(nullptr, ZerosLikePrepare, ZerosLikeEval); +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/memory_helpers.cpp b/src/tensorflow/lite/micro/memory_helpers.cpp index c515c5e..0f4257e 100644 --- a/src/tensorflow/lite/micro/memory_helpers.cpp +++ b/src/tensorflow/lite/micro/memory_helpers.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,11 +18,10 @@ limitations under the License. #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -48,18 +47,33 @@ size_t AlignSizeUp(size_t size, size_t alignment) { TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { switch (type) { + case kTfLiteFloat16: + *size = sizeof(int16_t); + break; + case kTfLiteBFloat16: + *size = sizeof(int16_t); + break; case kTfLiteFloat32: *size = sizeof(float); break; + case kTfLiteFloat64: + *size = sizeof(double); + break; case kTfLiteInt16: *size = sizeof(int16_t); break; case kTfLiteInt32: *size = sizeof(int32_t); break; + case kTfLiteUInt32: + *size = sizeof(uint32_t); + break; case kTfLiteUInt8: *size = 
sizeof(uint8_t); break; + case kTfLiteUInt16: + *size = sizeof(uint16_t); + break; case kTfLiteInt8: *size = sizeof(int8_t); break; @@ -72,12 +86,18 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { case kTfLiteBool: *size = sizeof(bool); break; + case kTfLiteResource: + *size = sizeof(int32_t); + break; case kTfLiteComplex64: *size = sizeof(float) * 2; break; case kTfLiteComplex128: *size = sizeof(double) * 2; break; + case kTfLiteInt4: + *size = sizeof(int8_t); + break; default: return kTfLiteError; } @@ -85,20 +105,19 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { } TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter) { + size_t* bytes, size_t* type_size) { int element_count = 1; // If flatbuffer_tensor.shape == nullptr, then flatbuffer_tensor is a scalar // so has 1 element. if (flatbuffer_tensor.shape() != nullptr) { - for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) { + for (size_t n = 0; n < flatbuffer_tensor.shape()->size(); ++n) { element_count *= flatbuffer_tensor.shape()->Get(n); } } TfLiteType tf_lite_type; - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &tf_lite_type, error_reporter)); + TF_LITE_ENSURE_STATUS( + ConvertTensorType(flatbuffer_tensor.type(), &tf_lite_type)); TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(tf_lite_type, type_size)); *bytes = element_count * (*type_size); return kTfLiteOk; diff --git a/src/tensorflow/lite/micro/memory_helpers.h b/src/tensorflow/lite/micro/memory_helpers.h index 8f5526c..f3392e4 100644 --- a/src/tensorflow/lite/micro/memory_helpers.h +++ b/src/tensorflow/lite/micro/memory_helpers.h @@ -19,7 +19,6 @@ limitations under the License. 
#include #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -33,13 +32,19 @@ uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); // Returns an increased size that's a multiple of alignment. size_t AlignSizeUp(size_t size, size_t alignment); +// Templated version of AlignSizeUp +// Returns an increased size that's a multiple of alignment. +template +size_t AlignSizeUp(size_t count = 1) { + return AlignSizeUp(sizeof(T) * count, alignof(T)); +} + // Returns size in bytes for a given TfLiteType. TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size); // How many bytes are needed to hold a tensor's contents. TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter); + size_t* bytes, size_t* type_size); // How many bytes are used in a TfLiteEvalTensor instance. The byte length is // returned in out_bytes. diff --git a/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp b/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp index 39991ab..6209930 100644 --- a/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp +++ b/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,8 +15,27 @@ limitations under the License. 
#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "tensorflow/lite/micro/micro_log.h" + namespace tflite { +namespace { + +// Returns a character representing a numbered buffer +// for GreedyMemoryPlanner::PrintMemoryPlan() +char GetOrdinalCharacter(int i) { + if (i < 10) { + return '0' + i; + } else if (i < 36) { + return 'a' + (i - 10); + } else if (i < 62) { + return 'A' + (i - 36); + } + return GetOrdinalCharacter(i % 62); +} + +} // namespace + // Simple stable in-place sort function. Not time-efficient for large arrays. // Would normally be in an anonymous namespace to keep it private, but we want // to be able to test it externally. @@ -38,9 +57,14 @@ void ReverseSortInPlace(int* values, int* ids, int size) { } while (any_swapped); } -GreedyMemoryPlanner::GreedyMemoryPlanner(unsigned char* scratch_buffer, - int scratch_buffer_size) - : buffer_count_(0), need_to_calculate_offsets_(true) { +GreedyMemoryPlanner::GreedyMemoryPlanner() {} + +TfLiteStatus GreedyMemoryPlanner::Init(unsigned char* scratch_buffer, + int scratch_buffer_size) { + // Reset internal states + buffer_count_ = 0; + need_to_calculate_offsets_ = true; + // Allocate the arrays we need within the scratch buffer arena. max_buffer_count_ = scratch_buffer_size / per_buffer_size(); @@ -58,18 +82,17 @@ GreedyMemoryPlanner::GreedyMemoryPlanner(unsigned char* scratch_buffer, next_free += sizeof(ListEntry) * max_buffer_count_; buffer_offsets_ = reinterpret_cast(next_free); + return kTfLiteOk; } GreedyMemoryPlanner::~GreedyMemoryPlanner() { // We don't own the scratch buffer, so don't deallocate anything. 
} -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { +TfLiteStatus GreedyMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used) { if (buffer_count_ >= max_buffer_count_) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - max_buffer_count_); + MicroPrintf("Too many buffers (max is %d)", max_buffer_count_); return kTfLiteError; } BufferRequirements* current = &requirements_[buffer_count_]; @@ -82,12 +105,11 @@ TfLiteStatus GreedyMemoryPlanner::AddBuffer( return kTfLiteOk; } -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used, int offline_offset) { +TfLiteStatus GreedyMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used, + int offline_offset) { BufferRequirements* current = &requirements_[buffer_count_]; - if (AddBuffer(error_reporter, size, first_time_used, last_time_used) != - kTfLiteOk) { + if (AddBuffer(size, first_time_used, last_time_used) != kTfLiteOk) { return kTfLiteError; } current->offline_offset = offline_offset; @@ -297,8 +319,6 @@ size_t GreedyMemoryPlanner::GetMaximumMemorySize() { while (entry) { BufferRequirements* requirements = &requirements_[entry->requirements_index]; - // TODO(b/148246793): Update all size and offset variables types from - // int to size_t const size_t current_size = entry->offset + requirements->size; if (current_size > max_size) { max_size = current_size; @@ -311,17 +331,19 @@ size_t GreedyMemoryPlanner::GetMaximumMemorySize() { return max_size; } -void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { +void GreedyMemoryPlanner::PrintMemoryPlan() { CalculateOffsetsIfNeeded(); for (int i = 0; i < buffer_count_; ++i) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Planner buffer ID: %d, calculated offset: %d, size required: %d, " - "first_time_created: %d, " - 
"last_time_used: %d", - i, buffer_offsets_[i], requirements_[i].size, - requirements_[i].first_time_used, requirements_[i].last_time_used); + char c = '*'; + if (requirements_[i].first_time_used != requirements_[i].last_time_used) { + // not a scratch buffer nor subgraph output tensor + c = GetOrdinalCharacter(i); + } + MicroPrintf("%c (id=%d): size=%d, offset=%d, first_used=%d last_used=%d", c, + i, requirements_[i].size, buffer_offsets_[i], + requirements_[i].first_time_used, + requirements_[i].last_time_used); } constexpr int kLineWidth = 80; @@ -345,6 +367,7 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { for (int c = 0; c < kLineWidth; ++c) { line[c] = '.'; } + int memory_use = 0; for (int i = 0; i < buffer_count_; ++i) { BufferRequirements* requirements = &requirements_[i]; if ((t < requirements->first_time_used) || @@ -356,47 +379,44 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { continue; } const int size = requirements->size; + memory_use += size; const int line_start = (offset * kLineWidth) / max_size; const int line_end = ((offset + size) * kLineWidth) / max_size; for (int n = line_start; n < line_end; ++n) { if (line[n] == '.') { - char display; - if (i < 10) { - display = '0' + i; - } else if (i < 36) { - display = 'a' + (i - 10); - } else if (i < 62) { - display = 'A' + (i - 36); + if (requirements->first_time_used == requirements->last_time_used) { + // scratch buffer or subgraph output tensor + line[n] = '*'; } else { - display = '*'; + line[n] = GetOrdinalCharacter(i); } - line[n] = display; } else { line[n] = '!'; } } } line[kLineWidth] = 0; - TF_LITE_REPORT_ERROR(error_reporter, "%s", (const char*)line); + + MicroPrintf("%4d: %s (%dk)", t, (const char*)line, + (memory_use + 1023) / 1024); } } int GreedyMemoryPlanner::GetBufferCount() { return buffer_count_; } -TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { 
+TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer(int buffer_index, + int* offset) { CalculateOffsetsIfNeeded(); if ((buffer_index < 0) || (buffer_index >= buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, buffer_count_); + MicroPrintf("buffer index %d is outside range 0 to %d", buffer_index, + buffer_count_); return kTfLiteError; } *offset = buffer_offsets_[buffer_index]; return kTfLiteOk; } -bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) { +bool GreedyMemoryPlanner::DoAnyBuffersOverlap() { CalculateOffsetsIfNeeded(); bool were_overlaps_found = false; for (int i = 0; i < buffer_count_; ++i) { @@ -425,10 +445,10 @@ bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) { continue; } were_overlaps_found = true; - TF_LITE_REPORT_ERROR( - error_reporter, "Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", - i, a_first_time_used, a_last_time_used, a_start_offset, a_end_offset, - j, b_first_time_used, b_last_time_used, b_start_offset, b_end_offset); + MicroPrintf("Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", i, + a_first_time_used, a_last_time_used, a_start_offset, + a_end_offset, j, b_first_time_used, b_last_time_used, + b_start_offset, b_end_offset); } } return were_overlaps_found; diff --git a/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h b/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h index f5f26a8..b2cdb61 100644 --- a/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +++ b/src/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h @@ -17,7 +17,7 @@ limitations under the License. 
#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_GREEDY_MEMORY_PLANNER_H_ #include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/memory_planner/memory_planner.h" +#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" namespace tflite { @@ -43,29 +43,32 @@ constexpr int kOnlinePlannedBuffer = -1; // // This is not guaranteed to produce the best placement, since that's an // NP-Complete problem, but in practice it should produce one that's decent. -class GreedyMemoryPlanner : public MemoryPlanner { +class GreedyMemoryPlanner : public MicroMemoryPlanner { public: - // You need to pass in an area of memory to be used for planning. This memory - // needs to have a lifetime as long as the planner, but isn't owned by this - // object, so management should be handled by the client. This is so it can be - // stack or globally allocated if necessary on devices without dynamic memory - // allocation. How many buffers can be planned for will depend on the size of - // this scratch memory, so you should enlarge it if you see an error when - // calling AddBuffer(). The memory can be reused once you're done with the - // planner, as long as you copy the calculated offsets to another location. - // Each buffer requires about 36 bytes of scratch. - GreedyMemoryPlanner(unsigned char* scratch_buffer, int scratch_buffer_size); + GreedyMemoryPlanner(); ~GreedyMemoryPlanner() override; + // You need to pass in an area of memory to be used for planning. The client + // should ensure the validity of the memory when it needs to use this object. + // This memory isn't owned by this object, so management should be handled by + // the client. This is so it can be stack or globally allocated if necessary + // on devices without dynamic memory allocation. How many buffers can be + // planned for will depend on the size of this scratch memory, so you should + // enlarge it if you see an error when calling AddBuffer(). 
The memory can be + // reused once you're done with the planner, as long as you copy the + // calculated offsets to another location. Each buffer requires about 36 bytes + // of scratch. + TfLiteStatus Init(unsigned char* scratch_buffer, + int scratch_buffer_size) override; + // Record details of a buffer we want to place. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; // Record details of an offline planned buffer offset we want to place. // offline_offset is the buffer offset from the start of the arena. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used, - int offline_offset); + TfLiteStatus AddBuffer(int size, int first_time_used, int last_time_used, + int offline_offset) override; // Returns the high-water mark of used memory. This is the minimum size of a // memory arena you'd need to allocate to hold these buffers. @@ -77,15 +80,14 @@ class GreedyMemoryPlanner : public MemoryPlanner { // Where a given buffer should be placed in the memory arena. // This information is stored in the memory arena itself, so once the arena // is used for inference, it will be overwritten. - TfLiteStatus GetOffsetForBuffer(ErrorReporter* error_reporter, - int buffer_index, int* offset) override; + TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) override; // Prints an ascii-art diagram of the buffer layout plan. - void PrintMemoryPlan(ErrorReporter* error_reporter); + void PrintMemoryPlan() override; // Debug method to check whether any buffer allocations are overlapping. This // is an O(N^2) complexity operation, so only use for testing. - bool DoAnyBuffersOverlap(ErrorReporter* error_reporter); + bool DoAnyBuffersOverlap(); // Used to store a list of buffers ordered by their offset. 
struct ListEntry { @@ -105,6 +107,11 @@ class GreedyMemoryPlanner : public MemoryPlanner { return per_buffer_size; } + // Returns False because the GreedyMemoryPlanner doesn't preserves all tensors + // after invocation. Do to the fact that tensors that tensor data for tensors + // that aren't being used during a phase of invocation are overwritten. + bool preserves_all_tensors() const override { return false; } + private: // Whether a buffer is active in a given time range. bool DoesEntryOverlapInTime(const ListEntry* entry, const int first_time_used, diff --git a/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp b/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp index d25a4f2..00d707a 100644 --- a/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp +++ b/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp @@ -15,18 +15,21 @@ limitations under the License. #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" +#include "tensorflow/lite/micro/micro_log.h" + namespace tflite { +// C++11 requires defining a constexpr static class member in a .cc file +constexpr int tflite::LinearMemoryPlanner::kMaxBufferCount; + LinearMemoryPlanner::LinearMemoryPlanner() : current_buffer_count_(0), next_free_offset_(0) {} LinearMemoryPlanner::~LinearMemoryPlanner() {} -TfLiteStatus LinearMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { +TfLiteStatus LinearMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used) { if (current_buffer_count_ >= kMaxBufferCount) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - kMaxBufferCount); + MicroPrintf("Too many buffers (max is %d)", kMaxBufferCount); return kTfLiteError; } buffer_offsets_[current_buffer_count_] = next_free_offset_; @@ -39,12 +42,11 @@ size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } int 
LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } -TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { +TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer(int buffer_index, + int* offset) { if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, current_buffer_count_); + MicroPrintf("buffer index %d is outside range 0 to %d", buffer_index, + current_buffer_count_); return kTfLiteError; } *offset = buffer_offsets_[buffer_index]; diff --git a/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h b/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h index 4d77e77..9850569 100644 --- a/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +++ b/src/tensorflow/lite/micro/memory_planner/linear_memory_planner.h @@ -17,24 +17,27 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ #include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/memory_planner/memory_planner.h" +#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" namespace tflite { // The simplest possible memory planner that just lays out all buffers at // increasing offsets without trying to reuse memory. 
-class LinearMemoryPlanner : public MemoryPlanner { +class LinearMemoryPlanner : public MicroMemoryPlanner { public: LinearMemoryPlanner(); ~LinearMemoryPlanner() override; - TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; size_t GetMaximumMemorySize() override; int GetBufferCount() override; - TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) override; + TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) override; + + // Returns True because the LinearMemoryPlanner preserves all tensors after + // invocation. + bool preserves_all_tensors() const override { return true; } private: static constexpr int kMaxBufferCount = 1024; diff --git a/src/tensorflow/lite/micro/memory_planner/memory_plan_struct.h b/src/tensorflow/lite/micro/memory_planner/memory_plan_struct.h new file mode 100644 index 0000000..c8c431c --- /dev/null +++ b/src/tensorflow/lite/micro/memory_planner/memory_plan_struct.h @@ -0,0 +1,73 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ +#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ + +#include +#include + +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +// This is an experimental feature and subjected to change. +// More description is available at +// tensorflow/lite/micro/docs/offline_memory_plan.md. + +// Describes a buffer's layout inside an arena. This struct should be kept as +// small as possible for memory footprint sensitive applications and should use +// only primitive fields, making it easy to adjust offline. +struct BufferDescriptor { + // Starting offset inside an arena for this buffer. + // Offset is the minimum information needed for the buffer. The user knows + // the model and the size of each buffer in order to lay out a valid buffer + // plan. + int32_t offset; +}; + +// A structure describing the lay out of buffers inside an arena. +struct BufferPlan { + // Number of buffers described in this plan. + int32_t buffer_count; + + // Each element describes one buffer. + // Buffer index is implicit by the order of AddBuffer() call. + // Specifically, indices of activation tensors are 0 … N-1 where N is the + // number of activation tensors. + // The rest are based on the order of OP requests. + // + // This is a flexible array member and should ideally be + // arena_entries[]; However, in order to support a variety + // of compilers (and without needing to add ifdef's), we + // are implementing the flexible array member with an array of + // length 1 as the last member of the struct. When the size of a BufferPlan + // is needed, use the provided SizeOfBufferPlan(buffer_count) that + // accounts for this implemenatation caveat. + BufferDescriptor buffer_plan_entries[1]; +}; + +// Returns size of a BufferPlan given a buffer count. 
This size is compile time +// known if buffer_count is a compile time constant. +constexpr size_t SizeOfBufferPlan(int32_t buffer_count) { + // Minus 1 because a BufferPlan struct have a BufferDescriptor already. + // Max to provide a lower bound for the corner case of buffer_count = 0. + return sizeof(BufferPlan) + + sizeof(BufferDescriptor) * Max(buffer_count - 1, 0); +} + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ diff --git a/src/tensorflow/lite/micro/memory_planner/memory_planner.h b/src/tensorflow/lite/micro/memory_planner/memory_planner.h deleted file mode 100644 index 2c39fbe..0000000 --- a/src/tensorflow/lite/micro/memory_planner/memory_planner.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" - -namespace tflite { - -// Interface class for planning the layout of memory buffers during the -// execution of a graph. -// It's designed to be used by a client that iterates in any order through the -// buffers it wants to lay out, and then calls the getter functions for -// information about the calculated layout. 
For example: -// -// SomeMemoryPlanner planner; -// planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 -// -// int offset0; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); -// int offset1; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); -// int offset2; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); -// const int arena_size_needed = planner.GetMaximumMemorySize(); -// -// The goal is for applications to be able to experiment with different layout -// strategies without changing their client code, by swapping out classes that -// implement this interface.= -class MemoryPlanner { - public: - MemoryPlanner() {} - virtual ~MemoryPlanner() {} - - // Pass information about a buffer's size and lifetime to the layout - // algorithm. The order this is called implicitly assigns an index to the - // result, so the buffer information that's passed into the N-th call of - // this method will be used as the buffer_index argument to - // GetOffsetForBuffer(). - virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, - int size, int first_time_used, - int last_time_used) = 0; - - // The largest contiguous block of memory that's needed to hold the layout. - virtual size_t GetMaximumMemorySize() = 0; - // How many buffers have been added to the planner. - virtual int GetBufferCount() = 0; - // Calculated layout offset for the N-th buffer added to the planner. 
- virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) = 0; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ diff --git a/src/tensorflow/lite/micro/memory_planner/micro_memory_planner.h b/src/tensorflow/lite/micro/memory_planner/micro_memory_planner.h new file mode 100644 index 0000000..035f467 --- /dev/null +++ b/src/tensorflow/lite/micro/memory_planner/micro_memory_planner.h @@ -0,0 +1,95 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +// Interface class for planning the layout of memory buffers during the +// execution of a graph. +// It's designed to be used by a client that iterates in any order through the +// buffers it wants to lay out, and then calls the getter functions for +// information about the calculated layout. 
For example: +// +// SomeMemoryPlanner planner; +// planner.AddBuffer(100, 0, 1); // Buffer 0 +// planner.AddBuffer(50, 2, 3); // Buffer 1 +// planner.AddBuffer(50, 2, 3); // Buffer 2 +// +// int offset0; +// TF_EXPECT_OK(planner.GetOffsetForBuffer(0, &offset0)); +// int offset1; +// TF_EXPECT_OK(planner.GetOffsetForBuffer(1, &offset1)); +// int offset2; +// TF_EXPECT_OK(planner.GetOffsetForBuffer(2, &offset2)); +// const int arena_size_needed = planner.GetMaximumMemorySize(); +// +// The goal is for applications to be able to experiment with different layout +// strategies without changing their client code, by swapping out classes that +// implement this interface.= +class MicroMemoryPlanner { + public: + MicroMemoryPlanner() {} + virtual ~MicroMemoryPlanner() {} + + // Pass information about a buffer's size and lifetime to the layout + // algorithm. The order this is called implicitly assigns an index to the + // result, so the buffer information that's passed into the N-th call of + // this method will be used as the buffer_index argument to + // GetOffsetForBuffer(). + virtual TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) = 0; + + // Record details of an offline planned buffer offset we want to place. + // offline_offset is the buffer offset from the start of the arena. + // This is to support offline memory planning from the flatbuffer metadata. + // By default, it returns an error. + virtual TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used, int offline_offset) { + return kTfLiteError; + } + + // The largest contiguous block of memory that's needed to hold the layout. + virtual size_t GetMaximumMemorySize() = 0; + // How many buffers have been added to the planner. + virtual int GetBufferCount() = 0; + // Calculated layout offset for the N-th buffer added to the planner. 
+ virtual TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) = 0; + + // Provides the scratch buffer in case that the memory planner needs it. + // The lifetime of scratch buffers lifetime lasts until the static memory plan + // is committed. + // The default implementation is for the memory planner that does not need + // scratch buffer and simply returns ok. + virtual TfLiteStatus Init(unsigned char* scratch_buffer, + int scratch_buffer_size) { + return kTfLiteOk; + } + + // Method will return True if the MicroMemoryPlanner preserves all tensors + // after invocation, and False if it doesn't. + virtual bool preserves_all_tensors() const = 0; + + virtual void PrintMemoryPlan() { + // Default does nothing. + } +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ diff --git a/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cpp b/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cpp new file mode 100644 index 0000000..9bcb80c --- /dev/null +++ b/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cpp @@ -0,0 +1,66 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h" + +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +NonPersistentMemoryPlannerShim::NonPersistentMemoryPlannerShim( + const BufferPlan* buffer_plan) + : buffer_plan_(buffer_plan), buffer_request_count_(0) {} + +NonPersistentMemoryPlannerShim::~NonPersistentMemoryPlannerShim() {} + +TfLiteStatus NonPersistentMemoryPlannerShim::AddBuffer(int size, + int first_time_used, + int last_time_used) { + buffer_request_count_++; + if (buffer_request_count_ > buffer_plan_->buffer_count) { + MicroPrintf( + "Attempting to add buffer %d, but only %d buffers in given buffer " + "plan.", + buffer_request_count_, buffer_plan_->buffer_count); + return kTfLiteError; + } + return kTfLiteOk; +} + +size_t NonPersistentMemoryPlannerShim::GetMaximumMemorySize() { + // Simply return 0 to let the framework accept this memory plan + // because the client ensure validity of the memory plan. + return 0; +} + +// How many buffers are in the given memory plan. 
+int NonPersistentMemoryPlannerShim::GetBufferCount() { + return buffer_plan_->buffer_count; +} + +TfLiteStatus NonPersistentMemoryPlannerShim::GetOffsetForBuffer( + int buffer_request_index, int* offset) { + if (buffer_request_index >= buffer_plan_->buffer_count) { + MicroPrintf( + "Attempting to get offset for buffer %d, but only %d buffers in given " + "buffer plan.", + buffer_request_index, buffer_plan_->buffer_count); + return kTfLiteError; + } + *offset = buffer_plan_->buffer_plan_entries[buffer_request_index].offset; + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h b/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h new file mode 100644 index 0000000..13a3fad --- /dev/null +++ b/src/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h @@ -0,0 +1,133 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ +#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ + +#include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/memory_planner/memory_plan_struct.h" +#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" + +namespace tflite { + +/* This is an experimental feature and subjected to change. + * +The NonPersistentMemoryPlannerShim enables TFLM to work with an external tooling +that can plan the offset of each non persistent buffer for the Model within the +TFLM arena. + +If the NonPersistentMemoryPlannerShim is used, then the final binary does not +have any of the symbols associated with the GreedyMemoryPlanner which results in +a reduced memory footprint. + +Additionally, the offline planning of the non-persistent buffers can be used to +have a more efficient utilization compared to the GreedyMemoryPlanner. 
+ +For example, consider the following hypothetical model: + +A1(400) A2(401) +──┬─────────┐ ┌─────────── + │ │ │ + │ │ │ + │ ▼ ▼ + │ ┌────────┐ + │ │ OP1 │ + │ └───┬────┘ A4(201) + │ A3(10) │ │ + │ │ │ + │ │ │ + │ ┌───┴────┐ │ + │ │ OP2 │◄────────┤ + │ └───┬────┘ │ + │ A5(11) │ A6(202) │ + │ │ │ │ + │ ▼ │ │ + │ ┌────────┐ │ │ + │ │ OP3 │◄─┘ │ + │ └───┬────┘ │ + │ │ A8(200) │ + │ A7(12) │ │ │ + │ │ │ │ + │ ┌───┴────┐◄──┘ │ + └──────►│ OP4 │ │ + └───┬────┘◄────────┘ + │ + A9(13) │ + ▼ + +The GreedyMemoryPlanner will give the following memory layout that requires 1012 +bytes of scratch arena size: + +┌─────────────────────────────────────────┬──────────────────────────┬────────┬───────┐ +│ A2(401) │ A1(400) │ A4(201)│ +A3(10)│ +└─────────────────────────────────────────┴──────────────────────────┴────────┴───────┘ + +┌───────────┬──────┬──────┐ +│ A6(202) │A5(11)│A7(12)│ +└───────────┴──────┴──────┘ + +┌──────────┬───────┐ +│ A8(200) │A9(13) │ +└──────────┴───────┘ + +But a more efficient offline memory plan that requires only 826 bytes of scratch +arena size can be + +┌──────────────────────────────────────┬─────────────────────────────┬───────┬──────┐ +│ A1(400) │ A2(401) │ +A3(10)│A5(11)│ +└──────────────────────────────────────┴─────────────────────────────┴───────┴──────┘ + + ┌────────────────┬────────────┬────────┬───────┐ + │A4(201) │ A8(200) │A9(13) +│A7(12) │ └────────────────┴────────────┴────────┴───────┘ + + ┌─────────────┐ + │ A6(202) │ + └─────────────┘ + +*/ +class NonPersistentMemoryPlannerShim : public MicroMemoryPlanner { + public: + // Does not take ownership of buffer_plan, which must refer to a valid + // BufferPlan that outlives this object. 
+ explicit NonPersistentMemoryPlannerShim(const BufferPlan* buffer_plan); + ~NonPersistentMemoryPlannerShim() override; + + TfLiteStatus GetOffsetForBuffer(int buffer_request_index, + int* offset) override; + + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; + size_t GetMaximumMemorySize() override; + int GetBufferCount() override; + + // Returns False because the NonPersistentMemoryPlannerShim doesn't preserves + // all tensors after invocation. + bool preserves_all_tensors() const override { return false; } + + private: + const BufferPlan* buffer_plan_; // not owned, can't be null + + // The number of buffers requested so far. Used for error checking. + int buffer_request_count_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ diff --git a/src/tensorflow/lite/micro/micro_allocation_info.cpp b/src/tensorflow/lite/micro/micro_allocation_info.cpp new file mode 100644 index 0000000..a89a5e6 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_allocation_info.cpp @@ -0,0 +1,375 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/micro_allocation_info.h" + +#include + +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { +constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; +constexpr int kUninitializedLifetime = -1; +} // namespace + +// Mark the given Allocation info as first created at the specified allocation +// scope count. Only the first creation must be recorded since the allocation +// scope count monotonically increases throughout the lifetime marking process. +void AllocationInfoBuilder::UpdateFirstCreated(AllocationInfo* current, + int allocation_scope_count) { + TFLITE_DCHECK(current->first_created <= allocation_scope_count); + if (current->first_created == kUninitializedLifetime) { + current->first_created = allocation_scope_count; + } +} + +// Mark the given AllocationInfo as last used at the specified allocation scope +// count. Update the last used marker every time, since the allocation scope +// count monotonically increases through the lifetime marking process. 
+void AllocationInfoBuilder::UpdateLastUsed(AllocationInfo* current, + int allocation_scope_count) { + TFLITE_DCHECK(current->last_used <= allocation_scope_count); + current->last_used = allocation_scope_count; +} + +TfLiteStatus AllocationInfoBuilder::MarkSubgraphLifetimesIfNecessary( + const Operator* op, internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations) { + int first_subgraph_index = -1; + int second_subgraph_index = -1; + const OperatorCode* opcode = + model_->operator_codes()->Get(op->opcode_index()); + switch (opcode->builtin_code()) { + case BuiltinOperator_IF: { + first_subgraph_index = + op->builtin_options_as_IfOptions()->then_subgraph_index(); + second_subgraph_index = + op->builtin_options_as_IfOptions()->else_subgraph_index(); + break; + } + case BuiltinOperator_CALL_ONCE: { + first_subgraph_index = + op->builtin_options_as_CallOnceOptions()->init_subgraph_index(); + break; + } + case BuiltinOperator_WHILE: { + first_subgraph_index = + op->builtin_options_as_WhileOptions()->cond_subgraph_index(); + second_subgraph_index = + op->builtin_options_as_WhileOptions()->body_subgraph_index(); + break; + } + default: { + break; + } + } + if (first_subgraph_index != -1) { + // Enter a new allocation scope for each subgraph. + allocation_scope_count_++; + TF_LITE_ENSURE_STATUS( + MarkAllocationLifetimes(first_subgraph_index, scratch_buffer_requests, + scratch_buffer_handles, allocations)); + } + if (second_subgraph_index != -1) { + // Enter a new allocation scope for each subgraph. 
+ allocation_scope_count_++; + TF_LITE_ENSURE_STATUS( + MarkAllocationLifetimes(second_subgraph_index, scratch_buffer_requests, + scratch_buffer_handles, allocations)); + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::CreateAllocationInfo( + int scratch_buffer_request_count) { + size_t subgraph_offsets_length = model_->subgraphs()->size() * sizeof(size_t); + info_.subgraph_offsets = + reinterpret_cast(non_persistent_allocator_->AllocateTemp( + subgraph_offsets_length, alignof(size_t))); + if (info_.subgraph_offsets == nullptr) { + MicroPrintf( + "Failed to allocate memory for memory planning, %d bytes required", + subgraph_offsets_length); + return kTfLiteError; + } + size_t tensor_count = 0; + for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); + subgraph_idx++) { + // Add all tensors in each subgraph to the AllocationInfo array. Even weight + // tensors are added but marked with needs_allocating = false. Including all + // tensors in the graph here simplifies logic. + info_.subgraph_offsets[subgraph_idx] = tensor_count; + tensor_count += model_->subgraphs()->Get(subgraph_idx)->tensors()->size(); + } + info_.tensor_count = tensor_count; + + // Scratch buffer allocations follow tensor allocations, so the scratch offset + // is equal to the number of tensor allocations. + info_.scratch_offset = tensor_count; + info_.allocation_info_count = tensor_count + scratch_buffer_request_count; + info_.scratch_buffer_count = scratch_buffer_request_count; + size_t bytes = sizeof(AllocationInfo) * info_.allocation_info_count; + + // Allocate an array of AllocationInfo structs from the temp section. This + // struct will be used by AllocationInfoBuilder to find buffer usage. 
+ info_.allocation_info = reinterpret_cast( + non_persistent_allocator_->AllocateTemp(bytes, alignof(AllocationInfo))); + if (info_.allocation_info == nullptr) { + MicroPrintf( + "Failed to allocate memory for memory planning, %d bytes required", + bytes); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::FreeAllocationInfo() { + non_persistent_allocator_->DeallocateTemp( + reinterpret_cast(info_.allocation_info)); + non_persistent_allocator_->DeallocateTemp( + reinterpret_cast(info_.subgraph_offsets)); + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::ValidateSubgraph( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { + uint32_t operators_size = NumSubgraphOperators(subgraph); + + for (uint32_t i = 0; i < operators_size; i++) { + const auto op = subgraph->operators()->Get(i); + for (size_t n = 0; + op->intermediates() != nullptr && n < op->intermediates()->size(); + n++) { + const int tensor_index = op->intermediates()->Get(n); + size_t tensor_size = -1; + TF_LITE_ENSURE_STATUS(TfLiteEvalTensorByteLength( + &eval_tensors[tensor_index], &tensor_size)); + if (tensor_size != 0) { + MicroPrintf( + "Does not support intermediate tensor with non-zero size: %d", + tensor_size); + return kTfLiteError; + } + } + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::InitializeAllocationInfo( + const int32_t* offline_offsets, SubgraphAllocations* allocations) { + AllocationInfo* allocation_info = info_.allocation_info; + // Initialize allocation info for every tensor in every subgraph. + int offline_index = 0; + for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + TfLiteEvalTensor* eval_tensors = allocations[subgraph_idx].tensors; + AllocationInfo* subgraph_allocation_info = + &allocation_info[info_.subgraph_offsets[subgraph_idx]]; + + // Ensure constraints are met. 
+ TF_LITE_ENSURE_STATUS(ValidateSubgraph(subgraph, eval_tensors)); + + for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { + AllocationInfo* current = &subgraph_allocation_info[i]; + current->output_ptr = &(eval_tensors[i].data.data); + + TF_LITE_ENSURE_STATUS( + TfLiteEvalTensorByteLength(&eval_tensors[i], ¤t->bytes)); + + current->first_created = kUninitializedLifetime; + current->last_used = kUninitializedLifetime; + current->needs_allocating = + (eval_tensors[i].data.data == nullptr) && + (!subgraph->tensors()->Get(i)->is_variable()) && + (current->bytes != 0); + if (offline_offsets) { + current->offline_offset = offline_offsets[offline_index++]; + + // Mark offline planned variable tensors so they can get an offline + // offset and be handled offline. + if (subgraph->tensors()->Get(i)->is_variable() && + current->offline_offset != kOnlinePlannedBuffer) { + current->needs_allocating = true; + } + } else { + current->offline_offset = kOnlinePlannedBuffer; + } + } + } + // Initialize allocation info for every scratch buffer. + AllocationInfo* scratch_allocation_info = + &allocation_info[info_.scratch_offset]; + for (size_t i = 0; i < info_.scratch_buffer_count; i++) { + AllocationInfo* current = &scratch_allocation_info[i]; + current->first_created = kUninitializedLifetime; + current->last_used = kUninitializedLifetime; + current->needs_allocating = true; + current->offline_offset = kOnlinePlannedBuffer; + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::MarkAllocationLifetimes( + int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + + AllocationInfo* allocation_info = info_.allocation_info; + // Each subgraph's tensor allocations are in a contiguous block starting at + // subgraph_offsets_[subgraph index] with one entry per tensor. 
+ AllocationInfo* subgraph_allocation_info = + &allocation_info[info_.subgraph_offsets[subgraph_idx]]; + + uint32_t operators_size = NumSubgraphOperators(subgraph); + // Mark all inputs as created at the start of the subgraph invocation. + for (size_t i = 0; + subgraph->inputs() != nullptr && i < subgraph->inputs()->size(); ++i) { + const int tensor_index = subgraph->inputs()->Get(i); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateFirstCreated(current, allocation_scope_count_); + // This will ensure that the tensors that are inputs to the subgraphs + // but not used in any ops also have a reasonable lifetime. + UpdateLastUsed(current, allocation_scope_count_); + } + + for (uint32_t i = 0; i < operators_size; i++) { + // Each operator has a new allocation scope. + allocation_scope_count_++; + const auto* op = subgraph->operators()->Get(i); + // Figure out when the first creation and use of each tensor is. + for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size(); + ++n) { + const int tensor_index = op->outputs()->Get(n); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateFirstCreated(current, allocation_scope_count_); + } + + // Keep track of scope count before any subgraphs, so that scratch buffers' + // lifetime within a control flow op properly overlaps with all subgraphs. + int start_allocation_scope_count = allocation_scope_count_; + + // Control flow operators can invoke subgraphs. Plan these subgraphs + // before continuing on to the rest of the graph. + MarkSubgraphLifetimesIfNecessary(op, scratch_buffer_requests, + scratch_buffer_handles, allocations); + + // Figure out when the last use of each tensor is. + for (size_t n = 0; op->inputs() != nullptr && n < op->inputs()->size(); + ++n) { + const int tensor_index = op->inputs()->Get(n); + // Optional bias tensors can have an index of -1 when they are omitted. 
+ if (tensor_index >= 0) { + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + // No need to update creation since it is either marked by the subgraph + // or producer op, or it is not part of the memory plan (weight, bias + // tensor). + UpdateLastUsed(current, allocation_scope_count_); + } + } + for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size(); + ++n) { + const int tensor_index = op->outputs()->Get(n); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateLastUsed(current, allocation_scope_count_); + } + + // Mark thse lifetime of scratch buffers belonging to the current node. This + // operation is O(N * M) where N is the total number of visited nodes and M + // is the total number of scratch buffers. + // TODO(b/217794030): Optimize this memory planning code. + AllocationInfo* scratch_allocation_info = + &allocation_info[info_.scratch_offset]; + for (size_t scratch_idx = 0; scratch_idx < info_.scratch_buffer_count; + scratch_idx++) { + internal::ScratchBufferRequest request = + scratch_buffer_requests[scratch_idx]; + AllocationInfo* current = &scratch_allocation_info[scratch_idx]; + if (request.node_idx == static_cast(i) && + request.subgraph_idx == static_cast(subgraph_idx)) { + ScratchBufferHandle* current_handle = + &(scratch_buffer_handles[scratch_idx]); + current->output_ptr = reinterpret_cast(¤t_handle->data); + current->bytes = request.bytes; + UpdateFirstCreated(current, start_allocation_scope_count); + UpdateLastUsed(current, allocation_scope_count_); + } + } + } + + // Mark all outputs as persistent to the end of the subgraph invocation. + for (size_t i = 0; + subgraph->outputs() != nullptr && i < subgraph->outputs()->size(); ++i) { + const int tensor_index = subgraph->outputs()->Get(i); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + // Make sure to assign the First created value of the subgraph output + // This will handle the case where the subgraph is empty. 
This helps + // ensure all tensors have valid lifetimes before those are used by the + // memory planner. + UpdateFirstCreated(current, allocation_scope_count_); + UpdateLastUsed(current, allocation_scope_count_); + } + return kTfLiteOk; +} + +// Get offline tensors allocation plan. See +// micro/docs/memory_management.md for more info. +TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets( + const int32_t** offline_planner_offsets) { + if (model_->metadata()) { + for (size_t i = 0; i < model_->metadata()->size(); ++i) { + auto metadata = model_->metadata()->Get(i); + + if (metadata->name()) { + const size_t metadata_name_size = metadata->name()->size(); + + if ((strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, + std::min(metadata_name_size, + strlen(kOfflineMemAllocMetadata))) == 0) && + metadata_name_size == strlen(kOfflineMemAllocMetadata)) { + const flatbuffers::Vector>* buffers = + model_->buffers(); + auto* buffer = (*buffers)[metadata->buffer()]; + auto* array = buffer->data(); + const uint32_t* metadata_buffer = + reinterpret_cast(array->data()); + const size_t nbr_tensors = static_cast(metadata_buffer[2]); + *offline_planner_offsets = + reinterpret_cast(&metadata_buffer[3]); + + if (info_.tensor_count != nbr_tensors) { + MicroPrintf( + "Nbr of offline buffer offsets (%d) in metadata " + "not equal nbr tensors (%d)\n", + nbr_tensors, info_.tensor_count); + return kTfLiteError; + } + } + } + } + } + return kTfLiteOk; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_allocation_info.h b/src/tensorflow/lite/micro/micro_allocation_info.h new file mode 100644 index 0000000..c0c7bd5 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_allocation_info.h @@ -0,0 +1,139 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// Used to hold information used during allocation calculations. +struct AllocationInfo { + size_t bytes; + void** output_ptr; + int first_created; + int last_used; + int32_t offline_offset; + bool needs_allocating; +}; + +// Used to hold the allocation info list and related metadata for the entire +// graph (including subgraphs). Since all subgraphs are planned together, the +// allocation info list contains allocations for all subgraphs. Track the offset +// into this list for each subgraph then reserve space to track all allocations. +// +// The AllocationInfo list is a contiguous list of allocations across all +// subgraphs and scratch buffers. Each element here is marked as +// st. The following is a possible +// AllocationInfo list: +// [s0t0, s0t1, s1t0, s2t1, s1t2, s3t0, s3t1, scratch0, scratch1, scratch2] +// +// For this example, the subgraph offsets would be [0, 2, 5] and the scratch +// offset would be 7. 
+struct GraphAllocationInfo { + AllocationInfo* allocation_info; + size_t allocation_info_count; + size_t* subgraph_offsets; + size_t scratch_offset; + size_t tensor_count; + size_t scratch_buffer_count; +}; + +// A helper class to construct AllocationInfo array. This array contains the +// lifetime of tensors / scratch_buffer and will be used to calculate the memory +// plan. Methods need to be called in order from `Create`, Init`, `Add*`, to +// `Finish`. +class AllocationInfoBuilder { + public: + AllocationInfoBuilder(const Model* model, + INonPersistentBufferAllocator* non_persistent_allocator) + : model_(model), non_persistent_allocator_(non_persistent_allocator) {} + + // Check if model contains offline planned buffer offsets. + // - If there's no metadata available, offline_planner_offsets is not set + // - If there's metadata available, offline_planner_offsets will point to the + // first offset in the metadata buffer list. + TfLiteStatus GetOfflinePlannedOffsets( + const int32_t** offline_planner_offsets); + + // Allocate memory for the allocation info array as well as offsets into that + // array for each subgraph. + TfLiteStatus CreateAllocationInfo(int scratch_buffer_request_count); + + // Release memory used for the allocation info array. + TfLiteStatus FreeAllocationInfo(); + + // Initialize AllocationInfo for all tensors and scratch buffers in the graph. + TfLiteStatus InitializeAllocationInfo(const int32_t* offline_offsets, + SubgraphAllocations* allocations); + + // Mark the scope of each tensor and scratch buffer across the graph. Enter + // all possible subgraphs invoked by each control flow operator. This method + // marks the maximum lifetime of each buffer so that tensors are correctly + // planned for all valid invocation flows. 
+ TfLiteStatus MarkAllocationLifetimes( + int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_request, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations); + + // Identify control flow operators and recursively mark all subgraphs which + // that operator can invoke. The lifetime of all tensors within a subgraph + // can only be extended. The order of subgraph invocation does not matter + // since subgraphs within the same control flow operator are executed + // within their own allocation scope (planned buffers in a subgraph cannot + // persist beyond the end of that subgraph's invocation). + TfLiteStatus MarkSubgraphLifetimesIfNecessary( + const Operator* op, + internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations); + + // Returns the number of allocations. + int AllocationCount() const { return info_.allocation_info_count; } + + // Returns a pointer to the built AllocationInfo array. + AllocationInfo* Finish() const { return info_.allocation_info; } + + private: + // Mark the given Allocation info as first created at the specified allocation + // scope count. Only the first creation must be recorded since the allocation + // scope count monotonically increases throughout the lifetime marking + // process. + void UpdateFirstCreated(AllocationInfo* current, int allocation_scope_count); + + // Mark the given AllocationInfo as last used at the specified allocation + // scope + // count. Update the last used marker every time, since the allocation scope + // count monotonically increases through the lifetime marking process. + void UpdateLastUsed(AllocationInfo* current, int allocation_scope_count); + + // Validate if a subgraph satisfies assumptions. 
+ TfLiteStatus ValidateSubgraph(const SubGraph* subgraph, + TfLiteEvalTensor* eval_tensors); + + const tflite::Model* model_ = nullptr; + INonPersistentBufferAllocator* non_persistent_allocator_ = nullptr; + GraphAllocationInfo info_ = + {}; // Prevents problems caused by accessing uninitialized memory. + int allocation_scope_count_ = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ diff --git a/src/tensorflow/lite/micro/micro_allocator.cpp b/src/tensorflow/lite/micro/micro_allocator.cpp index fb54727..d36fdc9 100644 --- a/src/tensorflow/lite/micro/micro_allocator.cpp +++ b/src/tensorflow/lite/micro/micro_allocator.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,21 +18,32 @@ limitations under the License. #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/core/api/op_resolver.h" -#include "tensorflow/lite/core/api/tensor_utils.h" #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h" +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" #include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" #include "tensorflow/lite/micro/memory_helpers.h" #include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" -#include 
"tensorflow/lite/micro/memory_planner/memory_planner.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" +#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" +#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" +#include "tensorflow/lite/micro/micro_allocation_info.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/schema/schema_utils.h" + +#ifdef USE_TFLM_COMPRESSION + +#include +#include + +#include "tensorflow/lite/micro/compression/metadata_saved.h" + +#endif // USE_TFLM_COMPRESSION namespace tflite { @@ -46,29 +57,17 @@ constexpr size_t kMaxScratchBuffersPerOp = 12; // needs a node id assignment. constexpr int kUnassignedScratchBufferRequestIndex = -1; -// Used to hold information used during allocation calculations. -struct AllocationInfo { - size_t bytes; - void** output_ptr; - int first_created; - int last_used; - int32_t offline_offset; - bool needs_allocating; -}; - -// We align tensor buffers to 16-byte boundaries, since this is a common -// requirement for SIMD extensions. 
-constexpr int kBufferAlignment = 16; -constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; const TfLiteIntArray kZeroLengthIntArray = {}; -class MicroBuiltinDataAllocator : public BuiltinDataAllocator { +class MicroBuiltinDataAllocator : public TfLiteBridgeBuiltinDataAllocator { public: - explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator) - : memory_allocator_(memory_allocator) {} + explicit MicroBuiltinDataAllocator( + IPersistentBufferAllocator* persistent_allocator) + : persistent_allocator_(persistent_allocator) {} void* Allocate(size_t size, size_t alignment_hint) override { - return memory_allocator_->AllocateFromTail(size, alignment_hint); + return persistent_allocator_->AllocatePersistentBuffer(size, + alignment_hint); } void Deallocate(void* data) override { // Do not deallocate, builtin data needs to be available for the life time @@ -76,266 +75,35 @@ class MicroBuiltinDataAllocator : public BuiltinDataAllocator { } private: - SimpleMemoryAllocator* memory_allocator_; + IPersistentBufferAllocator* persistent_allocator_; TF_LITE_REMOVE_VIRTUAL_DELETE }; -#if !defined(__clang__) -// Helper function to check flatbuffer metadata correctness. This function is -// not called by default. Hence it's not linked in to the final binary code. 
-TfLiteStatus CheckOfflinePlannedOffsets(const Model* model, - ErrorReporter* error_reporter) { - // Suppress compile warning for unused function - (void)CheckOfflinePlannedOffsets; - - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - auto* subgraphs = model->subgraphs(); - const SubGraph* subgraph = (*subgraphs)[0]; - const flatbuffers::Vector>* tensors = - subgraph->tensors(); - const flatbuffers::Vector>* buffers = - model->buffers(); - int nbr_tflite_tensors = tensors->size(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = (uint32_t*)array->data(); - int version = metadata_buffer[0]; - int subgraph_idx = metadata_buffer[1]; - const int nbr_offline_offsets = metadata_buffer[2]; -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int* offline_planner_offsets = (int*)&metadata_buffer[3]; -#endif - - TF_LITE_REPORT_ERROR(error_reporter, "==== Model metadata info: ====="); - TF_LITE_REPORT_ERROR(error_reporter, - "Offline planner metadata found, version %d, " - "subgraph %d, nbr offline offsets %d", - version, subgraph_idx, nbr_offline_offsets); - for (int j = 0; j < nbr_offline_offsets; ++j) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Offline planner tensor index %d, offline offset: %d", j, - offline_planner_offsets[j]); - } - - if (version != 1) { - TF_LITE_REPORT_ERROR(error_reporter, "Version not supported! (%d)\n", - version); - return kTfLiteError; - } - if (subgraph_idx != 0) { - TF_LITE_REPORT_ERROR(error_reporter, - "Only 1 subgraph supported! 
Subgraph idx (%d)\n", - subgraph_idx); - return kTfLiteError; - } - if (nbr_tflite_tensors != nbr_offline_offsets) { - TF_LITE_REPORT_ERROR(error_reporter, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_offline_offsets, nbr_tflite_tensors); - return kTfLiteError; - } - } +MicroMemoryPlanner* CreateMemoryPlanner( + MemoryPlannerType memory_planner_type, + IPersistentBufferAllocator* memory_allocator) { + MicroMemoryPlanner* memory_planner = nullptr; + uint8_t* memory_planner_buffer = nullptr; + + switch (memory_planner_type) { + case MemoryPlannerType::kLinear: { + memory_planner_buffer = memory_allocator->AllocatePersistentBuffer( + sizeof(LinearMemoryPlanner), alignof(LinearMemoryPlanner)); + memory_planner = new (memory_planner_buffer) LinearMemoryPlanner(); + break; } - } - return kTfLiteOk; -} -#endif - -// A helper class to construct AllocationInfo array. This array contains the -// lifetime of tensors / scratch_buffer and will be used to calculate the memory -// plan. Methods need to be called in order from `Init`, `Add*`, to `Finish`. -class AllocationInfoBuilder { - public: - AllocationInfoBuilder(AllocationInfo* info, size_t tensor_count, - size_t scratch_buffer_count, ErrorReporter* reporter) - : info_(info), - tensor_count_(tensor_count), - buffer_count_(scratch_buffer_count), - reporter_(reporter) {} - - // Check if model contains offline planned buffer offsets. - // - If there's no metadata available, offline_planner_offsets is not set - // - If there's metadata available, offline_planner_offsets will point to the - // first offset in the metadata buffer list. - TfLiteStatus GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets); - - // Add allocaiton information for the tensors. - TfLiteStatus AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors); - - // Add allocation information for the scratch buffers. 
- TfLiteStatus AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles); - - // Returns a pointer to the built AllocationInfo array. - const AllocationInfo* Finish() const { return info_; } - - private: - AllocationInfo* info_ = nullptr; - size_t tensor_count_ = 0; - size_t buffer_count_ = 0; - ErrorReporter* reporter_ = nullptr; -}; - -TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); - - // Set up allocation info for all tensors. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - current->output_ptr = &(eval_tensors[i].data.data); - - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], ¤t->bytes)); - - current->first_created = -1; - current->last_used = -1; - current->needs_allocating = (eval_tensors[i].data.data == nullptr) && - (!subgraph->tensors()->Get(i)->is_variable()); - if (offline_offsets) { - current->offline_offset = offline_offsets[i]; - } else { - current->offline_offset = kOnlinePlannedBuffer; + case MemoryPlannerType::kGreedy: { + memory_planner_buffer = memory_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + memory_planner = new (memory_planner_buffer) GreedyMemoryPlanner(); + break; } } - - for (size_t i = 0; i < subgraph->inputs()->size(); ++i) { - const int tensor_index = subgraph->inputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->first_created = 0; - } - - // Mark all outputs as persistent to the end of the invocation. - for (size_t i = 0; i < subgraph->outputs()->size(); ++i) { - const int tensor_index = subgraph->outputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->last_used = subgraph->operators()->size() - 1; - } - - // Figure out when the first and last use of each tensor is. 
- for (int i = (subgraph->operators()->size() - 1); i >= 0; --i) { - const auto* op = subgraph->operators()->Get(i); - for (size_t n = 0; n < op->inputs()->size(); ++n) { - const int tensor_index = op->inputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if (((current->last_used == -1) || (current->last_used < i))) { - current->last_used = i; - } - } - for (size_t n = 0; n < op->outputs()->size(); ++n) { - const int tensor_index = op->outputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if ((current->first_created == -1) || (current->first_created > i)) { - current->first_created = i; - } - } - } - - // Sanity check for valid tensor lifetime. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - // Even though tensor appears to be read only it may still need to be - // allocated. - const bool appears_read_only = - (current->first_created == -1) && (current->last_used != -1); - const bool has_partial_lifetime = - !appears_read_only && - ((current->first_created == -1) || (current->last_used == -1)); - if (has_partial_lifetime && current->needs_allocating) { - TF_LITE_REPORT_ERROR( - reporter_, - "Logic error in memory planner, tensor %d has an invalid lifetime: " - "first_created: %d, last_used: %d", - i, current->first_created, current->last_used); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -// The tensor offsets will be encoded in the metadata:[Metadata] field of the -// Model. The following encoding applies: -// -// | Metadata component | Value | -// | name:string | “OfflineMemoryAllocation” | -// | buffer:unit | Index of buffer containing memory allocation data | -// -// The buffer contents for the memory allocation is a list of 32-bit integers. -// The number of tensors, n, must be equal to the number of tensors defined in -// the model. 
The following encoding applies: -// -// | Offset | Value | -// | 0 | Offline allocation format version – set to 0 | -// | 1 | Subgraph index to which this allocation applies | -// | 2 | Number offsets following: n | -// | 3 | Arena byte offset of tensor #0 or -1 to allocate at runtime | -// | 4 | Arena byte offset of tensor #1 or -1 to allocate at runtime | -// | 3+(n-1) | Arena byte offset of tensor #(n-1) or -1 to allocate at runtime | -TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets) { - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - const flatbuffers::Vector>* buffers = - model->buffers(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = - reinterpret_cast(array->data()); - const size_t nbr_tensors = static_cast(metadata_buffer[2]); - *offline_planner_offsets = - reinterpret_cast(&metadata_buffer[3]); - - if (tensor_count_ != nbr_tensors) { - TF_LITE_REPORT_ERROR(reporter_, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_tensors, tensor_count_); - return kTfLiteError; - } - } - } - } - return kTfLiteOk; -} - -TfLiteStatus AllocationInfoBuilder::AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles) { - // Set up allocation info for buffers. 
- for (size_t i = tensor_count_; i < tensor_count_ + buffer_count_; ++i) { - internal::ScratchBufferRequest* current_request = - &(scratch_buffer_requests[i - tensor_count_]); - ScratchBufferHandle* current_handle = - &(scratch_buffer_handles[i - tensor_count_]); - - AllocationInfo* current = &info_[i]; - current->output_ptr = reinterpret_cast(¤t_handle->data); - current->bytes = current_request->bytes; - current->first_created = current_request->node_idx; - current->last_used = current_request->node_idx; - current->offline_offset = kOnlinePlannedBuffer; - current->needs_allocating = true; - } - return kTfLiteOk; + return memory_planner; } -TfLiteStatus CreatePlan(ErrorReporter* error_reporter, - GreedyMemoryPlanner* planner, +TfLiteStatus CreatePlan(MicroMemoryPlanner* planner, const AllocationInfo* allocation_info, size_t allocation_info_size) { // Add the tensors to our allocation plan. @@ -343,23 +111,22 @@ TfLiteStatus CreatePlan(ErrorReporter* error_reporter, const AllocationInfo* current = &allocation_info[i]; if (current->needs_allocating) { size_t aligned_bytes_required = - AlignSizeUp(current->bytes, kBufferAlignment); + AlignSizeUp(current->bytes, MicroArenaBufferAlignment()); if (current->offline_offset == kOnlinePlannedBuffer) { - TF_LITE_ENSURE_STATUS( - planner->AddBuffer(error_reporter, aligned_bytes_required, - current->first_created, current->last_used)); + TF_LITE_ENSURE_STATUS(planner->AddBuffer(aligned_bytes_required, + current->first_created, + current->last_used)); } else { - TF_LITE_ENSURE_STATUS(planner->AddBuffer( - error_reporter, aligned_bytes_required, current->first_created, - current->last_used, current->offline_offset)); + TF_LITE_ENSURE_STATUS( + planner->AddBuffer(aligned_bytes_required, current->first_created, + current->last_used, current->offline_offset)); } } } return kTfLiteOk; } -TfLiteStatus CommitPlan(ErrorReporter* error_reporter, MemoryPlanner* planner, - uint8_t* starting_point, +TfLiteStatus 
CommitPlan(MicroMemoryPlanner* planner, uint8_t* starting_point, const AllocationInfo* allocation_info, size_t allocation_info_size) { // Figure out the actual memory addresses for each buffer, based on the plan. @@ -369,61 +136,60 @@ TfLiteStatus CommitPlan(ErrorReporter* error_reporter, MemoryPlanner* planner, if (current->needs_allocating) { int offset = -1; TF_LITE_ENSURE_STATUS( - planner->GetOffsetForBuffer(error_reporter, planner_index, &offset)); + planner->GetOffsetForBuffer(planner_index, &offset)); *current->output_ptr = reinterpret_cast(starting_point + offset); ++planner_index; } } return kTfLiteOk; } -} // namespace -namespace internal { +IPersistentBufferAllocator* CreatePersistentArenaAllocator(uint8_t* buffer_head, + size_t buffer_size) { + // Align the actually used area by the tail because persistent buffer grows + // from the bottom to top. + uint8_t* aligned_buffer_tail = + AlignPointerDown(buffer_head + buffer_size, MicroArenaBufferAlignment()); + size_t aligned_buffer_size = aligned_buffer_tail - buffer_head; + PersistentArenaBufferAllocator tmp = + PersistentArenaBufferAllocator(buffer_head, aligned_buffer_size); + + // Allocate enough bytes from the buffer to create a + // SingleArenaBufferAllocator. The new instance will use the current adjusted + // tail buffer from the tmp allocator instance. + uint8_t* allocator_buffer = + tmp.AllocatePersistentBuffer(sizeof(PersistentArenaBufferAllocator), + alignof(PersistentArenaBufferAllocator)); + // Use the default copy constructor to populate internal states. + return new (allocator_buffer) PersistentArenaBufferAllocator(tmp); +} -// Handles architecture safe mapping of flatbuffer vectors to a TfLite*Array -// struct. Matching types are required (e.g. float and TfLiteFloatArray). -// Big-endian systems will always allocate dimension array data in the tail -// (persistent) section. 
-template -TfLiteStatus FlatBufferVectorToTfLiteTypeArray( - SimpleMemoryAllocator* allocator, ErrorReporter* error_reporter, - const flatbuffers::Vector* flatbuffer_array, - kTfLiteArrayType** result) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(flatbuffer_array != nullptr); - // TODO(b/159668691): Consider adding type assertion or breaking this function - // into multiple functions for each type. std::is_same is c++11 and has a - // special updated constructor in c++17 that requires a string argument. - if (FLATBUFFERS_LITTLEENDIAN) { - // On little-endian machines, TfLite*Array happens to have the same memory - // layout as flatbuffers:Vector, so we can - // reinterpret_cast the flatbuffer vector and avoid a copy and malloc. - *result = const_cast( - reinterpret_cast(flatbuffer_array)); - } else { - // Big-endian architecture can not use the same memory layout as - // flatbuffers::Vector. Allocate from the tail and - // copy values from the flatbuffer into the newly allocated chunk. - kTfLiteArrayType* array = - reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length()), - alignof(kTfLiteArrayType))); - if (array == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Failed to allocate %d bytes of memory to copy an array.", - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length())); - return kTfLiteError; - } - array->size = flatbuffer_array->Length(); - for (int i = 0; i < array->size; ++i) { - array->data[i] = flatbuffer_array->Get(i); - } - *result = array; - } - return kTfLiteOk; +// NonPersistentBufferAllocator instance is created in the persistent buffer +// because it has to be persistent to keep track of the non-persistent buffer +// information. 
+INonPersistentBufferAllocator* CreateNonPersistentArenaAllocator( + uint8_t* buffer_head, size_t buffer_size, + IPersistentBufferAllocator* persistent_buffer_allocator) { + uint8_t* allocator_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(NonPersistentArenaBufferAllocator), + alignof(NonPersistentArenaBufferAllocator)); + // Align the actually used area by the head because persistent buffer grows + // from the head to bottom. + uint8_t* aligned_buffer_head = + AlignPointerUp(buffer_head, MicroArenaBufferAlignment()); + size_t aligned_buffer_size = buffer_head + buffer_size - aligned_buffer_head; + + INonPersistentBufferAllocator* non_persistent_buffer_allocator = + new (allocator_buffer) NonPersistentArenaBufferAllocator( + aligned_buffer_head, aligned_buffer_size); + return non_persistent_buffer_allocator; } +} // namespace + +namespace internal { + // Returns a pointer to any buffer associated with the flatbuffer tensor. Can // return nullptr if no buffer is found. void* GetFlatbufferTensorBuffer( @@ -457,17 +223,18 @@ void* GetFlatbufferTensorBuffer( } TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + bool allocate_temp, const tflite::Tensor& flatbuffer_tensor, const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result) { + TfLiteTensor* result) { TFLITE_DCHECK(result != nullptr); *result = {}; // Make sure the serialized type is one we know how to deal with, and convert // it from a flatbuffer enum into a constant used by the kernel C API. 
- TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); + TF_LITE_ENSURE_STATUS( + tflite::ConvertTensorType(flatbuffer_tensor.type(), &result->type)); // Make sure we remember if the serialized tensor is designated as a variable. result->is_variable = flatbuffer_tensor.is_variable(); @@ -487,20 +254,20 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( // Figure out what the size in bytes of the buffer is and store it. size_t type_size; - TF_LITE_ENSURE_STATUS(BytesRequiredForTensor( - flatbuffer_tensor, &result->bytes, &type_size, error_reporter)); + TF_LITE_ENSURE_STATUS( + BytesRequiredForTensor(flatbuffer_tensor, &result->bytes, &type_size)); if (flatbuffer_tensor.shape() == nullptr) { // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar // tensor. + // TODO(b/188459715): figure out why const_cast is required here. result->dims = const_cast(&kZeroLengthIntArray); } else { // TFLM doesn't allow reshaping the tensor which requires dynamic memory // allocation so it is safe to drop the const qualifier. In the future, if // we really want to update the tensor shape, we can always pass in a new // TfLiteIntArray - especially we have to do so if the dimension is - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); + result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape()); } // Copy the quantization information from the serialized data. @@ -523,15 +290,15 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( TfLiteAffineQuantization* quantization = allocate_temp ? 
reinterpret_cast( - allocator->AllocateTemp(sizeof(TfLiteAffineQuantization), - alignof(TfLiteAffineQuantization))) + non_persistent_buffer_allocator->AllocateTemp( + sizeof(TfLiteAffineQuantization), + alignof(TfLiteAffineQuantization))) : reinterpret_cast( - allocator->AllocateFromTail( + persistent_buffer_allocator->AllocatePersistentBuffer( sizeof(TfLiteAffineQuantization), alignof(TfLiteAffineQuantization))); if (quantization == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate TfLiteAffineQuantization.\n"); + MicroPrintf("Unable to allocate TfLiteAffineQuantization.\n"); return kTfLiteError; } @@ -540,26 +307,31 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( // zero_point is stored as a int64_t. quantization->zero_point = allocate_temp - ? reinterpret_cast(allocator->AllocateTemp( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))) - : reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))); + ? 
reinterpret_cast( + non_persistent_buffer_allocator->AllocateTemp( + TfLiteIntArrayGetSizeInBytes(channels), + alignof(TfLiteIntArray))) + : reinterpret_cast( + persistent_buffer_allocator->AllocatePersistentBuffer( + TfLiteIntArrayGetSizeInBytes(channels), + alignof(TfLiteIntArray))); if (quantization->zero_point == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate quantization->zero_point.\n"); + MicroPrintf("Unable to allocate quantization->zero_point.\n"); return kTfLiteError; } - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, src_quantization->scale(), - &quantization->scale)); + quantization->scale = + FlatBufferVectorToTfLiteTypeArray(src_quantization->scale()); quantization->zero_point->size = channels; int* zero_point_data = quantization->zero_point->data; for (int i = 0; i < channels; i++) { - zero_point_data[i] = src_quantization->zero_point()->Get(i); + // As a space-saving optimization, zero point arrays for weights can be + // reduced to a single value, since all zero points for weights are 0. + zero_point_data[i] = src_quantization->zero_point()->size() == + src_quantization->scale()->size() + ? src_quantization->zero_point()->Get(i) + : src_quantization->zero_point()->Get(0); } // TODO(rocky): Need to add a micro_allocator test case that fails when // this is not copied: @@ -571,14 +343,14 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( } TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, const tflite::Tensor& flatbuffer_tensor, + const tflite::Tensor& flatbuffer_tensor, const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteEvalTensor* result) { + TfLiteEvalTensor* result) { *result = {}; // Make sure the serialized type is one we know how to deal with, and convert // it from a flatbuffer enum into a constant used by the kernel C API. 
- TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); + TF_LITE_ENSURE_STATUS( + tflite::ConvertTensorType(flatbuffer_tensor.type(), &result->type)); result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); @@ -587,96 +359,341 @@ TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer( // tensor. result->dims = const_cast(&kZeroLengthIntArray); } else { - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); + result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape()); + } + return kTfLiteOk; +} + +#ifdef USE_TFLM_COMPRESSION + +const tflite::micro::compression::Metadata* GetCompressionMetadata( + const Model& model) { + const auto metadata_vector = model.metadata(); + if (metadata_vector == nullptr) { + return nullptr; + } + auto buffers = model.buffers(); + if (buffers == nullptr) { + return nullptr; + } + const size_t metadata_string_length = std::strlen(kCompressionMetadataString); + for (size_t metadata_index = 0; metadata_index < metadata_vector->size(); + metadata_index++) { + auto metadata = metadata_vector->Get(metadata_index); + if (metadata->name() == nullptr || metadata->name()->size() == 0) { + continue; + } + const char* s = metadata->name()->c_str(); + if ((metadata->name()->size() == metadata_string_length) && + (std::strncmp(s, kCompressionMetadataString, metadata_string_length) == + 0)) { + auto buffer_index = metadata->buffer(); + if (buffer_index == 0 || buffer_index >= buffers->size()) { + MicroPrintf("Compression: Invalid buffer index %u", buffer_index); + continue; + } + auto vp = buffers->Get(buffer_index)->data(); + if (vp == nullptr || vp->data() == nullptr) { + MicroPrintf("Compression: Invalid data for buffer index %u", + buffer_index); + continue; + } + // TODO(ddavis-2015): support multiple compression methods, possibly + // through multiple verification checks. 
+ // Then return a pair. + auto compression_metadata = + tflite::micro::compression::GetSizePrefixedMetadata(vp); + flatbuffers::Verifier verifier(vp->data(), vp->size(), + flatbuffers::Verifier::Options()); + if (!tflite::micro::compression::VerifyMetadataBuffer(verifier)) { + MicroPrintf("Compression: verification failure"); + return nullptr; + } else { + tflite::micro::compression::MetadataT schema; + if (compression_metadata->schema_version() > schema.schema_version) { + MicroPrintf("Compression: schema version mismatch (using %d got %d)", + schema.schema_version, + compression_metadata->schema_version()); + return nullptr; + } + + return compression_metadata; + } + } + } + + return nullptr; +} + +TfLiteStatus InitializeCompressionTensorDataFromFlatbuffer( + const Model& model, const size_t subgraph_index, + const tflite::micro::compression::LutTensor& lut_tensor, + CompressionTensorData* ctd) { + // TODO(ddavis-2015): support multiple compression schemes + ctd->scheme = CompressionScheme::kBinQuant; + + const size_t tensor_index = lut_tensor.tensor(); + auto tensors = model.subgraphs()->Get(subgraph_index)->tensors(); + if (tensor_index >= tensors->size()) { + MicroPrintf("Compression: invalid tensor index %u in LutTensor", + tensor_index); + return kTfLiteError; + } + const size_t index_bit_width = lut_tensor.index_bitwidth(); + if (index_bit_width > LookupTableData::kMaxBitWidth) { + MicroPrintf("Compression: invalid bit width %u in LutTensor", + index_bit_width); + return kTfLiteError; + } + ctd->data.lut_data->compressed_bit_width = index_bit_width; + const size_t value_buffer_index = lut_tensor.value_buffer(); + if (value_buffer_index >= model.buffers()->size()) { + MicroPrintf("Compression: invalid value_buffer %u in LutTensor", + value_buffer_index); + return kTfLiteError; + } + auto value_buffer = model.buffers()->Get(value_buffer_index)->data(); + if (value_buffer == nullptr || value_buffer->data() == nullptr) { + MicroPrintf("Compression: invalid 
value table for value_buffer %u", + value_buffer_index); + return kTfLiteError; + } + ctd->data.lut_data->value_table = value_buffer->data(); + auto tensor = + model.subgraphs()->Get(subgraph_index)->tensors()->Get(tensor_index); + if (tensor->shape() == nullptr) { + MicroPrintf("Compression: scalar tensors not supported"); + return kTfLiteError; + } + TfLiteType tensor_type = kTfLiteNoType; + TfLiteStatus status = ConvertTensorType(tensor->type(), &tensor_type); + if (status != kTfLiteOk) { + MicroPrintf("Compression: failed to convert tensor type"); + return kTfLiteError; + } + size_t tensor_type_size = 0; + status = TfLiteTypeSizeOf(tensor_type, &tensor_type_size); + if (status != kTfLiteOk) { + MicroPrintf("Compression: failed to get tensor type size"); + return kTfLiteError; + } + if (tensor->quantization() != nullptr && + tensor->quantization()->scale() != nullptr && + tensor->quantization()->scale()->size() > 1) { + const size_t num_channels = tensor->quantization()->scale()->size(); + ctd->data.lut_data->is_per_channel_quantized = true; + const TfLiteIntArray* dims = + FlatBufferVectorToTfLiteTypeArray(tensor->shape()); + int32_t quantized_axis = tensor->quantization()->quantized_dimension(); + if (quantized_axis == 0) { + ctd->data.lut_data->use_alternate_axis = false; + } else if (quantized_axis == (dims->size - 1)) { + ctd->data.lut_data->use_alternate_axis = true; + } else { + MicroPrintf("Compression: unsupported quantization axis %u", + quantized_axis); + return kTfLiteError; + } + ctd->data.lut_data->value_table_channel_stride = + (value_buffer->size() / tensor_type_size) / num_channels; + } else { + ctd->data.lut_data->is_per_channel_quantized = false; + ctd->data.lut_data->use_alternate_axis = false; + ctd->data.lut_data->value_table_channel_stride = + value_buffer->size() / tensor_type_size; } + return kTfLiteOk; } +#endif // USE_TFLM_COMPRESSION + } // namespace internal -MicroAllocator::MicroAllocator(SimpleMemoryAllocator* memory_allocator, - 
ErrorReporter* error_reporter) - : memory_allocator_(memory_allocator), - error_reporter_(error_reporter), +size_t MicroAllocator::GetDefaultTailUsage(bool is_memory_planner_given) { + size_t total_size = AlignSizeUp() + + AlignSizeUp() + + AlignSizeUp() + + AlignSizeUp(); + if (!is_memory_planner_given) { + total_size += AlignSizeUp(); + } + return total_size; +} + +MicroAllocator::MicroAllocator(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner) + : non_persistent_buffer_allocator_(memory_allocator), + persistent_buffer_allocator_(memory_allocator), + memory_planner_(memory_planner), + model_is_allocating_(false) {} + +MicroAllocator::MicroAllocator( + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + MicroMemoryPlanner* memory_planner) + : non_persistent_buffer_allocator_(non_persistent_buffer_allocator), + persistent_buffer_allocator_(persistent_buffer_allocator), + memory_planner_(memory_planner), model_is_allocating_(false) {} MicroAllocator::~MicroAllocator() {} MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter) { - uint8_t* aligned_arena = AlignPointerUp(tensor_arena, kBufferAlignment); + MicroMemoryPlanner* memory_planner) { + uint8_t* aligned_arena = + AlignPointerUp(tensor_arena, MicroArenaBufferAlignment()); size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; - return Create(SimpleMemoryAllocator::Create(error_reporter, aligned_arena, - aligned_arena_size), - error_reporter); + SingleArenaBufferAllocator* memory_allocator = + SingleArenaBufferAllocator::Create(aligned_arena, aligned_arena_size); + + return Create(memory_allocator, memory_planner); } -MicroAllocator* MicroAllocator::Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter) { +MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size, + MemoryPlannerType 
memory_planner_type) { + uint8_t* aligned_arena = + AlignPointerUp(tensor_arena, MicroArenaBufferAlignment()); + size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; + SingleArenaBufferAllocator* memory_allocator = + SingleArenaBufferAllocator::Create(aligned_arena, aligned_arena_size); + + // By default create GreedyMemoryPlanner. + // If a different MemoryPlanner is needed, use the other api. + MicroMemoryPlanner* memory_planner = + CreateMemoryPlanner(memory_planner_type, memory_allocator); + + return Create(memory_allocator, memory_planner); +} + +MicroAllocator* MicroAllocator::Create( + SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner) { TFLITE_DCHECK(memory_allocator != nullptr); - TFLITE_DCHECK(error_reporter != nullptr); + TFLITE_DCHECK(memory_planner != nullptr); - uint8_t* allocator_buffer = memory_allocator->AllocateFromTail( + uint8_t* allocator_buffer = memory_allocator->AllocatePersistentBuffer( sizeof(MicroAllocator), alignof(MicroAllocator)); - MicroAllocator* allocator = - new (allocator_buffer) MicroAllocator(memory_allocator, error_reporter); + MicroAllocator* allocator = new (allocator_buffer) + MicroAllocator(memory_allocator, memory_allocator, memory_planner); + return allocator; +} + +MicroAllocator* MicroAllocator::Create(uint8_t* persistent_tensor_arena, + size_t persistent_arena_size, + uint8_t* non_persistent_tensor_arena, + size_t non_persistent_arena_size, + MemoryPlannerType memory_planner_type) { + TFLITE_DCHECK(persistent_tensor_arena != nullptr); + TFLITE_DCHECK(non_persistent_tensor_arena != nullptr); + TFLITE_DCHECK(persistent_tensor_arena != non_persistent_tensor_arena); + + IPersistentBufferAllocator* persistent_buffer_allocator = + CreatePersistentArenaAllocator(persistent_tensor_arena, + persistent_arena_size); + INonPersistentBufferAllocator* non_persistent_buffer_allocator = + CreateNonPersistentArenaAllocator(non_persistent_tensor_arena, + non_persistent_arena_size, + 
persistent_buffer_allocator); + + // TODO(b/297821738): this should be changed to CreateMemoryPlanner if + // possible once it's figured out why it breaks the HifiMini Build + uint8_t* memory_planner_buffer = nullptr; + MicroMemoryPlanner* memory_planner = nullptr; + + if (memory_planner_type == MemoryPlannerType::kGreedy) { + memory_planner_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + memory_planner = new (memory_planner_buffer) GreedyMemoryPlanner(); + } else if (memory_planner_type == MemoryPlannerType::kLinear) { + memory_planner_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(LinearMemoryPlanner), alignof(LinearMemoryPlanner)); + memory_planner = new (memory_planner_buffer) LinearMemoryPlanner(); + } + + uint8_t* micro_allocator_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(MicroAllocator), alignof(MicroAllocator)); + MicroAllocator* allocator = new (micro_allocator_buffer) + MicroAllocator(persistent_buffer_allocator, + non_persistent_buffer_allocator, memory_planner); return allocator; } -TfLiteStatus MicroAllocator::StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors) { +SubgraphAllocations* MicroAllocator::StartModelAllocation(const Model* model) { TFLITE_DCHECK(model != nullptr); if (model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation started before " - "finishing previously allocated model"); - return kTfLiteError; + MicroPrintf( + "MicroAllocator: Model allocation started before " + "finishing previously allocated model"); + return nullptr; } model_is_allocating_ = true; - TF_LITE_ENSURE_STATUS(InitScratchBufferData()); - TF_LITE_ENSURE_STATUS(AllocateTfLiteEvalTensors(model, eval_tensors)); - TF_LITE_ENSURE_STATUS( - AllocateNodeAndRegistrations(model, 
node_and_registrations)); - TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, *node_and_registrations)); + uint8_t* data_allocator_buffer = + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(MicroBuiltinDataAllocator), + alignof(MicroBuiltinDataAllocator)); + builtin_data_allocator_ = new (data_allocator_buffer) + MicroBuiltinDataAllocator(persistent_buffer_allocator_); - return kTfLiteOk; + if (InitScratchBufferData() != kTfLiteOk) { + return nullptr; + } + + // Allocate struct to store eval tensors, nodes and registrations. + SubgraphAllocations* output = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(SubgraphAllocations) * model->subgraphs()->size(), + alignof(SubgraphAllocations))); + if (output == nullptr) { + MicroPrintf("Failed to allocate memory for model metadata."); + return nullptr; + } + + if ( +#ifdef USE_TFLM_COMPRESSION + AllocateCompressedTensorsList(model, output) != kTfLiteOk || +#endif // USE_TFLM_COMPRESSION + AllocateTfLiteEvalTensors(model, output) != kTfLiteOk || + AllocateNodeAndRegistrations(model, output) != kTfLiteOk) { + return nullptr; + } + return output; } TfLiteStatus MicroAllocator::FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* subgraph_allocations, ScratchBufferHandle** scratch_buffer_handles) { if (!model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation finished before " - "starting allocating model"); + MicroPrintf( + "MicroAllocator: Model allocation finished before " + "starting allocating model"); return kTfLiteError; } - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - + // Allocate scratch buffer metadata. 
TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles( scratch_buffer_handles, scratch_buffer_request_count_)); - TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph, eval_tensors, - *scratch_buffer_handles)); - TF_LITE_ENSURE_STATUS(AllocateVariables(subgraph, eval_tensors)); + // Plan all subgraphs and scratch buffers together. + TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph_allocations, + *scratch_buffer_handles)); model_is_allocating_ = false; return kTfLiteOk; } void* MicroAllocator::AllocatePersistentBuffer(size_t bytes) { - return memory_allocator_->AllocateFromTail(bytes, kBufferAlignment); + return persistent_buffer_allocator_->AllocatePersistentBuffer( + bytes, MicroArenaBufferAlignment()); } TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, + int subgraph_idx, int* buffer_idx) { // All scratch buffer requests are stored in the head section of the arena // when a model is in the prepare phase. First align a scratch buffer request @@ -693,10 +710,8 @@ TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, // First, ensure that the per-kernel request has not exceeded the limit: if (current_node_request_count >= kMaxScratchBuffersPerOp) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Scratch buffer request exeeds limit per operator (%d)", - kMaxScratchBuffersPerOp); + MicroPrintf("Scratch buffer request exeeds limit per operator (%d)", + kMaxScratchBuffersPerOp); return kTfLiteError; } @@ -708,6 +723,7 @@ TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, // allocating: current_request->bytes = bytes; current_request->node_idx = kUnassignedScratchBufferRequestIndex; + current_request->subgraph_idx = subgraph_idx; // Assign the current request index to the out-param: *buffer_idx = scratch_buffer_request_count_; @@ -720,7 +736,7 @@ TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) { // When a 
node has finished preparing, all temp allocations performed by the // kernel should be cleaned up: - ResetTempAllocations(); + TF_LITE_ENSURE_STATUS(ResetTempAllocations()); // Find and update any new scratch buffer requests for the current node: internal::ScratchBufferRequest* requests = GetScratchBufferRequests(); @@ -738,7 +754,8 @@ TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) { // Ensure that the head is re-adjusted to allow for another at-most // kMaxScratchBuffersPerOp scratch buffer requests in the next operator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( + TF_LITE_ENSURE_STATUS(non_persistent_buffer_allocator_->ResizeBuffer( + scratch_buffer_head_, sizeof(internal::ScratchBufferRequest) * (scratch_buffer_request_count_ + kMaxScratchBuffersPerOp), alignof(internal::ScratchBufferRequest))); @@ -747,281 +764,353 @@ TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) { } size_t MicroAllocator::used_bytes() const { - return memory_allocator_->GetUsedBytes(); + return non_persistent_buffer_allocator_->GetNonPersistentUsedBytes() + + persistent_buffer_allocator_->GetPersistentUsedBytes(); } TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { - TFLITE_DCHECK(node_and_registrations); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - NodeAndRegistration* output = reinterpret_cast( - memory_allocator_->AllocateFromTail( - sizeof(NodeAndRegistration) * subgraph->operators()->size(), - alignof(NodeAndRegistration))); - if (output == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for node_and_registrations."); - return kTfLiteError; - } - *node_and_registrations = output; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - 
NodeAndRegistration* node_and_registrations) { - TFLITE_DCHECK(model != nullptr); - TFLITE_DCHECK(node_and_registrations != nullptr); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - TfLiteStatus status = kTfLiteOk; - auto* opcodes = model->operator_codes(); - MicroBuiltinDataAllocator builtin_data_allocator(memory_allocator_); - for (size_t i = 0; i < subgraph->operators()->size(); ++i) { - const auto* op = subgraph->operators()->Get(i); - const size_t index = op->opcode_index(); - if (index >= opcodes->size()) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Missing registration for opcode_index %d\n", index); - return kTfLiteError; - } - auto* opcode = (*opcodes)[index]; - status = - GetRegistrationFromOpCode(opcode, op_resolver, error_reporter_, - &(node_and_registrations[i].registration)); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to get registration from op code %s\n ", - EnumNameBuiltinOperator(GetBuiltinCode(opcode))); - return status; - } - const auto* registration = node_and_registrations[i].registration; - if (registration == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Skipping op for opcode_index %d\n", - index); + const Model* model, SubgraphAllocations* subgraph_allocations) { + TFLITE_DCHECK(subgraph_allocations != nullptr); + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + uint32_t operators_size = NumSubgraphOperators(subgraph); + + // Initialize NodeAndRegistrations for the subgraph. 
+ NodeAndRegistration* output = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(NodeAndRegistration) * operators_size, + alignof(NodeAndRegistration))); + if (output == nullptr) { + MicroPrintf("Failed to allocate memory for node_and_registrations."); return kTfLiteError; } - BuiltinOperator op_type = - static_cast(registration->builtin_code); - - const char* custom_data = nullptr; - size_t custom_data_size = 0; - unsigned char* builtin_data = nullptr; - - if (op_type == BuiltinOperator_CUSTOM) { - // Custom Ops may or may not have a non-null custom_options field. - if (op->custom_options() != nullptr) { - custom_data = - reinterpret_cast(op->custom_options()->data()); - custom_data_size = op->custom_options()->size(); - } - } else { - if (op->custom_options() != nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Unsupported behavior: found builtin operator %s with custom " - "options.\n", - EnumNameBuiltinOperator(op_type)); - return kTfLiteError; - } - - MicroOpResolver::BuiltinParseFunction parser = - op_resolver.GetOpDataParser(op_type); - if (parser == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Did not find a parser for %s", - EnumNameBuiltinOperator(op_type)); - - return kTfLiteError; - } - TF_LITE_ENSURE_STATUS(parser(op, error_reporter_, &builtin_data_allocator, - (void**)(&builtin_data))); - } - - TfLiteIntArray* inputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->inputs(), &inputs_array)); - - TfLiteIntArray* outputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->outputs(), &outputs_array)); - - TfLiteNode* node = &(node_and_registrations[i].node); - *node = {}; - node->inputs = inputs_array; - node->outputs = outputs_array; - node->builtin_data = reinterpret_cast(builtin_data); - node->custom_initial_data = custom_data; - node->custom_initial_data_size = 
custom_data_size; + subgraph_allocations[subgraph_idx].node_and_registrations = output; } - return kTfLiteOk; } TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); TFLITE_DCHECK(subgraph != nullptr); // This value is allocated from persistent arena space. It is guaranteed to be // around for the lifetime of the application. - TfLiteTensor* tensor = - AllocatePersistentTfLiteTensorInternal(model, eval_tensors, tensor_index); + TfLiteTensor* tensor = AllocatePersistentTfLiteTensorInternal(); + + if (tensor == nullptr) { + MicroPrintf("Failed to allocate memory for persistent TfLiteTensor"); + return nullptr; + } // Populate any fields from the flatbuffer, since this TfLiteTensor struct is // allocated in the persistent section of the arena, ensure that additional // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, - /*allocate_temp=*/false) != - kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to populate a persistent TfLiteTensor struct " - "from flatbuffer data!"); + if (PopulateTfLiteTensorFromFlatbuffer( + model, tensor, tensor_index, subgraph_index, + /*allocate_temp=*/false) != kTfLiteOk) { + MicroPrintf( + "Failed to populate a persistent TfLiteTensor struct " + "from flatbuffer data!"); return nullptr; } - if (eval_tensors != nullptr) { + if (subgraph_allocations != nullptr) { // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) // and not located in the flatbuffer are stored on the pre-allocated list of // TfLiteEvalTensors structs. 
These structs are the source of truth, simply // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; + tensor->data.data = + subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; + // TfLiteEvalTensor structs must also be the source of truth for the + // TfLiteTensor dims. + tensor->dims = + subgraph_allocations[subgraph_index].tensors[tensor_index].dims; } return tensor; } +void MicroAllocator::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + TFLITE_DCHECK(tensor != nullptr); + + if (tensor->quantization.type == kTfLiteAffineQuantization) { + TFLITE_DCHECK(tensor->quantization.params != nullptr); + TfLiteAffineQuantization* quantization = + reinterpret_cast( + tensor->quantization.params); + + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(quantization->zero_point)); + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(quantization)); + } + + // Clear the data in case someone still access tensor arena by mistake + tensor->quantization.type = kTfLiteNoQuantization; + tensor->quantization.params = nullptr; + tensor->data.data = nullptr; + tensor->dims = nullptr; + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(tensor)); +} + TfLiteTensor* MicroAllocator::AllocateTempTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); TFLITE_DCHECK(subgraph != nullptr); // This value is allocated from temporary arena space. It is guaranteed to be // around for at least the scope of the calling function. Since this struct // allocation takes place in temp space, no need to own or cleanup. 
- TfLiteTensor* tensor = - reinterpret_cast(memory_allocator_->AllocateTemp( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); + TfLiteTensor* tensor = reinterpret_cast( + non_persistent_buffer_allocator_->AllocateTemp(sizeof(TfLiteTensor), + alignof(TfLiteTensor))); // Populate any fields from the flatbuffer, since this TfLiteTensor struct is // allocated in the temp section of the arena, ensure that additional // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, + if (PopulateTfLiteTensorFromFlatbuffer(model, tensor, tensor_index, + subgraph_index, /*allocate_temp=*/true) != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, + MicroPrintf( "Failed to populate a temp TfLiteTensor struct from flatbuffer data!"); return nullptr; } - if (eval_tensors != nullptr) { + if (subgraph_allocations != nullptr) { // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) // and not located in the flatbuffer are stored on the pre-allocated list of // TfLiteEvalTensors structs. These structs are the source of truth, simply // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; + tensor->data.data = + subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; + // TfLiteEvalTensor structs must also be the source of truth for the + // TfLiteTensor dims. 
+ tensor->dims = + subgraph_allocations[subgraph_index].tensors[tensor_index].dims; } return tensor; } -void MicroAllocator::ResetTempAllocations() { - memory_allocator_->ResetTempAllocations(); +uint8_t* MicroAllocator::AllocateTempBuffer(size_t size, size_t alignment) { + return non_persistent_buffer_allocator_->AllocateTemp(size, alignment); } -TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); +void MicroAllocator::DeallocateTempBuffer(uint8_t* buffer) { + non_persistent_buffer_allocator_->DeallocateTemp(buffer); +} - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); +TfLiteStatus MicroAllocator::ResetTempAllocations() { + return non_persistent_buffer_allocator_->ResetTempAllocations(); +} + +bool MicroAllocator::IsAllTempDeallocated() { + return non_persistent_buffer_allocator_->IsAllTempDeallocated(); +} + +#ifdef USE_TFLM_COMPRESSION + +TfLiteStatus MicroAllocator::AllocateCompressedTensorsList( + const Model* model, SubgraphAllocations* subgraph_allocations) { + TFLITE_DCHECK(subgraph_allocations != nullptr); - size_t alloc_count = subgraph->tensors()->size(); - TfLiteEvalTensor* tensors = - reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); - if (tensors == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate memory for context->eval_tensors, " - "%d bytes required", - sizeof(TfLiteEvalTensor) * alloc_count); + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + subgraph_allocations[subgraph_idx].compressed.tensors = nullptr; + } + + const tflite::micro::compression::Metadata* compression_metadata = + internal::GetCompressionMetadata(*model); + if (compression_metadata == nullptr) { + // no compression metadata is available + return kTfLiteOk; + } + if 
(compression_metadata->subgraphs() == nullptr) { + MicroPrintf("Compression: invalid Subgraph vector"); + return kTfLiteError; + } + if (compression_metadata->subgraphs()->size() == 0) { + MicroPrintf("Compression: zero length Subgraph vector"); return kTfLiteError; } - for (size_t i = 0; i < alloc_count; ++i) { - TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( - memory_allocator_, *subgraph->tensors()->Get(i), model->buffers(), - error_reporter_, &tensors[i]); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, "Failed to initialize tensor %d", - i); + for (size_t subgraph_index = 0; + subgraph_index < compression_metadata->subgraphs()->size(); + subgraph_index++) { + auto subgraph = compression_metadata->subgraphs()->Get(subgraph_index); + + if (subgraph->lut_tensors() == nullptr) { + MicroPrintf("Compression: invalid LutTensor vector"); return kTfLiteError; } + if (subgraph->lut_tensors()->size() == 0) { + MicroPrintf("Compression: zero length LutTensor vector"); + return kTfLiteError; + } + + for (size_t lut_tensors_index = 0; + lut_tensors_index < subgraph->lut_tensors()->size(); + lut_tensors_index++) { + auto lut_tensor = subgraph->lut_tensors()->Get(lut_tensors_index); + + CompressionTensorData* ctd = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(CompressionTensorData), alignof(CompressionTensorData))); + if (ctd == nullptr) { + MicroPrintf( + "Compressions: failed to allocate memory for " + "CompressionTensorData, %d bytes required", + sizeof(CompressionTensorData)); + return kTfLiteError; + } + + LookupTableData* lut_table = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(LookupTableData), alignof(LookupTableData))); + if (lut_table == nullptr) { + MicroPrintf( + "Compressions: failed to allocate memory for LookupTableData, " + "%d bytes required", + sizeof(LookupTableData)); + return kTfLiteError; + } + ctd->data.lut_data = lut_table; + 
+ TfLiteStatus status = + internal::InitializeCompressionTensorDataFromFlatbuffer( + *model, subgraph_index, *lut_tensor, ctd); + if (status != kTfLiteOk) { + MicroPrintf("Compression: failed to initialize data for LutTensor %u", + lut_tensors_index); + return kTfLiteError; + } + + if (subgraph_allocations[subgraph_index].compressed.tensors == nullptr) { + size_t alloc_count = + model->subgraphs()->Get(subgraph_index)->tensors()->size(); + const CompressionTensorData** tensors = + reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(CompressionTensorData*) * alloc_count, + alignof(CompressionTensorData*))); + if (tensors == nullptr) { + MicroPrintf( + "Compression: failed to allocate memory for compression tensor " + "list, %d bytes required", + sizeof(CompressionTensorData*) * alloc_count); + return kTfLiteError; + } + + subgraph_allocations[subgraph_index].compressed.tensors = tensors; + std::fill(tensors, tensors + alloc_count, nullptr); + } + + const size_t tensor_index = lut_tensor->tensor(); + if (subgraph_allocations[subgraph_index] + .compressed.tensors[tensor_index] != nullptr) { + MicroPrintf("Compression: duplicate LutTensor subgraph %u tensor %u", + subgraph_index, tensor_index); + return kTfLiteError; + } else { + subgraph_allocations[subgraph_index].compressed.tensors[tensor_index] = + ctd; + } + } } - *eval_tensors = tensors; + return kTfLiteOk; } -TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) { +#endif // USE_TFLM_COMPRESSION + +TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( + const Model* model, SubgraphAllocations* subgraph_allocations) { + TFLITE_DCHECK(subgraph_allocations != nullptr); + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + size_t alloc_count = subgraph->tensors()->size(); + 
TfLiteEvalTensor* tensors = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); + if (tensors == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->eval_tensors, " + "%d bytes required", + sizeof(TfLiteEvalTensor) * alloc_count); + return kTfLiteError; + } + + for (size_t i = 0; i < alloc_count; ++i) { + TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( + *subgraph->tensors()->Get(i), model->buffers(), &tensors[i]); + if (status != kTfLiteOk) { + MicroPrintf("Failed to initialize tensor %d", i); + return kTfLiteError; + } + } + subgraph_allocations[subgraph_idx].tensors = tensors; + } + return kTfLiteOk; +} + +TfLiteStatus MicroAllocator::AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) { for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { auto* tensor = subgraph->tensors()->Get(i); if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); + if (offline_planner_offsets == nullptr || + offline_planner_offsets[i] == kOnlinePlannedBuffer) { + size_t buffer_size; + TF_LITE_ENSURE_STATUS( + TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); - eval_tensors[i].data.data = - memory_allocator_->AllocateFromTail(buffer_size, kBufferAlignment); + eval_tensors[i].data.data = + persistent_buffer_allocator_->AllocatePersistentBuffer( + buffer_size, MicroArenaBufferAlignment()); - if (eval_tensors[i].data.data == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate variable tensor of size %d", - buffer_size); - return kTfLiteError; + if (eval_tensors[i].data.data == nullptr) { + MicroPrintf("Failed to allocate variable tensor of size %d", + buffer_size); + return kTfLiteError; + } } } } return kTfLiteOk; } -TfLiteTensor* 
MicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - return reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); +TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal() { + return reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(TfLiteTensor), alignof(TfLiteTensor))); } TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { + const Model* model, TfLiteTensor* tensor, int tensor_index, + int subgraph_idx, bool allocate_temp) { // TODO(b/162311891): This method serves as a stub to ensure quantized // allocations in the tail can be recorded. Once the interpreter has APIs for // accessing buffers on TfLiteEvalTensor this method can be dropped. return internal::InitializeTfLiteTensorFromFlatbuffer( - memory_allocator_, allocate_temp, *subgraph->tensors()->Get(tensor_index), - model->buffers(), error_reporter_, tensor); -} - -ErrorReporter* MicroAllocator::error_reporter() const { - return error_reporter_; -} - -const SubGraph* MicroAllocator::GetSubGraphFromModel(const Model* model) { - auto* subgraphs = model->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - return nullptr; - } - return (*subgraphs)[0]; + persistent_buffer_allocator_, non_persistent_buffer_allocator_, + allocate_temp, + *model->subgraphs()->Get(subgraph_idx)->tensors()->Get(tensor_index), + model->buffers(), tensor); } TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* allocations, ScratchBufferHandle* scratch_buffer_handles) { size_t head_usage = 0; // Create static memory plan @@ -1034,69 
+1123,70 @@ TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( // allocated from the temp section and cleaned up at the bottom of this // function. - size_t allocation_info_count = - subgraph->tensors()->size() + scratch_buffer_request_count_; - size_t bytes = sizeof(AllocationInfo) * allocation_info_count; - - // Allocate an array of AllocationInfo structs from the temp section. This - // struct will be used by AllocationInfoBuilder to find buffer usage. - AllocationInfo* allocation_info = reinterpret_cast( - memory_allocator_->AllocateTemp(bytes, alignof(AllocationInfo))); - if (allocation_info == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for allocation_info, %d bytes required", - bytes); - return kTfLiteError; - } - // Use the AllocationInfoBuilder class to help determine where buffers are // used in the subgraph. - AllocationInfoBuilder builder(allocation_info, subgraph->tensors()->size(), - scratch_buffer_request_count_, error_reporter_); + AllocationInfoBuilder builder(model, non_persistent_buffer_allocator_); + TF_LITE_ENSURE_STATUS( + builder.CreateAllocationInfo(scratch_buffer_request_count_)); const int32_t* offline_planner_offsets = nullptr; TF_LITE_ENSURE_STATUS( - builder.GetOfflinePlannedOffsets(model, &offline_planner_offsets)); + builder.GetOfflinePlannedOffsets(&offline_planner_offsets)); + + // We allocate buffers for variable tensors here since the offline planner + // offsets are conviently available here. 
+ for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + TF_LITE_ENSURE_STATUS(AllocateVariables( + subgraph, allocations[subgraph_idx].tensors, offline_planner_offsets)); + } + TF_LITE_ENSURE_STATUS( - builder.AddTensors(subgraph, offline_planner_offsets, eval_tensors)); + builder.InitializeAllocationInfo(offline_planner_offsets, allocations)); internal::ScratchBufferRequest* scratch_buffer_requests = GetScratchBufferRequests(); - - TF_LITE_ENSURE_STATUS(builder.AddScratchBuffers(scratch_buffer_requests, - scratch_buffer_handles)); + TF_LITE_ENSURE_STATUS(builder.MarkAllocationLifetimes( + 0, scratch_buffer_requests, scratch_buffer_handles, allocations)); + int allocation_info_count = builder.AllocationCount(); + AllocationInfo* allocation_info = builder.Finish(); // Remaining arena size that memory planner can use for calculating offsets. size_t remaining_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - uint8_t* planner_arena = - memory_allocator_->AllocateTemp(remaining_arena_size, kBufferAlignment); - TF_LITE_ENSURE(error_reporter_, planner_arena != nullptr); - GreedyMemoryPlanner planner(planner_arena, remaining_arena_size); - TF_LITE_ENSURE_STATUS(CreatePlan(error_reporter_, &planner, allocation_info, - allocation_info_count)); + non_persistent_buffer_allocator_->GetAvailableMemory( + MicroArenaBufferAlignment()); + uint8_t* planner_arena = non_persistent_buffer_allocator_->AllocateTemp( + remaining_arena_size, MicroArenaBufferAlignment()); - // Reset all temp allocations used above: - memory_allocator_->ResetTempAllocations(); - - size_t actual_available_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - - // Make sure we have enough arena size. 
- if (planner.GetMaximumMemorySize() > actual_available_arena_size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Arena size is too small for all buffers. Needed %u but only " - "%u was available.", - planner.GetMaximumMemorySize(), actual_available_arena_size); + if (planner_arena == nullptr) { return kTfLiteError; } + + memory_planner_->Init(planner_arena, remaining_arena_size); + TF_LITE_ENSURE_STATUS( + CreatePlan(memory_planner_, allocation_info, allocation_info_count)); + // Commit the plan. - TF_LITE_ENSURE_STATUS(CommitPlan(error_reporter_, &planner, - memory_allocator_->GetHeadBuffer(), - allocation_info, allocation_info_count)); - head_usage = planner.GetMaximumMemorySize(); + TF_LITE_ENSURE_STATUS( + CommitPlan(memory_planner_, + non_persistent_buffer_allocator_->GetOverlayMemoryAddress(), + allocation_info, allocation_info_count)); + + // Reset all temp allocations used above: + builder.FreeAllocationInfo(); + non_persistent_buffer_allocator_->DeallocateTemp(planner_arena); + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->ResetTempAllocations()); + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->DeallocateResizableBuffer( + scratch_buffer_head_)); + +#ifdef TF_LITE_SHOW_MEMORY_USE + memory_planner_->PrintMemoryPlan(); +#endif + head_usage = memory_planner_->GetMaximumMemorySize(); // The head is used to store memory plans for one model at a time during the // model preparation stage, and is re-purposed to store scratch buffer handles @@ -1110,8 +1200,9 @@ TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( // The head is used for storing scratch buffer allocations before finalizing a // memory plan in this function. 
Ensure that the head is set to the largest // memory plan sent through the allocator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - max_head_buffer_usage_, kBufferAlignment)); + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->ReserveNonPersistentOverlayMemory( + max_head_buffer_usage_, MicroArenaBufferAlignment())); return kTfLiteOk; } @@ -1127,7 +1218,7 @@ TfLiteStatus MicroAllocator::AllocateScratchBufferHandles( // Allocate a consecutive block of memory store the scratch buffer handles. // This alignment ensures quick lookup during inference time for the model: *scratch_buffer_handles = reinterpret_cast( - memory_allocator_->AllocateFromTail( + persistent_buffer_allocator_->AllocatePersistentBuffer( sizeof(ScratchBufferHandle) * handle_count, alignof(ScratchBufferHandle))); @@ -1142,17 +1233,24 @@ TfLiteStatus MicroAllocator::InitScratchBufferData() { // All requests will be stored in the head section. Each kernel is allowed at // most kMaxScratchBuffersPerOp requests. 
Adjust the head to reserve at most // that many requests to begin: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, - alignof(internal::ScratchBufferRequest))); + scratch_buffer_head_ = + non_persistent_buffer_allocator_->AllocateResizableBuffer( + sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, + alignof(internal::ScratchBufferRequest)); + if (scratch_buffer_head_ == nullptr) { + return kTfLiteError; + } return kTfLiteOk; } internal::ScratchBufferRequest* MicroAllocator::GetScratchBufferRequests() { - return reinterpret_cast( - AlignPointerUp(memory_allocator_->GetHeadBuffer(), - alignof(internal::ScratchBufferRequest))); + return reinterpret_cast(AlignPointerUp( + scratch_buffer_head_, alignof(internal::ScratchBufferRequest))); +} + +TfLiteBridgeBuiltinDataAllocator* MicroAllocator::GetBuiltinDataAllocator() { + return builtin_data_allocator_; } } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_allocator.h b/src/tensorflow/lite/micro/micro_allocator.h index 39a12ea..215bffc 100644 --- a/src/tensorflow/lite/micro/micro_allocator.h +++ b/src/tensorflow/lite/micro/micro_allocator.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,16 +18,24 @@ limitations under the License. 
#include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" #include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" #include "tensorflow/lite/schema/schema_generated.h" +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/compression.h" + +#endif // USE_TFLM_COMPRESSION + namespace tflite { +// TODO(b/199402574): rename to tflite_internal or just remove internal +// namespace. namespace internal { // Sets up all of the data structure members for a TfLiteTensor based on the @@ -35,10 +43,11 @@ namespace internal { // TODO(b/162311891): Drop this method when the interpreter has an API for // returning buffers on TfLiteEvalTensor. TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + bool allocate_temp, const tflite::Tensor& flatbuffer_tensor, const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result); + TfLiteTensor* result); // Holds placeholder information for a scratch buffer request from a kernel. // This struct is only used during the model prepare stage. Each request from a @@ -50,7 +59,7 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( // of a sequential, array of ScratchBufferHandle allocations in the tail // section. 
These allocations are indexed by the request API defined in the // TfLiteContext struct. -typedef struct { +struct ScratchBufferRequest { // Number of bytes required by the buffer. The actual allocated size might be // greater than `bytes` due to buffer alignment. size_t bytes; @@ -58,22 +67,40 @@ typedef struct { // determine the lifetime of the buffer. In AllocationInfo, this buffer will // have `before` = node_idx and `after` = node_idx. int node_idx; -} ScratchBufferRequest; + int subgraph_idx; +}; } // namespace internal -typedef struct { +// Enum used to keep track of which MemoryPlanner is being used for +// MicroAllocater::Create(); +enum class MemoryPlannerType { + kGreedy, + kLinear, +}; + +struct NodeAndRegistration { TfLiteNode node; - const TfLiteRegistration* registration; -} NodeAndRegistration; + const TFLMRegistration* registration; +}; // Holds a pointer to a buffer for a scratch buffer requested by a kernel during // the model prepare stage. This struct is allocated in-place and allows for // quick pointer-indexed lookup for speed during model inference. -typedef struct { +struct ScratchBufferHandle { // Pointer to location of the scratch buffer: uint8_t* data; -} ScratchBufferHandle; +}; + +// Stores all per-subgraph allocations. This includes the node and registration +// array, and tensor list for each subgraph. +struct SubgraphAllocations { + NodeAndRegistration* node_and_registrations; + TfLiteEvalTensor* tensors; +#ifdef USE_TFLM_COMPRESSION + CompressedTensorList compressed; +#endif // USE_TFLM_COMPRESSION +}; // Allocator responsible for allocating memory for all intermediate tensors // necessary to invoke a model. @@ -84,9 +111,9 @@ typedef struct { // // The MicroAllocator simply plans out additional allocations that are required // to standup a model for inference in TF Micro. This class currently relies on -// an additional allocator - SimpleMemoryAllocator - for all allocations from an -// arena. 
These allocations are divided into head (non-persistent) and tail -// (persistent) regions: +// an additional allocator - SingleArenaBufferAllocator - for all allocations +// from an arena. These allocations are divided into head (non-persistent) and +// tail (persistent) regions: // // Memory layout to help understand how it works // This information could change in the future version. @@ -101,41 +128,72 @@ typedef struct { class MicroAllocator { public: // Creates a MicroAllocator instance from a given tensor arena. This arena - // will be managed by the created instance. - // Note: Please use __declspec(align(16)) to make sure tensor_arena is 16 + // will be managed by the created instance. The GreedyMemoryPlanner will + // by default be used and created on the arena. + // Note: Please use alignas(16) to make sure tensor_arena is 16 // bytes aligned, otherwise some head room will be wasted. // TODO(b/157615197): Cleanup constructor + factory usage. + static MicroAllocator* Create( + uint8_t* tensor_arena, size_t arena_size, + MemoryPlannerType memory_planner_type = MemoryPlannerType::kGreedy); + + // Creates a MicroAllocator instance from a given tensor arena and a given + // MemoryPlanner. This arena will be managed by the created instance. Note: + // Please use alignas(16) to make sure tensor_arena is 16 bytes + // aligned, otherwise some head room will be wasted. static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance using the provided SimpleMemoryAllocator - // intance. This allocator instance will use the SimpleMemoryAllocator - // instance to manage allocations internally. - static MicroAllocator* Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); - - // Begin allocating internal resources required for model inference. 
+ MicroMemoryPlanner* memory_planner); + + // Creates a MicroAllocator instance using the provided + // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator + // instance will use the SingleArenaBufferAllocator instance to manage + // allocations internally. + static MicroAllocator* Create(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); + + // Creates a MicroAllocator instance using the provided + // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator + // instance will use the SingleArenaBufferAllocator instance to manage + // allocations internally. + static MicroAllocator* Create( + uint8_t* persistent_tensor_arena, size_t persistent_arena_size, + uint8_t* non_persistent_tensor_arena, size_t non_persistent_arena_size, + MemoryPlannerType memory_planner_type = MemoryPlannerType::kGreedy); + + // Returns the fixed amount of memory overhead of MicroAllocator. + static size_t GetDefaultTailUsage(bool is_memory_planner_given); + + // Returns True if the MicroAllocator uses a LinearMemoryPlanner(is compatible + // with the PerserveAllTensors flag / feature ) and False otherwise. + bool preserves_all_tensor() const { + return memory_planner_->preserves_all_tensors(); + }; + + // Allocates internal resources required for model inference for each subgraph + // from the arena. + // // This method will run through the flatbuffer data supplied in the model to // properly allocate tensor, node, and op registration data. This method is - // expected to be followed with a call to FinishModelAllocation() before - // resuming allocation with another model. All persistent tensor buffers are - // stored in the out-param eval_tensors. This value is allocated from the - // persistent memory arena and will be used to host runtime tensor buffers. 
- TfLiteStatus StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors); + // expected to be followed with a call to FinishModelAllocation() Returns a + // pointer to an array of SubgraphAllocations (also stored in the tail of the + // arena) where each index corresponds to a different subgraph in the model. + // Return value is nullptr if the allocations failed. + SubgraphAllocations* StartModelAllocation(const Model* model); // Finish allocating internal resources required for model inference. - // This method will plan non-persistent buffers and commit a memory plan to - // the 'head' section of the memory arena. All variable tensor data will also - // be allocated. This method should be called after assigning model resources - // in StartModelAllocation(). The eval_tensors pointer should be the value - // passed into this class during StartModelAllocation(). Scratch buffer - // handles are stored in the out-param `scratch_buffer_handles`. This value - // will be used in `GetScratchBuffer` call to retrieve scratch buffers. + // + // -Plan the memory for activation tensors and scratch buffers. + // -Update eval tensors for each subgraph based on planned offsets. + // -Allocate scratch buffer handles array and update based on planned offsets. + // + // This method should be called after assigning model resources + // in StartModelAllocation(). The subgraph_allocations pointer should be the + // value passed into this class during StartModelAllocation(). Scratch buffer + // handles are stored in the out-param `scratch_buffer_handles` array which is + // allocated in this method. This value will be used in `GetScratchBuffer` + // call to retrieve scratch buffers. 
TfLiteStatus FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* subgraph_allocations, ScratchBufferHandle** scratch_buffer_handles); // Allocates a TfLiteTensor struct and populates the returned value with @@ -145,22 +203,37 @@ class MicroAllocator { // class during StartModelAllocation() and contains the source-of-truth for // buffers. virtual TfLiteTensor* AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index); // Allocates a TfLiteTensor struct and populates the returned value with // properties from the model flatbuffer. This struct is allocated from // temporary arena memory is only guaranteed until a call is made to - // ResetTempAllocations(). The eval_tensors pointer should be the value passed - // into this class during StartModelAllocation() and contains the - // source-of-truth for buffers. - virtual TfLiteTensor* AllocateTempTfLiteTensor(const Model* model, - TfLiteEvalTensor* eval_tensors, - int tensor_index); + // ResetTempAllocations(). Subgraph_allocations contains the array of + // TfLiteEvalTensors. If the newly allocated temp at the specified subgraph + // and tensor index is already present int the TfLiteEvalTensor array, its + // data buffer will be re-used. + virtual TfLiteTensor* AllocateTempTfLiteTensor( + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index); + + virtual void DeallocateTempTfLiteTensor(TfLiteTensor*); + + // Returns a pointer to a buffer from the temporary arena memory and is only + // guaranteed until a call is made to ResetTempAllocations(). + virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment); + + // Signals that the temporary buffer no longer needed. 
+ virtual void DeallocateTempBuffer(uint8_t* buffer); // Resets all temporary allocations. This method should be called after a // chain of temp allocations (e.g. chain of TfLiteTensor objects via // AllocateTfLiteTensor()). - virtual void ResetTempAllocations(); + virtual TfLiteStatus ResetTempAllocations(); + + // Returns true if all temporary buffers including temp TfLiteTensor are + // already deallocated. + virtual bool IsAllTempDeallocated(); // Allocates persistent buffer which has the same life time as the allocator. // The memory is immediately available and is allocated from the tail of the @@ -171,7 +244,8 @@ class MicroAllocator { // This method only requests a buffer with a given size to be used after a // model has finished allocation via FinishModelAllocation(). All requested // buffers will be accessible by the out-param in that method. - TfLiteStatus RequestScratchBufferInArena(size_t bytes, int* buffer_idx); + TfLiteStatus RequestScratchBufferInArena(size_t bytes, int subgraph_idx, + int* buffer_idx); // Finish allocating a specific NodeAndRegistration prepare block (kernel // entry for a model) with a given node ID. This call ensures that any scratch @@ -183,53 +257,57 @@ class MicroAllocator { // `FinishModelAllocation`. Otherwise, it will return 0. size_t used_bytes() const; + TfLiteBridgeBuiltinDataAllocator* GetBuiltinDataAllocator(); + protected: - MicroAllocator(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); + MicroAllocator(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); + MicroAllocator(IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + MicroMemoryPlanner* memory_planner); virtual ~MicroAllocator(); +#ifdef USE_TFLM_COMPRESSION + + // Allocates an array in the arena of pointers to the compressions data + // required to decompress tensors for each subgraph within the model. 
+ virtual TfLiteStatus AllocateCompressedTensorsList( + const Model* model, SubgraphAllocations* subgraph_allocations); + +#endif // USE_TFLM_COMPRESSION + // Allocates an array in the arena to hold pointers to the node and // registration pointers required to represent the inference graph of the // model. virtual TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations); - - // Populates node and registration pointers representing the inference graph - // of the model from values inside the flatbuffer (loaded from the TfLiteModel - // instance). Persistent data (e.g. operator data) is allocated from the - // arena. - virtual TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations); + const Model* model, SubgraphAllocations* subgraph_allocations); // Allocates the list of persistent TfLiteEvalTensors that are used for the // "eval" phase of model inference. These structs will be the source of truth - // for all tensor buffers. Allocation results are stored in the out-param - // eval_tensors. + // for all tensor buffers. virtual TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors); + const Model* model, SubgraphAllocations* subgraph_allocations); // Allocates persistent tensor buffers for variable tensors in the subgraph. - virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors); + // Online and offline variable tensors are handled differently hence the + // offline_planner_offsets parameter is needed. + virtual TfLiteStatus AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets); // Allocate and return a persistent TfLiteTensor. // TODO(b/162311891): Drop this method when the interpreter has an API for // accessing TfLiteEvalTensor structs. 
- virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); + virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal(); // Populates a TfLiteTensor struct with data from the model flatbuffer. Any // quantization data is allocated from either the tail (persistent) or temp // sections of the arena based on the allocation flag. - virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp); - - ErrorReporter* error_reporter() const; - - // Returns the first subgraph from the model. - const SubGraph* GetSubGraphFromModel(const Model* model); + virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, + TfLiteTensor* tensor, + int tensor_index, + int subgraph_idx, + bool allocate_temp); private: // Commits a memory plan for all non-persistent buffer allocations in the @@ -240,8 +318,7 @@ class MicroAllocator { // ScratchBufferHandle structs that will point to allocated buffers also in // the head section. virtual TfLiteStatus CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* allocations, ScratchBufferHandle* scratch_buffer_handles); // Allocates an array of ScratchBufferHandle structs in the tail section for a @@ -259,15 +336,26 @@ class MicroAllocator { internal::ScratchBufferRequest* GetScratchBufferRequests(); // A simple memory allocator that always allocate from the arena tail or head. - SimpleMemoryAllocator* memory_allocator_; + INonPersistentBufferAllocator* non_persistent_buffer_allocator_; + IPersistentBufferAllocator* persistent_buffer_allocator_; + + // Allocator used to allocate persistent builtin data. 
+ TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_ = + nullptr; // Initialized as nullptr to prevent any possible issues related + // to accessing uninitialized memory. + + // Activation buffer memory planner. + MicroMemoryPlanner* memory_planner_; - ErrorReporter* error_reporter_; bool model_is_allocating_; // Holds the number of ScratchBufferRequest instances stored in the head // section when a model is allocating. size_t scratch_buffer_request_count_ = 0; + // Holds ScratchBufferRequest when a model is allocating + uint8_t* scratch_buffer_head_ = nullptr; + // Holds the byte length of the memory plan with the largest head usage. Used // to ensure that multi-tenant allocations can share the head for buffers. size_t max_head_buffer_usage_ = 0; diff --git a/src/tensorflow/lite/micro/micro_arena_constants.h b/src/tensorflow/lite/micro/micro_arena_constants.h new file mode 100644 index 0000000..8282817 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_arena_constants.h @@ -0,0 +1,28 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ + +namespace tflite { + +// The default buffer alignment requirement. +// We align tensor buffers to 16-byte boundaries, since this is a common +// requirement for SIMD extensions. 
+constexpr int MicroArenaBufferAlignment() { return 16; } + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ diff --git a/src/tensorflow/lite/micro/micro_common.h b/src/tensorflow/lite/micro/micro_common.h new file mode 100644 index 0000000..9ab427f --- /dev/null +++ b/src/tensorflow/lite/micro/micro_common.h @@ -0,0 +1,38 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_COMMON_H_ +#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_COMMON_H_ + +#include "tensorflow/lite/c/common.h" + +// TFLMRegistration defines the API that TFLM kernels need to implement. +// This will be replacing the current TfLiteRegistration_V1 struct with +// something more compatible Embedded enviroment TFLM is used in. 
+struct TFLMRegistration { + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + void (*free)(TfLiteContext* context, void* buffer); + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + void (*reset)(TfLiteContext* context, void* buffer); + int32_t builtin_code; + const char* custom_name; +}; + +struct TFLMInferenceRegistration { + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + void (*reset)(TfLiteContext* context, void* buffer); +}; + +#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_COMMON_H_ diff --git a/src/tensorflow/lite/micro/micro_context.cpp b/src/tensorflow/lite/micro/micro_context.cpp new file mode 100644 index 0000000..ea4fd8e --- /dev/null +++ b/src/tensorflow/lite/micro/micro_context.cpp @@ -0,0 +1,142 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/micro_context.h" + +#include +#include + +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/kernels/decompress.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +int GetTensorIndex(int index, int max_size, const int* tensor_indices) { + if (index >= 0 && index < max_size) { + const int tensor_index = tensor_indices[index]; + if (tensor_index != kTfLiteOptionalTensor) { + return tensor_index; + } + } + return -1; +} + +} // namespace + +TfLiteTensor* MicroContext::AllocateTempInputTensor(const TfLiteNode* node, + int index) { + const int tensor_index = + GetTensorIndex(index, node->inputs->size, node->inputs->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +TfLiteTensor* MicroContext::AllocateTempOutputTensor(const TfLiteNode* node, + int index) { + const int tensor_index = + GetTensorIndex(index, node->outputs->size, node->outputs->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +TfLiteTensor* MicroContext::AllocateTempIntermediateTensor( + const TfLiteNode* node, int index) { + const int tensor_index = GetTensorIndex(index, node->intermediates->size, + node->intermediates->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +void MicroContextReportOpError(struct TfLiteContext* context, + const char* format, ...) 
{ + va_list args; + va_start(args, format); + VMicroPrintf(format, args); + va_end(args); +} + +#ifdef USE_TFLM_COMPRESSION + +void* MicroContext::DecompressTensorToBuffer( + const TfLiteEvalTensor& tensor, + const CompressionTensorData& compression_data, void* buffer) { + TFLITE_DCHECK(compression_data.scheme == CompressionScheme::kBinQuant); + TFLITE_DCHECK(buffer != nullptr); + size_t count = ElementCount(*tensor.dims); + size_t num_channels = 1; + + if (compression_data.data.lut_data->is_per_channel_quantized) { + const size_t channel_axis = + compression_data.data.lut_data->use_alternate_axis + ? tensor.dims->size - 1 + : 0; + num_channels = tensor.dims->data[channel_axis]; + } + + DecompressionState ds(static_cast(tensor.data.data), count, + compression_data, num_channels, GetAlternateProfiler()); + + switch (tensor.type) { + case kTfLiteBool: { + return ds.DecompressToBuffer(buffer); + } break; + case kTfLiteInt8: { + return ds.DecompressToBuffer(buffer); + } break; + case kTfLiteInt16: { + return ds.DecompressToBuffer(buffer); + } break; + case kTfLiteInt32: { + return ds.DecompressToBuffer(buffer); + } break; + case kTfLiteInt64: { + return ds.DecompressToBuffer(buffer); + } break; + case kTfLiteFloat32: { + return ds.DecompressToBuffer(buffer); + } break; + default: { + MicroPrintf("Unsupported decompression tensor type %d", tensor.type); + } break; + } + + return nullptr; +} + +TfLiteStatus MicroContext::SetDecompressionMemory( + const std::initializer_list& regions) { + return kTfLiteError; +} + +void* MicroContext::AllocateDecompressionMemory(size_t bytes, + size_t alignment) { + return nullptr; +} + +void MicroContext::ResetDecompressionMemoryAllocations() {} + +#endif // USE_TFLM_COMPRESSION + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_context.h b/src/tensorflow/lite/micro/micro_context.h new file mode 100644 index 0000000..5b1ea9c --- /dev/null +++ b/src/tensorflow/lite/micro/micro_context.h @@ -0,0 +1,215 @@ +/* Copyright 
2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" + +#ifdef USE_TFLM_COMPRESSION + +#include + +#include "tensorflow/lite/micro/compression.h" + +#endif // USE_TFLM_COMPRESSION + +namespace tflite { +// TODO(b/149795762): kTfLiteAbort cannot be part of the tflite TfLiteStatus. +const TfLiteStatus kTfLiteAbort = static_cast(15); + +// MicroContext is eventually going to become the API between TFLM and the +// kernels, replacing all the functions in TfLiteContext. The end state is code +// kernels to have code like: +// +// MicroContext* micro_context = GetMicroContext(context); +// micro_context-> +class MicroContext { + public: + virtual ~MicroContext() = default; + + // Allocate persistent buffer which has the same life time as the interpreter. + // Returns nullptr on failure. + // The memory is allocated from the tail. + // This method is only available in Init or Prepare stage. + virtual void* AllocatePersistentBuffer(size_t bytes) = 0; + + // Request a scratch buffer in the arena through static memory planning. 
+ // This method is only available in Prepare stage and the buffer is allocated + // by the interpreter between Prepare and Eval stage. In Eval stage, + // GetScratchBuffer API can be used to fetch the address. + virtual TfLiteStatus RequestScratchBufferInArena(size_t bytes, + int* buffer_idx) = 0; + + // Get the scratch buffer pointer. + // This method is only available in Eval stage. + virtual void* GetScratchBuffer(int buffer_idx) = 0; + + // Returns a temporary TfLiteTensor struct for a given index. + virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx) = 0; + + // Returns a temporary TfLiteTensor struct for the specified input tensor of a + // given mode. This is the recommended API over the deprecated + // GetInput/GetInputSafe to get a temp input tensor. The returned tensor shall + // be freed via calling DeallocateTempTfLiteTensor. + TfLiteTensor* AllocateTempInputTensor(const TfLiteNode* node, int index); + + // Returns a temporary TfLiteTensor struct for the specified output tensor of + // a given mode. This is the recommended API over the deprecated + // GetOutput/GetOutputSafe to get a temp output tensor. The returned tensor + // shall be freed via calling DeallocateTempTfLiteTensor. + TfLiteTensor* AllocateTempOutputTensor(const TfLiteNode* node, int index); + + // Returns a temporary TfLiteTensor struct for the specified intermediate + // tensor of a given mode. This is the recommended API over the deprecated + // GetIntermediates/GetIntermediatesSafe to get a temp intermediate tensor. + // The returned tensor shall be freed via calling DeallocateTempTfLiteTensor. + TfLiteTensor* AllocateTempIntermediateTensor(const TfLiteNode* node, + int index); + + // Deallocates a temp TfLiteTensor. + virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) = 0; + + // Returns a pointer to a temporary buffer (from the arena). 
+ // This API is only valid from the kernel's Prepare function and + // the buffer's lifetime is also that of the Prepare function. + virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment) = 0; + + // Signals that the temporary buffer is no longer needed. + virtual void DeallocateTempBuffer(uint8_t* buffer) = 0; + + // Returns a TfLiteEvalTensor struct for a given index. + virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx) = 0; + + // Does not take ownership of the pointer and the pointer must refer to valid + // an object that outlive this class instance. + // This can only be called once to set one external context. + virtual TfLiteStatus set_external_context(void* external_context_payload) = 0; + + virtual void* external_context() = 0; + + virtual MicroGraph& graph() = 0; + +#ifdef USE_TFLM_COMPRESSION + + // Available during Prepare & Eval. Returns false if tensor is not + // compressed. + virtual bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) = 0; + + // Only available during Prepare. The kernel is responsible for storing the + // scratch buffer handle. + virtual int AllocateDecompressionScratchBuffer(const TfLiteNode* node, + int tensor_idx) = 0; + + // Available during Prepare & Eval. Returns nullptr if tensor is not + // compressed. + virtual const CompressionTensorData* GetTensorCompressionData( + const TfLiteNode* node, int tensor_idx) = 0; + + // Only available during Prepare & Eval. Returns nullptr on failure, otherwise + // returns a pointer to the buffer. + virtual void* DecompressTensorToBuffer( + const TfLiteEvalTensor& tensor, + const CompressionTensorData& compression_data, void* buffer); + + // Used for configuring alternate decompression memory + struct AlternateMemoryRegion { + void* address; + size_t bytes; + }; + + // Set the alternate decompression memory regions. + // Can only be called during the MicroInterpreter kInit state. 
+ virtual TfLiteStatus SetDecompressionMemory( + const std::initializer_list& regions); + + // Return a pointer to memory that can be used for decompression. + // The pointer will be aligned to the value. + // Return nullptr if the requested size is not available. + // Can be called during kPrepare and kInvoke states. + virtual void* AllocateDecompressionMemory(size_t bytes, size_t alignment); + + // reset all allocation tracking + virtual void ResetDecompressionMemoryAllocations(); + +#endif // USE_TFLM_COMPRESSION + + // Set the alternate MicroProfilerInterface. + // This can be used to profile subsystems simultaneously with the profiling + // of kernels during the Eval phase. See (b/379584353). + // The alternate MicroProfilerInterface is currently used by the tensor + // decompression subsystem. + virtual TfLiteStatus SetAlternateProfiler( + MicroProfilerInterface* alt_profiler) { + return kTfLiteError; + } + + // Get the alternate MicroProfilerInterface. + // This can be used to profile subsystems simultaneously with the profiling + // of kernels during the Eval phase. See (b/379584353). + // The alternate MicroProfilerInterface is currently used by the tensor + // decompression subsystem. + virtual MicroProfilerInterface* GetAlternateProfiler() const { + return nullptr; + } + + private: + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +inline MicroContext* GetMicroContext(const struct TfLiteContext* context) { + return reinterpret_cast(context->impl_); +} + +// Deprecated API. Prefer to using the MicroContext API directly from the +// kernels. +// TODO(b/213010668): migrate all existing kernels to use MicroContext, delete +// these functions, and remove corresponding members from the TfLiteContext +// struct for TFLM. 
+inline void* MicroContextAllocatePersistentBuffer(TfLiteContext* ctx, + size_t bytes) { + return GetMicroContext(ctx)->AllocatePersistentBuffer(bytes); +} +inline TfLiteStatus MicroContextRequestScratchBufferInArena(TfLiteContext* ctx, + size_t bytes, + int* buffer_idx) { + return GetMicroContext(ctx)->RequestScratchBufferInArena(bytes, buffer_idx); +} +inline void* MicroContextGetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { + return GetMicroContext(ctx)->GetScratchBuffer(buffer_idx); +} +inline TfLiteTensor* MicroContextGetTensor(const struct TfLiteContext* context, + int tensor_idx) { + return GetMicroContext(context)->AllocateTempTfLiteTensor(tensor_idx); +} +inline TfLiteEvalTensor* MicroContextGetEvalTensor( + const struct TfLiteContext* context, int tensor_idx) { + return GetMicroContext(context)->GetEvalTensor(tensor_idx); +} +inline TfLiteExternalContext* MicroContextGetExternalContext( + TfLiteContext* context, TfLiteExternalContextType unused) { + return reinterpret_cast( + GetMicroContext(context)->external_context()); +} + +// Requests that an error be reported with format string msg. +void MicroContextReportOpError(struct TfLiteContext* context, + const char* format, ...); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ diff --git a/src/tensorflow/lite/micro/micro_error_reporter.cpp b/src/tensorflow/lite/micro/micro_error_reporter.cpp deleted file mode 100644 index 6d8361c..0000000 --- a/src/tensorflow/lite/micro/micro_error_reporter.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/micro_error_reporter.h" - -#include - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#include "tensorflow/lite/micro/debug_log.h" -#include "tensorflow/lite/micro/micro_string.h" -#endif - -namespace tflite { - -int MicroErrorReporter::Report(const char* format, va_list args) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - // Only pulling in the implementation of this function for builds where we - // expect to make use of it to be extra cautious about not increasing the code - // size. - static constexpr int kMaxLogLen = 256; - char log_buffer[kMaxLogLen]; - MicroVsnprintf(log_buffer, kMaxLogLen, format, args); - DebugLog(log_buffer); - DebugLog("\r\n"); -#endif - return 0; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_graph.h b/src/tensorflow/lite/micro/micro_graph.h new file mode 100644 index 0000000..79b3649 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_graph.h @@ -0,0 +1,62 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ + +#include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" + +namespace tflite { + +// Abstracts the details of interacting with the graph from the kernels +// +// Provides methods to invoke any subgraph in the tflite::Graph. +class MicroGraph { + public: + virtual ~MicroGraph() = default; + + // Calls TFLMRegistration->Invoke for every operator in a single subgraph + // in the model. + virtual TfLiteStatus InvokeSubgraph(int subgraph_idx) = 0; + + // Number of tensor inputs to a specified subgraph in the model. + virtual size_t NumSubgraphInputs(int subgraph_idx) = 0; + + // Get the specified input tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, + int input_idx) = 0; + + // Number of tensor outputs from a specified subgraph in the model. + virtual size_t NumSubgraphOutputs(int subgraph_idx) = 0; + + // Get the specified output tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, + int output_idx) = 0; + + // Number of subgraphs in the model. + virtual int NumSubgraphs() = 0; + + // Get the resource variables for this TFLM graph. 
+ virtual MicroResourceVariables* GetResourceVariables() = 0; + + private: + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ diff --git a/src/tensorflow/lite/micro/micro_interpreter.cpp b/src/tensorflow/lite/micro/micro_interpreter.cpp index deb35c3..a32baac 100644 --- a/src/tensorflow/lite/micro/micro_interpreter.cpp +++ b/src/tensorflow/lite/micro/micro_interpreter.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,312 +18,271 @@ limitations under the License. #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/tensor_utils.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" #include "tensorflow/lite/micro/memory_helpers.h" #include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_interpreter_context.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/micro_profiler.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" #include "tensorflow/lite/schema/schema_generated.h" +#include "tensorflow/lite/schema/schema_utils.h" namespace tflite { namespace { - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -const char* OpNameFromRegistration(const TfLiteRegistration* registration) { - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - return registration->custom_name; +MemoryPlannerType FlagToMemoryPlannerType(bool 
preserve_all_tensors) { + if (preserve_all_tensors) { + return MemoryPlannerType::kLinear; } else { - return EnumNameBuiltinOperator(BuiltinOperator(registration->builtin_code)); + return MemoryPlannerType::kGreedy; } } -#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) - } // namespace -namespace internal { - -ContextHelper::ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model) - : allocator_(allocator), error_reporter_(error_reporter), model_(model) {} - -void* ContextHelper::AllocatePersistentBuffer(TfLiteContext* ctx, - size_t bytes) { - return reinterpret_cast(ctx->impl_) - ->allocator_->AllocatePersistentBuffer(bytes); -} - -TfLiteStatus ContextHelper::RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - return helper->allocator_->RequestScratchBufferInArena(bytes, buffer_idx); -} - -void* ContextHelper::GetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - ScratchBufferHandle* handle = helper->scratch_buffer_handles_ + buffer_idx; - return handle->data; -} - -void ContextHelper::ReportOpError(struct TfLiteContext* context, - const char* format, ...) 
{ -#ifndef TF_LITE_STRIP_ERROR_STRINGS - ContextHelper* helper = static_cast(context->impl_); - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(helper->error_reporter_, format, args); - va_end(args); -#endif -} - -TfLiteTensor* ContextHelper::GetTensor(const struct TfLiteContext* context, - int tensor_idx) { - ContextHelper* helper = static_cast(context->impl_); - return helper->allocator_->AllocateTempTfLiteTensor( - helper->model_, helper->eval_tensors_, tensor_idx); -} - -TfLiteEvalTensor* ContextHelper::GetEvalTensor( - const struct TfLiteContext* context, int tensor_idx) { - ContextHelper* helper = reinterpret_cast(context->impl_); - return &helper->eval_tensors_[tensor_idx]; -} - -void ContextHelper::SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors) { - eval_tensors_ = eval_tensors; -} - -void ContextHelper::SetScratchBufferHandles( - ScratchBufferHandle* scratch_buffer_handles) { - scratch_buffer_handles_ = scratch_buffer_handles; -} - -} // namespace internal - MicroInterpreter::MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter, - tflite::Profiler* profiler) + MicroResourceVariables* resource_variables, + MicroProfilerInterface* profiler, + bool preserve_all_tensors) : model_(model), op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size, - error_reporter)), + allocator_(*MicroAllocator::Create( + tensor_arena, tensor_arena_size, + FlagToMemoryPlannerType(preserve_all_tensors))), + graph_(&context_, model, &allocator_, resource_variables), tensors_allocated_(false), initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { + input_tensors_(nullptr), + output_tensors_(nullptr), + micro_context_(&allocator_, model_, &graph_) { Init(profiler); } 
MicroInterpreter::MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, MicroAllocator* allocator, - ErrorReporter* error_reporter, - tflite::Profiler* profiler) + MicroResourceVariables* resource_variables, + MicroProfilerInterface* profiler) : model_(model), op_resolver_(op_resolver), - error_reporter_(error_reporter), allocator_(*allocator), + graph_(&context_, model, allocator, resource_variables), tensors_allocated_(false), initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { + input_tensors_(nullptr), + output_tensors_(nullptr), + micro_context_(&allocator_, model_, &graph_) { Init(profiler); } MicroInterpreter::~MicroInterpreter() { - if (node_and_registrations_ != nullptr) { - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - TfLiteNode* node = &(node_and_registrations_[i].node); - const TfLiteRegistration* registration = - node_and_registrations_[i].registration; - // registration is allocated outside the interpreter, so double check to - // make sure it's not nullptr; - if (registration != nullptr && registration->free != nullptr) { - registration->free(&context_, node->user_data); - } - } + if (graph_.GetAllocations() != nullptr) { + graph_.FreeSubgraphs(); } } -void MicroInterpreter::Init(tflite::Profiler* profiler) { - const flatbuffers::Vector>* subgraphs = - model_->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - initialization_status_ = kTfLiteError; - return; - } - subgraph_ = (*subgraphs)[0]; - - context_.impl_ = static_cast(&context_helper_); - context_.ReportError = context_helper_.ReportOpError; - context_.GetTensor = context_helper_.GetTensor; - context_.GetEvalTensor = context_helper_.GetEvalTensor; - context_.recommended_num_threads = 1; +void MicroInterpreter::Init(MicroProfilerInterface* profiler) { + 
micro_context_.SetInterpreterState( + MicroInterpreterContext::InterpreterState::kInit); + context_.impl_ = static_cast(µ_context_); + context_.ReportError = MicroContextReportOpError; + context_.GetTensor = MicroContextGetTensor; + context_.GetEvalTensor = MicroContextGetEvalTensor; context_.profiler = profiler; + context_.RequestScratchBufferInArena = + MicroContextRequestScratchBufferInArena; + context_.GetExternalContext = MicroContextGetExternalContext; + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + context_.GetScratchBuffer = MicroContextGetScratchBuffer; initialization_status_ = kTfLiteOk; } -void MicroInterpreter::CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr) { - int32_t tensorSize = 1; - for (int d = 0; d < tensorCorr->dims->size; ++d) - tensorSize *= reinterpret_cast(tensorCorr->dims->data)[d]; - - switch (tensorCorr->type) { - case TfLiteType::kTfLiteFloat32: - CorrectTensorDataEndianness(tensorCorr->data.f, tensorSize); - break; - case TfLiteType::kTfLiteFloat16: - CorrectTensorDataEndianness(tensorCorr->data.f16, tensorSize); - break; - case TfLiteType::kTfLiteInt64: - CorrectTensorDataEndianness(tensorCorr->data.i64, tensorSize); - break; - case TfLiteType::kTfLiteUInt64: - CorrectTensorDataEndianness(tensorCorr->data.u64, tensorSize); - break; - case TfLiteType::kTfLiteInt32: - CorrectTensorDataEndianness(tensorCorr->data.i32, tensorSize); - break; - case TfLiteType::kTfLiteInt16: - CorrectTensorDataEndianness(tensorCorr->data.i16, tensorSize); - break; - case TfLiteType::kTfLiteComplex64: - CorrectTensorDataEndianness(tensorCorr->data.c64, tensorSize); - break; - case TfLiteType::kTfLiteComplex128: - CorrectTensorDataEndianness(tensorCorr->data.c128, tensorSize); - break; - default: - // Do nothing for other data types. 
- break; - } -} +TfLiteStatus MicroInterpreter::PrepareNodeAndRegistrationDataFromFlatbuffer() { + for (int subgraph_idx = 0; subgraph_idx < graph_.NumSubgraphs(); + subgraph_idx++) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + auto* opcodes = model_->operator_codes(); + TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator = + allocator_.GetBuiltinDataAllocator(); + uint32_t operators_size = NumSubgraphOperators(subgraph); + for (size_t i = 0; i < operators_size; ++i) { + const auto* op = subgraph->operators()->Get(i); + const size_t index = op->opcode_index(); + if (index >= opcodes->size()) { + MicroPrintf("Missing registration for opcode_index %d\n", index); + return kTfLiteError; + } + const auto* opcode = opcodes->Get(index); + TfLiteStatus status = + GetRegistrationFromOpCode(opcode, op_resolver_, + &(graph_.GetAllocations()[subgraph_idx] + .node_and_registrations[i] + .registration)); + if (status != kTfLiteOk) { + MicroPrintf("Failed to get registration from op code %s\n ", + EnumNameBuiltinOperator(GetBuiltinCode(opcode))); + return status; + } + const auto* registration = graph_.GetAllocations()[subgraph_idx] + .node_and_registrations[i] + .registration; + if (registration == nullptr) { + MicroPrintf("Skipping op for opcode_index %d\n", index); + return kTfLiteError; + } + BuiltinOperator op_type = + static_cast(registration->builtin_code); + + const char* custom_data = nullptr; + size_t custom_data_size = 0; + unsigned char* builtin_data = nullptr; + + if (op_type == BuiltinOperator_CUSTOM) { + // Custom Ops may or may not have a non-null custom_options field. 
+ if (op->custom_options() != nullptr) { + custom_data = + reinterpret_cast(op->custom_options()->data()); + custom_data_size = op->custom_options()->size(); + } + } else { + if (op->custom_options() != nullptr) { + MicroPrintf( + "Unsupported behavior: found builtin operator %s with custom " + "options.\n", + EnumNameBuiltinOperator(op_type)); + return kTfLiteError; + } + + TfLiteBridgeBuiltinParseFunction parser = + op_resolver_.GetOpDataParser(op_type); + if (parser == nullptr) { + MicroPrintf("Did not find a parser for %s", + EnumNameBuiltinOperator(op_type)); + + return kTfLiteError; + } + TF_LITE_ENSURE_STATUS(CallBuiltinParseFunction( + parser, op, builtin_data_allocator, (void**)(&builtin_data))); + } -template -void MicroInterpreter::CorrectTensorDataEndianness(T* data, int32_t size) { - for (int32_t i = 0; i < size; ++i) { - data[i] = flatbuffers::EndianScalar(data[i]); + TfLiteIntArray* inputs_array = + FlatBufferVectorToTfLiteTypeArray(op->inputs()); + TfLiteIntArray* outputs_array = + FlatBufferVectorToTfLiteTypeArray(op->outputs()); + + TfLiteNode* node = &( + graph_.GetAllocations()[subgraph_idx].node_and_registrations[i].node); + *node = {}; + node->inputs = inputs_array; + node->outputs = outputs_array; + node->builtin_data = reinterpret_cast(builtin_data); + node->custom_initial_data = custom_data; + node->custom_initial_data_size = custom_data_size; + + if (op->intermediates() && (op->intermediates()->size() > 0)) { + node->intermediates = + FlatBufferVectorToTfLiteTypeArray(op->intermediates()); + } + } } + return kTfLiteOk; } TfLiteStatus MicroInterpreter::AllocateTensors() { - if (allocator_.StartModelAllocation(model_, op_resolver_, - &node_and_registrations_, - &eval_tensors_) != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed starting model allocation.\n"); + SubgraphAllocations* allocations = allocator_.StartModelAllocation(model_); + + if (allocations == nullptr) { + MicroPrintf("Failed starting model allocation.\n"); 
initialization_status_ = kTfLiteError; return kTfLiteError; } - // Update the pointer now that TfLiteEvalTensor allocation has completed on - // the context helper. - // TODO(b/16157777): This call would not be needed if ContextHelper rolled - // into the interpreter. - context_helper_.SetTfLiteEvalTensors(eval_tensors_); - context_.tensors_size = subgraph_->tensors()->size(); - - // If the system is big endian then convert weights from the flatbuffer from - // little to big endian on startup so that it does not need to be done during - // inference. - // NOTE: This requires that the flatbuffer is held in memory which can be - // modified by this process. - if (!FLATBUFFERS_LITTLEENDIAN) { - for (size_t t = 0; t < subgraph_->tensors()->size(); ++t) { - if (auto* buffer = - (*model_->buffers())[subgraph_->tensors()->Get(t)->buffer()]) { - // If we've found a buffer, does it have any data? - if (auto* array = buffer->data()) { - // If it has any data, is the data size larger than zero? - if (array->size()) { - // Update the endianness of the corresponding eval tensor since that - // struct holds the buffer used at inference time. - CorrectTensorEndianness(&eval_tensors_[t]); - } - } - } - } - } + graph_.SetSubgraphAllocations(allocations); - // Only allow AllocatePersistentBuffer in Init stage. 
- context_.AllocatePersistentBuffer = context_helper_.AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = nullptr; - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - size_t init_data_size; - const char* init_data; - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - init_data = reinterpret_cast(node->custom_initial_data); - init_data_size = node->custom_initial_data_size; - } else { - init_data = reinterpret_cast(node->builtin_data); - init_data_size = 0; - } - if (registration->init) { - node->user_data = - registration->init(&context_, init_data, init_data_size); - } + TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer()); + + micro_context_.SetInterpreterState( + MicroInterpreterContext::InterpreterState::kInit); + TF_LITE_ENSURE_STATUS(graph_.InitSubgraphs()); + + micro_context_.SetInterpreterState( + MicroInterpreterContext::InterpreterState::kPrepare); + + TF_LITE_ENSURE_STATUS(graph_.PrepareSubgraphs()); + + micro_context_.SetInterpreterState( + MicroInterpreterContext::InterpreterState::kMemoryPlanning); + + TF_LITE_ENSURE_OK(&context_, allocator_.FinishModelAllocation( + model_, graph_.GetAllocations(), + &scratch_buffer_handles_)); + + micro_context_.SetScratchBufferHandles(scratch_buffer_handles_); + + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. + input_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * inputs_size())); + if (input_tensors_ == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->input_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * inputs_size()); + return kTfLiteError; } - // Both AllocatePersistentBuffer and RequestScratchBufferInArena is - // available in Prepare stage. 
- context_.RequestScratchBufferInArena = - context_helper_.RequestScratchBufferInArena; - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - if (registration->prepare) { - TfLiteStatus prepare_status = registration->prepare(&context_, node); - if (prepare_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %df) failed to prepare with status %d", - OpNameFromRegistration(registration), i, prepare_status); - return kTfLiteError; - } + for (size_t i = 0; i < inputs_size(); ++i) { + input_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, graph_.GetAllocations(), inputs().Get(i), 0); + if (input_tensors_[i] == nullptr) { + MicroPrintf("Failed to initialize input tensor %d", i); + return kTfLiteError; } - allocator_.FinishPrepareNodeAllocations(/*node_id=*/i); } - // Prepare is done, we're ready for Invoke. Memory allocation is no longer - // allowed. Kernels can only fetch scratch buffers via GetScratchBuffer. - context_.AllocatePersistentBuffer = nullptr; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = context_helper_.GetScratchBuffer; + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. + output_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * outputs_size())); + if (output_tensors_ == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->output_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * outputs_size()); + return kTfLiteError; + } - TF_LITE_ENSURE_OK(&context_, - allocator_.FinishModelAllocation(model_, eval_tensors_, - &scratch_buffer_handles_)); - // TODO(b/16157777): Remove this when ContextHelper is rolled into this class. 
- context_helper_.SetScratchBufferHandles(scratch_buffer_handles_); + for (size_t i = 0; i < outputs_size(); ++i) { + output_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, graph_.GetAllocations(), outputs().Get(i), 0); + if (output_tensors_[i] == nullptr) { + MicroPrintf("Failed to initialize output tensor %d", i); + return kTfLiteError; + } + } - TF_LITE_ENSURE_STATUS(ResetVariableTensors()); + TF_LITE_ENSURE_STATUS(Reset()); tensors_allocated_ = true; + micro_context_.SetInterpreterState( + MicroInterpreterContext::InterpreterState::kInvoke); return kTfLiteOk; } TfLiteStatus MicroInterpreter::Invoke() { if (initialization_status_ != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invoke() called after initialization failed\n"); + MicroPrintf("Invoke() called after initialization failed\n"); return kTfLiteError; } @@ -332,122 +291,62 @@ TfLiteStatus MicroInterpreter::Invoke() { if (!tensors_allocated_) { TF_LITE_ENSURE_OK(&context_, AllocateTensors()); } - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - - if (registration->invoke) { - TfLiteStatus invoke_status; -#ifndef NDEBUG // Omit profiler overhead from release builds. - // The case where profiler == nullptr is handled by - // ScopedOperatorProfile. - tflite::Profiler* profiler = - reinterpret_cast(context_.profiler); - ScopedOperatorProfile scoped_profiler( - profiler, OpNameFromRegistration(registration), i); -#endif - invoke_status = registration->invoke(&context_, node); - - // All TfLiteTensor structs used in the kernel are allocated from temp - // memory in the allocator. This creates a chain of allocations in the - // temp section. The call below resets the chain of allocations to - // prepare for the next call. 
- allocator_.ResetTempAllocations(); - - if (invoke_status == kTfLiteError) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %d) failed to invoke with status %d", - OpNameFromRegistration(registration), i, invoke_status); - return kTfLiteError; - } else if (invoke_status != kTfLiteOk) { - return invoke_status; - } - } - } - return kTfLiteOk; + return graph_.InvokeSubgraph(0); } TfLiteTensor* MicroInterpreter::input(size_t index) { const size_t length = inputs_size(); if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Input index %d out of range (length is %d)", index, - length); + MicroPrintf("Input index %d out of range (length is %d)", index, length); return nullptr; } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Input tensors not at index 0 are allocated from the " - "persistent memory arena. Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - inputs().Get(index)); - } - if (input_tensor_ == nullptr) { - input_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, inputs().Get(index)); - } - return input_tensor_; + return input_tensors_[index]; } TfLiteTensor* MicroInterpreter::output(size_t index) { const size_t length = outputs_size(); if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Output index %d out of range (length is %d)", index, - length); + MicroPrintf("Output index %d out of range (length is %d)", index, length); return nullptr; } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Output tensors not at index 0 are allocated from the " - "persistent memory arena. Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - outputs().Get(index)); - } - if (output_tensor_ == nullptr) { - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. 
- output_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, outputs().Get(index)); + return output_tensors_[index]; +} + +TfLiteStatus MicroInterpreter::Reset() { + TfLiteStatus status = graph_.ResetSubgraphs(); + if (status != kTfLiteOk) { + return status; } - return output_tensor_; + return graph_.ResetVariableTensors(); } -TfLiteTensor* MicroInterpreter::tensor(size_t index) { - const size_t length = tensors_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Tensor index %d out of range (length is %d)", index, - length); +TfLiteEvalTensor* MicroInterpreter::GetTensor(int tensor_index, + int subgraph_index) { + if (!allocator_.preserves_all_tensor()) { + MicroPrintf("GetTensor requires all tensors to be preserved"); return nullptr; } - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - index); + return &graph_.GetAllocations()[subgraph_index].tensors[tensor_index]; } -TfLiteStatus MicroInterpreter::ResetVariableTensors() { - for (size_t i = 0; i < subgraph_->tensors()->size(); ++i) { - auto* tensor = subgraph_->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors_[i], &buffer_size)); - - int value = 0; - if (tensor->type() == tflite::TensorType_INT8) { - value = tensor->quantization()->zero_point()->Get(0); - } - memset(eval_tensors_[i].data.raw, value, buffer_size); - } - } +TfLiteStatus MicroInterpreter::SetMicroExternalContext( + void* external_context_payload) { + return micro_context_.set_external_context(external_context_payload); +} - return kTfLiteOk; +TfLiteStatus MicroInterpreter::SetAlternateProfiler( + MicroProfilerInterface* alt_profiler) { + return micro_context_.SetAlternateProfiler(alt_profiler); } +#ifdef USE_TFLM_COMPRESSION + +TfLiteStatus MicroInterpreter::SetDecompressionMemory( + const std::initializer_list& + regions) { + return micro_context_.SetDecompressionMemory(regions); +} + 
+#endif // USE_TFLM_COMPRESSION + } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_interpreter.h b/src/tensorflow/lite/micro/micro_interpreter.h index 31720c8..55b7b2c 100644 --- a/src/tensorflow/lite/micro/micro_interpreter.h +++ b/src/tensorflow/lite/micro/micro_interpreter.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,71 +18,46 @@ limitations under the License. #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#ifdef USE_TFLM_COMPRESSION + +#include + +#endif // USE_TFLM_COMPRESSION + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_interpreter_context.h" +#include "tensorflow/lite/micro/micro_interpreter_graph.h" #include "tensorflow/lite/micro/micro_op_resolver.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" #include "tensorflow/lite/portable_type_to_tflitetype.h" #include "tensorflow/lite/schema/schema_generated.h" -namespace tflite { - -namespace internal { - -// A helper class to encapsulate the implementation of APIs in Context. -// context->impl_ points to an instance of this class. -// Check tensorflow/lite/c/common.h for detailed descriptions. -// TODO(b/16157777): Consider rolling this class into MicroInterpreter. 
-class ContextHelper { - public: - explicit ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model); - - // Functions that will be assigned to function pointers on TfLiteContext: - static void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx); - static void* GetScratchBuffer(TfLiteContext* ctx, int buffer_idx); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_idx); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_idx); - - // Sets the pointer to a list of TfLiteEvalTensor instances. - void SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors); - - // Sets the pointer to a list of ScratchBufferHandle instances. - void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles); - - private: - MicroAllocator* allocator_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; - const Model* model_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; - ScratchBufferHandle* scratch_buffer_handles_ = nullptr; -}; +/// Copied from tensorflow/lite/version.h to avoid a dependency chain into +// tensorflow/core. +#define TFLITE_SCHEMA_VERSION (3) -} // namespace internal +namespace tflite { class MicroInterpreter { public: - // The lifetime of the model, op resolver, tensor arena, error reporter and - // profiler must be at least as long as that of the interpreter object, since - // the interpreter may need to access them at any time. This means that you - // should usually create them with the same scope as each other, for example - // having them all allocated on the stack as local variables through a - // top-level function. The interpreter doesn't do any deallocation of any of - // the pointed-to objects, ownership remains with the caller. 
+ // The lifetime of the model, op resolver, tensor arena, error reporter, + // resource variables, and profiler must be at least as long as that of the + // interpreter object, since the interpreter may need to access them at any + // time. This means that you should usually create them with the same scope as + // each other, for example having them all allocated on the stack as local + // variables through a top-level function. The interpreter doesn't do any + // deallocation of any of the pointed-to objects, ownership remains with the + // caller. MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); + MicroResourceVariables* resource_variables = nullptr, + MicroProfilerInterface* profiler = nullptr, + bool preserve_all_tensors = false); // Create an interpreter instance using an existing MicroAllocator instance. // This constructor should be used when creating an allocator that needs to @@ -90,8 +65,9 @@ class MicroInterpreter { // allocations inside the interpreter. The lifetime of the allocator must be // as long as that of the interpreter object. MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - MicroAllocator* allocator, ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); + MicroAllocator* allocator, + MicroResourceVariables* resource_variables = nullptr, + MicroProfilerInterface* profiler = nullptr); ~MicroInterpreter(); @@ -104,22 +80,18 @@ class MicroInterpreter { // TODO(b/149795762): Add this to the TfLiteStatus enum. 
TfLiteStatus Invoke(); - size_t tensors_size() const { return context_.tensors_size; } - TfLiteTensor* tensor(size_t tensor_index); - template - T* typed_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType()) { - return GetTensorData(tensor_ptr); - } - } - return nullptr; - } + // This is the recommended API for an application to pass an external payload + // pointer as an external context to kernels. The life time of the payload + // pointer should be at least as long as this interpreter. TFLM supports only + // one external context. + TfLiteStatus SetMicroExternalContext(void* external_context_payload); TfLiteTensor* input(size_t index); - size_t inputs_size() const { return subgraph_->inputs()->Length(); } + size_t inputs_size() const { + return model_->subgraphs()->Get(0)->inputs()->size(); + } const flatbuffers::Vector& inputs() const { - return *subgraph_->inputs(); + return *model_->subgraphs()->Get(0)->inputs(); } TfLiteTensor* input_tensor(size_t index) { return input(index); } template @@ -133,9 +105,11 @@ class MicroInterpreter { } TfLiteTensor* output(size_t index); - size_t outputs_size() const { return subgraph_->outputs()->Length(); } + size_t outputs_size() const { + return model_->subgraphs()->Get(0)->outputs()->size(); + } const flatbuffers::Vector& outputs() const { - return *subgraph_->outputs(); + return *model_->subgraphs()->Get(0)->outputs(); } TfLiteTensor* output_tensor(size_t index) { return output(index); } template @@ -148,17 +122,20 @@ class MicroInterpreter { return nullptr; } - // Reset all variable tensors to the default value. - TfLiteStatus ResetVariableTensors(); + // Returns a pointer to the tensor for the corresponding tensor_index + TfLiteEvalTensor* GetTensor(int tensor_index, int subgraph_index = 0); + + // Reset the state to be what you would expect when the interpreter is first + // created. i.e. 
after Init and Prepare is called for the very first time. + TfLiteStatus Reset(); TfLiteStatus initialization_status() const { return initialization_status_; } - size_t operators_size() const { return subgraph_->operators()->size(); } - - // For debugging only. - const NodeAndRegistration node_and_registration(int node_index) const { - return node_and_registrations_[node_index]; - } + // Populates node and registration pointers representing the inference graph + // of the model from values inside the flatbuffer (loaded from the TfLiteModel + // instance). Persistent data (e.g. operator data) is allocated from the + // arena. + TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer(); // For debugging only. // Returns the actual used arena in bytes. This method gives the optimal arena @@ -168,6 +145,32 @@ class MicroInterpreter { // arena_used_bytes() + 16. size_t arena_used_bytes() const { return allocator_.used_bytes(); } + // Returns True if all Tensors are being preserves + // TODO(b/297106074) : revisit making C++ example or test for + // preserve_all_tesnors + bool preserve_all_tensors() const { + return allocator_.preserves_all_tensor(); + } + + // Set the alternate MicroProfilerInterface. + // This value is passed through to the MicroContext. + // This can be used to profile subsystems simultaneously with the profiling + // of kernels during the Eval phase. See (b/379584353). + // The alternate MicroProfilerInterface is currently used by the tensor + // decompression subsystem. + TfLiteStatus SetAlternateProfiler(MicroProfilerInterface* alt_profiler); + +#ifdef USE_TFLM_COMPRESSION + + // Set the alternate decompression memory regions. + // Can only be called during the MicroInterpreter kInit state (i.e. must + // be called before MicroInterpreter::AllocateTensors). 
+ TfLiteStatus SetDecompressionMemory( + const std::initializer_list& + regions); + +#endif // USE_TFLM_COMPRESSION + protected: const MicroAllocator& allocator() const { return allocator_; } const TfLiteContext& context() const { return context_; } @@ -175,35 +178,28 @@ class MicroInterpreter { private: // TODO(b/158263161): Consider switching to Create() function to enable better // error reporting during initialization. - void Init(tflite::Profiler* profiler); + void Init(MicroProfilerInterface* profiler); - void CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr); - - template - void CorrectTensorDataEndianness(T* data, int32_t size); - - NodeAndRegistration* node_and_registrations_ = nullptr; + // Gets the current subgraph index used from within context methods. + int get_subgraph_index() { return graph_.GetCurrentSubgraphIndex(); } const Model* model_; const MicroOpResolver& op_resolver_; - ErrorReporter* error_reporter_; TfLiteContext context_ = {}; MicroAllocator& allocator_; + MicroInterpreterGraph graph_; bool tensors_allocated_; TfLiteStatus initialization_status_; - const SubGraph* subgraph_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; ScratchBufferHandle* scratch_buffer_handles_ = nullptr; - // TODO(b/16157777): Drop this reference: - internal::ContextHelper context_helper_; - // TODO(b/162311891): Clean these pointers up when this class supports buffers // from TfLiteEvalTensor. - TfLiteTensor* input_tensor_; - TfLiteTensor* output_tensor_; + TfLiteTensor** input_tensors_; + TfLiteTensor** output_tensors_; + + MicroInterpreterContext micro_context_; }; } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_interpreter_context.cpp b/src/tensorflow/lite/micro/micro_interpreter_context.cpp new file mode 100644 index 0000000..38c6225 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_interpreter_context.cpp @@ -0,0 +1,280 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/micro_interpreter_context.h" + +#include + +#ifdef USE_TFLM_COMPRESSION + +#include + +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" + +#endif // USE_TFLM_COMPRESSION + +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +#ifdef USE_TFLM_COMPRESSION + +int GetInputTensorIndex(const TfLiteNode* node, const int index) { + if (index >= 0 && index < node->inputs->size) { + const int tensor_index = node->inputs->data[index]; + if (tensor_index != kTfLiteOptionalTensor) { + return tensor_index; + } + } + return -1; +} + +#endif // USE_TFLM_COMPRESSION + +} // namespace + +MicroInterpreterContext::MicroInterpreterContext(MicroAllocator* allocator, + const Model* model, + MicroInterpreterGraph* graph) + : allocator_(*allocator), + graph_(*graph), + model_(model), + state_(InterpreterState::kInit) {} + +MicroInterpreterContext::~MicroInterpreterContext() {} + +void* MicroInterpreterContext::AllocatePersistentBuffer(size_t bytes) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInit); + return allocator_.AllocatePersistentBuffer(bytes); +} + +TfLiteStatus MicroInterpreterContext::RequestScratchBufferInArena( + size_t bytes, int* buffer_idx) { 
+ TFLITE_DCHECK(state_ == InterpreterState::kPrepare); + return allocator_.RequestScratchBufferInArena( + bytes, graph_.GetCurrentSubgraphIndex(), buffer_idx); +} + +void* MicroInterpreterContext::GetScratchBuffer(int buffer_idx) { + TFLITE_DCHECK(state_ == InterpreterState::kInvoke); + ScratchBufferHandle* handle = scratch_buffer_handles_ + buffer_idx; + return handle->data; +} + +TfLiteTensor* MicroInterpreterContext::AllocateTempTfLiteTensor( + int tensor_idx) { + return allocator_.AllocateTempTfLiteTensor(model_, graph_.GetAllocations(), + tensor_idx, + graph_.GetCurrentSubgraphIndex()); +} + +void MicroInterpreterContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + return allocator_.DeallocateTempTfLiteTensor(tensor); +} + +uint8_t* MicroInterpreterContext::AllocateTempBuffer(size_t size, + size_t alignment) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare); + return allocator_.AllocateTempBuffer(size, alignment); +} + +void MicroInterpreterContext::DeallocateTempBuffer(uint8_t* buffer) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare); + allocator_.DeallocateTempBuffer(buffer); +} + +TfLiteEvalTensor* MicroInterpreterContext::GetEvalTensor(int tensor_idx) { + return &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()] + .tensors[tensor_idx]; +} + +void MicroInterpreterContext::SetScratchBufferHandles( + ScratchBufferHandle* scratch_buffer_handles) { + scratch_buffer_handles_ = scratch_buffer_handles; +} + +TfLiteStatus MicroInterpreterContext::set_external_context( + void* external_context_payload) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInvoke); + if (external_context_payload == nullptr || + external_context_payload_ != nullptr) { + MicroPrintf( + "Attempting to set external context to %x but it was %x already", + external_context_payload, external_context_payload_); + return kTfLiteError; + } + + external_context_payload_ = external_context_payload; + return kTfLiteOk; +} + +void 
MicroInterpreterContext::SetInterpreterState(InterpreterState state) { + state_ = state; +} + +MicroInterpreterContext::InterpreterState +MicroInterpreterContext::GetInterpreterState() const { + return state_; +} + +#ifdef USE_TFLM_COMPRESSION + +// Available during Prepare & Eval. Returns false if tensor is not +// compressed. +bool MicroInterpreterContext::IsTensorCompressed(const TfLiteNode* node, + int tensor_idx) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInvoke); + + const SubgraphAllocations* allocations = + &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()]; + if (allocations->compressed.tensors == nullptr) { + return false; + } + int index = GetInputTensorIndex(node, tensor_idx); + if (index == -1) { + return false; + } + return allocations->compressed.tensors[index] != nullptr; +} + +// Only available during Prepare. The kernel is responsible for storing the +// scratch buffer handle. +int MicroInterpreterContext::AllocateDecompressionScratchBuffer( + const TfLiteNode* node, int tensor_idx) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare); + + const SubgraphAllocations* allocations = + &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()]; + if (allocations->compressed.tensors == nullptr) { + return -1; + } + int index = GetInputTensorIndex(node, tensor_idx); + if (index == -1 || allocations->compressed.tensors[index] == nullptr) { + return -1; + } + const TfLiteEvalTensor* tensor = &allocations->tensors[index]; + const size_t byte_count = EvalTensorBytes(tensor); + + if (AllocateDecompressionMemory(byte_count, MicroArenaBufferAlignment()) != + nullptr) { + // Tensor fits in alternate decompression memory, no need to allocate + // scratch buffer. 
+ return -1; + } + + int scratch_index = -1; + TfLiteStatus result = RequestScratchBufferInArena(byte_count, &scratch_index); + TFLITE_DCHECK(scratch_index != -1 && result == kTfLiteOk); + + return scratch_index; +} + +// Available during Prepare & Eval. Returns nullptr if tensor is not +// compressed. +const CompressionTensorData* MicroInterpreterContext::GetTensorCompressionData( + const TfLiteNode* node, int tensor_idx) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInvoke); + + const SubgraphAllocations* allocations = + &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()]; + if (allocations->compressed.tensors == nullptr) { + return nullptr; + } + int index = GetInputTensorIndex(node, tensor_idx); + if (index == -1) { + return nullptr; + } + return allocations->compressed.tensors[index]; +} + +// Only available during Prepare & Eval. Returns nullptr on failure, otherwise +// returns a pointer to the buffer. +void* MicroInterpreterContext::DecompressTensorToBuffer( + const TfLiteEvalTensor& tensor, + const CompressionTensorData& compression_data, void* buffer) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInvoke); + + return MicroContext::DecompressTensorToBuffer(tensor, compression_data, + buffer); +} + +TfLiteStatus MicroInterpreterContext::SetDecompressionMemory( + const std::initializer_list& regions) { + if (state_ != InterpreterState::kInit) { + return kTfLiteError; + } + + decompress_regions_ = ®ions; + decompress_regions_allocations_ = static_cast( + AllocatePersistentBuffer(sizeof(size_t) * regions.size())); + if (decompress_regions_allocations_ == nullptr) { + return kTfLiteError; + } + ResetDecompressionMemoryAllocations(); + + return kTfLiteOk; +} + +void* MicroInterpreterContext::AllocateDecompressionMemory(size_t bytes, + size_t alignment) { + TFLITE_DCHECK(state_ == InterpreterState::kPrepare || + state_ == InterpreterState::kInvoke); + if 
(decompress_regions_ != nullptr) { + for (size_t i = 0; i < decompress_regions_->size(); i++) { + const AlternateMemoryRegion* region = &decompress_regions_->begin()[i]; + uint8_t* start = static_cast(region->address) + + decompress_regions_allocations_[i]; + uint8_t* aligned_start = AlignPointerUp(start, alignment); + size_t total = bytes + (aligned_start - start); + if (total + decompress_regions_allocations_[i] <= region->bytes) { + decompress_regions_allocations_[i] += total; + return aligned_start; + } + } + } + + return nullptr; +} + +void MicroInterpreterContext::ResetDecompressionMemoryAllocations() { + if (decompress_regions_ == nullptr) { + return; + } + TFLITE_DCHECK(decompress_regions_allocations_ != nullptr); + std::fill_n(decompress_regions_allocations_, decompress_regions_->size(), 0); +} + +#endif // USE_TFLM_COMPRESSION + +TfLiteStatus MicroInterpreterContext::SetAlternateProfiler( + tflite::MicroProfilerInterface* alt_profiler) { + alt_profiler_ = alt_profiler; + return kTfLiteOk; +} + +MicroProfilerInterface* MicroInterpreterContext::GetAlternateProfiler() const { + return alt_profiler_; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_interpreter_context.h b/src/tensorflow/lite/micro/micro_interpreter_context.h new file mode 100644 index 0000000..a392758 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_interpreter_context.h @@ -0,0 +1,186 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_context.h" +#include "tensorflow/lite/micro/micro_interpreter_graph.h" +#include "tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +// A full implementation of the MicroContext, to be used by the +// MicroInterpreter. Kernels should not depend on this directly. Instead they +// should only depend on the MicroContext. +class MicroInterpreterContext : public MicroContext { + public: + // Enum that allows MicroContext to keep track of the stages different memory + // planning APIs are available to kernels. + enum class InterpreterState { + kInit, + kPrepare, + kMemoryPlanning, + kInvoke, + }; + + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. + MicroInterpreterContext(MicroAllocator* allocator, const Model* model, + MicroInterpreterGraph* graph); + virtual ~MicroInterpreterContext(); + + // Allocate persistent buffer which has the same life time as the interpreter. + // Returns nullptr on failure. + // The memory is allocated from the tail. + // This method is only available in Init or Prepare stage. + // Virtual so that it can be faked for kernel tests. + virtual void* AllocatePersistentBuffer(size_t bytes) override; + + // Request a scratch buffer in the arena through static memory planning. + // This method is only available in Prepare stage and the buffer is allocated + // by the interpreter between Prepare and Eval stage. In Eval stage, + // GetScratchBuffer API can be used to fetch the address. + // Virtual so that it can be faked for kernel tests. 
+ virtual TfLiteStatus RequestScratchBufferInArena(size_t bytes, + int* buffer_idx) override; + + // Get the scratch buffer pointer. + // This method is only available in Eval stage. + // Virtual so that it can be faked for kernel tests. + virtual void* GetScratchBuffer(int buffer_idx) override; + + // Returns a temporary TfLiteTensor struct for a given index. + // Virtual so that it can be faked for kernel tests. + virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx) override; + + // Deallocates a temp TfLiteTensor. + // Virtual so that it can be faked for kernel tests. + virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) override; + + // Returns a pointer to a temporary buffer (from the arena). + // This API is only valid from the kernel's Prepare function and + // the buffer's lifetime is also that of the Prepare function. + // Virtual so that it can be faked for kernel tests. + virtual uint8_t* AllocateTempBuffer(size_t size, size_t alignment) override; + + // Signals that the temporary buffer is no longer needed. + // Virtual so that it can be faked for kernel tests. + virtual void DeallocateTempBuffer(uint8_t* buffer) override; + + // Returns a TfLiteEvalTensor struct for a given index. + // Virtual so that it can be faked for kernel tests. + virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx) override; + + // Sets the State of MemoryPlanning MicroInterpreterContext + void SetInterpreterState(InterpreterState state); + + // Sets the State of MemoryPlanning MicroInterpreterContext + InterpreterState GetInterpreterState() const; + + // Does not take ownership of the pointer and the pointer must refer to valid + // an object that outlive this class instance. + // This can only be called once to set one external context. 
+ TfLiteStatus set_external_context(void* external_context_payload) override; + + void* external_context() override { return external_context_payload_; } + + MicroGraph& graph() override { return graph_; } + + // Sets the pointer to a list of ScratchBufferHandle instances. + // Not API between TFLM and kernels. Primarily used by the framework for + // housekeeping in MicroInterpreterContext. + void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles); + +#ifdef USE_TFLM_COMPRESSION + + // Available during Prepare & Eval. Returns false if tensor is not + // compressed. + bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) override; + + // Only available during Prepare. The kernel is responsible for storing the + // scratch buffer handle. + int AllocateDecompressionScratchBuffer(const TfLiteNode* node, + int tensor_idx) override; + + // Available during Prepare & Eval. Returns nullptr if tensor is not + // compressed. + const CompressionTensorData* GetTensorCompressionData( + const TfLiteNode* node, int tensor_idx) override; + + // Only available during Prepare & Eval. Returns nullptr on failure, otherwise + // returns a pointer to the buffer. + void* DecompressTensorToBuffer(const TfLiteEvalTensor& tensor, + const CompressionTensorData& compression_data, + void* buffer) override; + + // Set the alternate decompression memory regions. + // Can only be called during the MicroInterpreter kInit state. + TfLiteStatus SetDecompressionMemory( + const std::initializer_list& regions) override; + + // Return a pointer to memory that can be used for decompression. + // The pointer will be aligned to the value. + // Return nullptr if the requested size is not available. + // Can be called during kPrepare and kInvoke states. 
+ void* AllocateDecompressionMemory(size_t bytes, size_t alignment) override; + + // reset all allocation tracking + void ResetDecompressionMemoryAllocations() override; + +#endif // USE_TFLM_COMPRESSION + + // Set the alternate MicroProfilerInterface. + // This can be used to profile subsystems simultaneously with the profiling + // of kernels during the Eval phase. See (b/379584353). + // The alternate MicroProfilerInterface is currently used by the tensor + // decompression subsystem. + TfLiteStatus SetAlternateProfiler( + MicroProfilerInterface* alt_profiler) override; + + // Get the alternate MicroProfilerInterface. + // This can be used to profile subsystems simultaneously with the profiling + // of kernels during the Eval phase. See (b/379584353). + // The alternate MicroProfilerInterface is currently used by the tensor + // decompression subsystem. + MicroProfilerInterface* GetAlternateProfiler() const override; + + private: + MicroAllocator& allocator_; + MicroInterpreterGraph& graph_; + const Model* model_; + InterpreterState state_; + + ScratchBufferHandle* scratch_buffer_handles_ = nullptr; + void* external_context_payload_ = nullptr; + MicroProfilerInterface* alt_profiler_ = nullptr; + +#ifdef USE_TFLM_COMPRESSION + + const std::initializer_list* decompress_regions_ = + nullptr; + // array of size_t elements with length equal to decompress_regions_.size() + size_t* decompress_regions_allocations_; + +#endif // USE_TFLM_COMPRESSION + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_CONTEXT_H_ diff --git a/src/tensorflow/lite/micro/micro_interpreter_graph.cpp b/src/tensorflow/lite/micro/micro_interpreter_graph.cpp new file mode 100644 index 0000000..3e243fb --- /dev/null +++ b/src/tensorflow/lite/micro/micro_interpreter_graph.cpp @@ -0,0 +1,313 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/micro_interpreter_graph.h" + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/flatbuffer_utils.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_profiler.h" +#include "tensorflow/lite/schema/schema_generated.h" + +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/micro_context.h" + +#endif // USE_TFLM_COMPRESSION + +namespace tflite { +namespace { + +const char* OpNameFromRegistration(const TFLMRegistration* registration) { + if (registration->builtin_code == BuiltinOperator_CUSTOM) { + return registration->custom_name; + } else { + return EnumNameBuiltinOperator(BuiltinOperator(registration->builtin_code)); + } +} + +} // namespace + +MicroInterpreterGraph::MicroInterpreterGraph( + TfLiteContext* context, const Model* model, MicroAllocator* allocator, + MicroResourceVariables* resource_variables) + : context_(context), + model_(model), + allocator_(allocator), + current_subgraph_index_(0), + current_operator_index_(0), + resource_variables_(resource_variables) { + if (model != nullptr) { + subgraphs_ = model->subgraphs(); + } +} + 
+MicroInterpreterGraph::~MicroInterpreterGraph() {} + +TfLiteStatus MicroInterpreterGraph::InitSubgraphs() { + int previous_subgraph_idx = current_subgraph_index_; + uint32_t previous_operator_idx = current_operator_index_; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (current_operator_index_ = 0; current_operator_index_ < operators_size; + ++current_operator_index_) { + TfLiteNode* node = &(subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .node); + const TFLMRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .registration; + size_t init_data_size; + const char* init_data; + if (registration->builtin_code == BuiltinOperator_CUSTOM) { + init_data = reinterpret_cast(node->custom_initial_data); + init_data_size = node->custom_initial_data_size; + } else { + init_data = reinterpret_cast(node->builtin_data); + init_data_size = 0; + } + if (registration->init) { + node->user_data = + registration->init(context_, init_data, init_data_size); + } + } + } + current_subgraph_index_ = previous_subgraph_idx; + current_operator_index_ = previous_operator_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() { + int previous_subgraph_idx = current_subgraph_index_; + uint32_t previous_operator_idx = current_operator_index_; + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (current_operator_index_ = 0; current_operator_index_ < operators_size; + ++current_operator_index_) { + TfLiteNode* node = &(subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .node); + const TFLMRegistration* 
registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .registration; + if (registration->prepare != nullptr) { + TfLiteStatus prepare_status = registration->prepare(context_, node); + if (prepare_status != kTfLiteOk) { + MicroPrintf("Node %s (number %df) failed to prepare with status %d", + OpNameFromRegistration(registration), + current_operator_index_, prepare_status); + return kTfLiteError; + } +#ifdef USE_TFLM_COMPRESSION + GetMicroContext(context_)->ResetDecompressionMemoryAllocations(); +#endif // USE_TFLM_COMPRESSION + } + allocator_->FinishPrepareNodeAllocations( + /*node_id=*/current_operator_index_); + } + } + current_subgraph_index_ = previous_subgraph_idx; + current_operator_index_ = previous_operator_idx; + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreterGraph::ResetSubgraphs() { + int previous_subgraph_idx = current_subgraph_index_; + uint32_t previous_operator_idx = current_operator_index_; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (current_operator_index_ = 0; current_operator_index_ < operators_size; + ++current_operator_index_) { + TfLiteNode* node = &(subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .node); + const TFLMRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .registration; + // registration is allocated outside the interpreter, so double check to + // make sure it's not nullptr; + if (registration != nullptr && registration->reset != nullptr) { + registration->reset(context_, node->user_data); + } + } + } + current_subgraph_index_ = previous_subgraph_idx; + current_operator_index_ = previous_operator_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreterGraph::FreeSubgraphs() { + int 
previous_subgraph_idx = current_subgraph_index_; + uint32_t previous_operator_idx = current_operator_index_; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (current_operator_index_ = 0; current_operator_index_ < operators_size; + ++current_operator_index_) { + TfLiteNode* node = &(subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .node); + const TFLMRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .registration; + // registration is allocated outside the interpreter, so double check to + // make sure it's not nullptr; + if (registration != nullptr && registration->free != nullptr) { + registration->free(context_, node->user_data); + } + } + } + current_subgraph_index_ = previous_subgraph_idx; + current_operator_index_ = previous_operator_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) { + int previous_subgraph_idx = current_subgraph_index_; + uint32_t previous_operator_idx = current_operator_index_; + current_subgraph_index_ = subgraph_idx; + + if (static_cast(subgraph_idx) >= subgraphs_->size()) { + MicroPrintf("Accessing subgraph %d but only %d subgraphs found", + subgraph_idx, subgraphs_->size()); + return kTfLiteError; + } + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (current_operator_index_ = 0; current_operator_index_ < operators_size; + ++current_operator_index_) { + TfLiteNode* node = &(subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .node); + const TFLMRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[current_operator_index_] + .registration; + +// This ifdef is needed (even though ScopedMicroProfiler itself is a no-op 
with +// -DTF_LITE_STRIP_ERROR_STRINGS) because the function OpNameFromRegistration is +// only defined for builds with the error strings. +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + ScopedMicroProfiler scoped_profiler( + OpNameFromRegistration(registration), + reinterpret_cast(context_->profiler)); +#endif + + TFLITE_DCHECK(registration->invoke); + TfLiteStatus invoke_status = registration->invoke(context_, node); +#ifdef USE_TFLM_COMPRESSION + GetMicroContext(context_)->ResetDecompressionMemoryAllocations(); +#endif // USE_TFLM_COMPRESSION + + // All TfLiteTensor structs used in the kernel are allocated from temp + // memory in the allocator. This creates a chain of allocations in the + // temp section. The call below resets the chain of allocations to + // prepare for the next call. + allocator_->ResetTempAllocations(); + + if (invoke_status == kTfLiteError) { + MicroPrintf("Node %s (number %d) failed to invoke with status %d", + OpNameFromRegistration(registration), current_operator_index_, + invoke_status); + return kTfLiteError; + } else if (invoke_status != kTfLiteOk) { + return invoke_status; + } + } + current_subgraph_index_ = previous_subgraph_idx; + current_operator_index_ = previous_operator_idx; + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreterGraph::ResetVariableTensors() { + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + const SubGraph* subgraph = (*subgraphs_)[subgraph_idx]; + for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { + auto* tensor = subgraph->tensors()->Get(i); + if (tensor->is_variable()) { + size_t buffer_size; + TF_LITE_ENSURE_STATUS(TfLiteEvalTensorByteLength( + &subgraph_allocations_[subgraph_idx].tensors[i], &buffer_size)); + + int value = 0; + if (tensor->type() == tflite::TensorType_INT8) { + value = tensor->quantization()->zero_point()->Get(0); + } + memset(subgraph_allocations_[subgraph_idx].tensors[i].data.raw, value, + buffer_size); + } + } + } + if (resource_variables_ != 
nullptr) { + resource_variables_->ResetAll(); + } + + return kTfLiteOk; +} + +int MicroInterpreterGraph::NumSubgraphs() { + return model_->subgraphs()->size(); +} + +void MicroInterpreterGraph::SetSubgraphAllocations( + SubgraphAllocations* subgraph_allocations) { + subgraph_allocations_ = subgraph_allocations; +} + +size_t MicroInterpreterGraph::NumSubgraphInputs(int subgraph_idx) { + return model_->subgraphs()->Get(subgraph_idx)->inputs()->size(); +} + +TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphInput(int subgraph_idx, + int input_idx) { + int tensor_idx = + model_->subgraphs()->Get(subgraph_idx)->inputs()->Get(input_idx); + return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx]; +} + +size_t MicroInterpreterGraph::NumSubgraphOutputs(int subgraph_idx) { + return model_->subgraphs()->Get(subgraph_idx)->outputs() == nullptr + ? 0 + : model_->subgraphs()->Get(subgraph_idx)->outputs()->size(); +} + +TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphOutput(int subgraph_idx, + int output_idx) { + int tensor_idx = + model_->subgraphs()->Get(subgraph_idx)->outputs()->Get(output_idx); + return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx]; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_interpreter_graph.h b/src/tensorflow/lite/micro/micro_interpreter_graph.h new file mode 100644 index 0000000..5fc3b4c --- /dev/null +++ b/src/tensorflow/lite/micro/micro_interpreter_graph.h @@ -0,0 +1,118 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_ + +#include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/micro/micro_resource_variable.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// Abstracts the details of interacting with the tflite::Model. +// +// Provides methods to access, initialize, prepare, invoke and free any +// subgraph in the tflite::Graph. +class MicroInterpreterGraph : public MicroGraph { + public: + // The lifetime of the context, model, allocator and resource_variables must + // be at least as long as that of the graph object, since the this class may + // need to access them at any time. If resource_variables is a nullptr, + // GetResourceVariables will return a nullptr. + MicroInterpreterGraph(TfLiteContext* context, const Model* model, + MicroAllocator* allocator, + MicroResourceVariables* resource_variables); + virtual ~MicroInterpreterGraph(); + + // Sets up builtin data and calls TFLMRegistration->Init for every + // operator in every subgraph in the model. + virtual TfLiteStatus InitSubgraphs(); + + // Calls TFLMRegistration->Prepare for every operator in every subgraph + // in the model. + virtual TfLiteStatus PrepareSubgraphs(); + + // Calls TFLMRegistration->Reset for every operator in every subgraph in + // the model. + virtual TfLiteStatus ResetSubgraphs(); + + // Calls TFLMRegistration->Free for every operator in every subgraph in + // the model. + virtual TfLiteStatus FreeSubgraphs(); + + // Calls TFLMRegistration->Invoke for every operator in a single subgraph + // in the model. 
+ virtual TfLiteStatus InvokeSubgraph(int subgraph_idx); + + // Zeros out all variable tensors in all subgraphs in the model. + virtual TfLiteStatus ResetVariableTensors(); + + // Number of tensor inputs to a specified subgraph in the model. + virtual size_t NumSubgraphInputs(int subgraph_idx); + + // Get the specified input tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx); + + // Number of tensor outputs from a specified subgraph in the model. + virtual size_t NumSubgraphOutputs(int subgraph_idx); + + // Get the specified output tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, int output_idx); + + // Number of subgraphs in the model. + virtual int NumSubgraphs(); + + // Hook to pass in subgraph allocations tracked within the interpreter, + // allowing MicroInterpreterGraph to init / prepare / invoke subgraphs in the + // model. + void SetSubgraphAllocations(SubgraphAllocations* subgraph_allocations); + + // Get the current subgraph index. Within an on operator, this is guaranteed + // to be the subgraph of that operator. + int GetCurrentSubgraphIndex() { return current_subgraph_index_; } + + // Get the current operator index inside a subgraph. + // The couple GetCurrentSubgraphIndex GetCurrentSubgraphIndex creates a unique + // identifier of the operator inside the subgraph + int GetCurrentOperatorIndex() { return current_operator_index_; } + + // Gets the list of allocations for each subgraph. This is the source of truth + // for all per-subgraph allocation data. + SubgraphAllocations* GetAllocations() { return subgraph_allocations_; } + + // Get the resource variables for this TFLM graph. 
+ MicroResourceVariables* GetResourceVariables() { return resource_variables_; } + + private: + TfLiteContext* context_; + const Model* model_; + MicroAllocator* allocator_; + SubgraphAllocations* subgraph_allocations_ = nullptr; + int current_subgraph_index_; + uint32_t current_operator_index_; + MicroResourceVariables* resource_variables_; + const flatbuffers::Vector>* subgraphs_ = + nullptr; // Initialized as nullptr to prevent any possible issues + // related to accessing uninitialized memory. + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_GRAPH_H_ diff --git a/src/tensorflow/lite/micro/micro_log.cpp b/src/tensorflow/lite/micro/micro_log.cpp new file mode 100644 index 0000000..a08a07d --- /dev/null +++ b/src/tensorflow/lite/micro/micro_log.cpp @@ -0,0 +1,62 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/micro_log.h" + +#include +#include + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +#include "tensorflow/lite/micro/debug_log.h" +#endif + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +namespace { + +void VDebugLog(const char* format, ...) 
{ + va_list args; + va_start(args, format); + DebugLog(format, args); + va_end(args); +} + +} // namespace + +void VMicroPrintf(const char* format, va_list args) { + DebugLog(format, args); + // TODO(b/290051015): remove "\r\n" + VDebugLog("\r\n"); +} + +void MicroPrintf(const char* format, ...) { + va_list args; + va_start(args, format); + VMicroPrintf(format, args); + va_end(args); +} + +int MicroSnprintf(char* buffer, size_t buf_size, const char* format, ...) { + va_list args; + va_start(args, format); + int result = MicroVsnprintf(buffer, buf_size, format, args); + va_end(args); + return result; +} + +int MicroVsnprintf(char* buffer, size_t buf_size, const char* format, + va_list vlist) { + return DebugVsnprintf(buffer, buf_size, format, vlist); +} +#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) diff --git a/src/tensorflow/lite/micro/micro_log.h b/src/tensorflow/lite/micro/micro_log.h new file mode 100644 index 0000000..af3c24a --- /dev/null +++ b/src/tensorflow/lite/micro/micro_log.h @@ -0,0 +1,54 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +#include +#include +// These functions can be used independent of the MicroErrorReporter to get +// printf-like functionalitys and are common to all target platforms. +void MicroPrintf(const char* format, ...); +void VMicroPrintf(const char* format, va_list args); +int MicroSnprintf(char* buffer, size_t buf_size, const char* format, ...); +int MicroVsnprintf(char* buffer, size_t buf_size, const char* format, + va_list vlist); +#else +// We use a #define to ensure that the strings are completely stripped, to +// prevent an unnecessary increase in the binary size. +#define MicroPrintf(...) tflite::Unused(__VA_ARGS__) +#define VMicroPrintf(...) tflite::Unused(__VA_ARGS__) +#define MicroSnprintf(...) tflite::Unused(__VA_ARGS__) +#define MicroVsnprintf(...) tflite::Unused(__VA_ARGS__) +#endif + +namespace tflite { + +// From +// https://stackoverflow.com/questions/23235910/variadic-unused-function-macro +template +void Unused(Args&&... args) { + (void)(sizeof...(args)); +} + +template +T Unused(Args&&... args) { + (void)(sizeof...(args)); + return static_cast(0); +} + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ diff --git a/src/tensorflow/lite/micro/micro_mutable_op_resolver.h b/src/tensorflow/lite/micro/micro_mutable_op_resolver.h index 7a1015c..f5f6e38 100644 --- a/src/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ b/src/tensorflow/lite/micro/micro_mutable_op_resolver.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,31 +19,40 @@ limitations under the License. 
#include #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/core/api/flatbuffer_conversions.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/kernels/add.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/depthwise_conv.h" #include "tensorflow/lite/micro/kernels/ethosu.h" #include "tensorflow/lite/micro/kernels/fully_connected.h" #include "tensorflow/lite/micro/kernels/micro_ops.h" +#include "tensorflow/lite/micro/kernels/mul.h" +#include "tensorflow/lite/micro/kernels/pooling.h" +#include "tensorflow/lite/micro/kernels/reduce.h" +#include "tensorflow/lite/micro/kernels/softmax.h" +#include "tensorflow/lite/micro/kernels/transpose_conv.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_op_resolver.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { -TfLiteRegistration* Register_DETECTION_POSTPROCESS(); +TFLMRegistration* Register_DETECTION_POSTPROCESS(); template class MicroMutableOpResolver : public MicroOpResolver { public: - explicit MicroMutableOpResolver(ErrorReporter* error_reporter = nullptr) - : error_reporter_(error_reporter) {} + TF_LITE_REMOVE_VIRTUAL_DELETE + + explicit MicroMutableOpResolver() {} - const TfLiteRegistration* FindOp(tflite::BuiltinOperator op) const override { + const TFLMRegistration* FindOp(tflite::BuiltinOperator op) const override { if (op == BuiltinOperator_CUSTOM) return nullptr; for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; + const TFLMRegistration& registration = registrations_[i]; if (registration.builtin_code == op) { return ®istration; } @@ -51,9 +60,9 @@ class MicroMutableOpResolver : public MicroOpResolver { return nullptr; } - const 
TfLiteRegistration* FindOp(const char* op) const override { + const TFLMRegistration* FindOp(const char* op) const override { for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; + const TFLMRegistration& registration = registrations_[i]; if ((registration.builtin_code == BuiltinOperator_CUSTOM) && (strcmp(registration.custom_name, op) == 0)) { return ®istration; @@ -62,7 +71,7 @@ class MicroMutableOpResolver : public MicroOpResolver { return nullptr; } - MicroOpResolver::BuiltinParseFunction GetOpDataParser( + TfLiteBridgeBuiltinParseFunction GetOpDataParser( BuiltinOperator op) const override { TFLITE_DCHECK(num_buitin_ops_ <= tOpCount); for (unsigned int i = 0; i < num_buitin_ops_; ++i) { @@ -77,28 +86,23 @@ class MicroMutableOpResolver : public MicroOpResolver { // function is called again for a previously added Custom Operator, the // MicroOpResolver will be unchanged and this function will return // kTfLiteError. - TfLiteStatus AddCustom(const char* name, TfLiteRegistration* registration) { + TfLiteStatus AddCustom(const char* name, + const TFLMRegistration* registration) { if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Couldn't register custom op '%s', resolver size is too small (%d)", - name, tOpCount); - } + MicroPrintf( + "Couldn't register custom op '%s', resolver size is too" + "small (%d)", + name, tOpCount); return kTfLiteError; } if (FindOp(name) != nullptr) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddCustom for the same op more than once " - "is not supported (Op: %s).", - name); - } + MicroPrintf("Calling AddCustom for the same op more than once "); + MicroPrintf("is not supported (Op: %s).", name); return kTfLiteError; } - TfLiteRegistration* new_registration = ®istrations_[registrations_len_]; + TFLMRegistration* new_registration = ®istrations_[registrations_len_]; 
registrations_len_ += 1; *new_registration = *registration; @@ -111,64 +115,110 @@ class MicroMutableOpResolver : public MicroOpResolver { // MicroMutableOpResolver object. TfLiteStatus AddAbs() { - return AddBuiltin(BuiltinOperator_ABS, tflite::ops::micro::Register_ABS(), - ParseAbs); + return AddBuiltin(BuiltinOperator_ABS, Register_ABS(), ParseAbs); } - TfLiteStatus AddAdd() { - return AddBuiltin(BuiltinOperator_ADD, tflite::ops::micro::Register_ADD(), - ParseAdd); + TfLiteStatus AddAdd(const TFLMRegistration& registration = Register_ADD()) { + return AddBuiltin(BuiltinOperator_ADD, registration, ParseAdd); + } + + TfLiteStatus AddAddN() { + return AddBuiltin(BuiltinOperator_ADD_N, tflite::Register_ADD_N(), + ParseAddN); } TfLiteStatus AddArgMax() { - return AddBuiltin(BuiltinOperator_ARG_MAX, - tflite::ops::micro::Register_ARG_MAX(), ParseArgMax); + return AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(), ParseArgMax); } TfLiteStatus AddArgMin() { - return AddBuiltin(BuiltinOperator_ARG_MIN, - tflite::ops::micro::Register_ARG_MIN(), ParseArgMin); + return AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(), ParseArgMin); } - TfLiteStatus AddAveragePool2D() { - return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, - tflite::ops::micro::Register_AVERAGE_POOL_2D(), - ParsePool); + TfLiteStatus AddAssignVariable() { + return AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, + tflite::Register_ASSIGN_VARIABLE(), ParseAssignVariable); + } + + TfLiteStatus AddAveragePool2D( + const TFLMRegistration& registration = Register_AVERAGE_POOL_2D()) { + return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, registration, ParsePool); + } + + TfLiteStatus AddBatchMatMul() { + return AddBuiltin(BuiltinOperator_BATCH_MATMUL, + tflite::Register_BATCH_MATMUL(), ParseBatchMatMul); + } + + TfLiteStatus AddBatchToSpaceNd() { + return AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, + Register_BATCH_TO_SPACE_ND(), ParseBatchToSpaceNd); + } + + TfLiteStatus AddBroadcastArgs() { + return 
AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS(), + ParseBroadcastArgs); + } + + TfLiteStatus AddBroadcastTo() { + return AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(), + ParseBroadcastTo); + } + + TfLiteStatus AddCallOnce() { + return AddBuiltin(BuiltinOperator_CALL_ONCE, Register_CALL_ONCE(), + ParseCallOnce); + } + + TfLiteStatus AddCast() { + return AddBuiltin(BuiltinOperator_CAST, Register_CAST(), ParseCast); } TfLiteStatus AddCeil() { - return AddBuiltin(BuiltinOperator_CEIL, tflite::ops::micro::Register_CEIL(), - ParseCeil); + return AddBuiltin(BuiltinOperator_CEIL, Register_CEIL(), ParseCeil); } TfLiteStatus AddCircularBuffer() { - return AddCustom("CIRCULAR_BUFFER", - tflite::ops::micro::Register_CIRCULAR_BUFFER()); + return AddCustom("CIRCULAR_BUFFER", tflite::Register_CIRCULAR_BUFFER()); } TfLiteStatus AddConcatenation() { - return AddBuiltin(BuiltinOperator_CONCATENATION, - tflite::ops::micro::Register_CONCATENATION(), + return AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(), ParseConcatenation); } - TfLiteStatus AddConv2D() { - return AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), ParseConv2D); + TfLiteStatus AddConv2D( + const TFLMRegistration& registration = Register_CONV_2D()) { + return AddBuiltin(BuiltinOperator_CONV_2D, registration, ParseConv2D); } TfLiteStatus AddCos() { - return AddBuiltin(BuiltinOperator_COS, tflite::ops::micro::Register_COS(), - ParseCos); + return AddBuiltin(BuiltinOperator_COS, tflite::Register_COS(), ParseCos); + } + + TfLiteStatus AddCumSum() { + return AddBuiltin(BuiltinOperator_CUMSUM, tflite::Register_CUMSUM(), + ParseCumsum); } - TfLiteStatus AddDepthwiseConv2D() { - return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, - Register_DEPTHWISE_CONV_2D(), ParseDepthwiseConv2D); + TfLiteStatus AddDelay() { + // TODO(b/286250473): change back name to "Delay" and remove namespace + return AddCustom("SignalDelay", tflite::tflm_signal::Register_DELAY()); + } + 
+ TfLiteStatus AddDepthToSpace() { + return AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, + tflite::Register_DEPTH_TO_SPACE(), ParseDepthToSpace); + } + + TfLiteStatus AddDepthwiseConv2D( + const TFLMRegistration& registration = Register_DEPTHWISE_CONV_2D()) { + return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, registration, + ParseDepthwiseConv2D); } TfLiteStatus AddDequantize() { - return AddBuiltin(BuiltinOperator_DEQUANTIZE, - tflite::ops::micro::Register_DEQUANTIZE(), + return AddBuiltin(BuiltinOperator_DEQUANTIZE, tflite::Register_DEQUANTIZE(), ParseDequantize); } @@ -177,145 +227,256 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::Register_DETECTION_POSTPROCESS()); } + TfLiteStatus AddDiv() { + return AddBuiltin(BuiltinOperator_DIV, tflite::Register_DIV(), ParseDiv); + } + + TfLiteStatus AddEmbeddingLookup() { + return AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, + Register_EMBEDDING_LOOKUP(), ParseEmbeddingLookup); + } + + TfLiteStatus AddEnergy() { + // TODO(b/286250473): change back name to "Energy" and remove namespace + return AddCustom("SignalEnergy", tflite::tflm_signal::Register_ENERGY()); + } + + TfLiteStatus AddElu() { + return AddBuiltin(BuiltinOperator_ELU, tflite::Register_ELU(), ParseElu); + } + TfLiteStatus AddEqual() { - return AddBuiltin(BuiltinOperator_EQUAL, - tflite::ops::micro::Register_EQUAL(), ParseEqual); + return AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(), ParseEqual); } TfLiteStatus AddEthosU() { - TfLiteRegistration* registration = tflite::Register_ETHOSU(); + TFLMRegistration* registration = tflite::Register_ETHOSU(); if (registration) { return AddCustom(tflite::GetString_ETHOSU(), registration); } return kTfLiteOk; } + TfLiteStatus AddExp() { + return AddBuiltin(BuiltinOperator_EXP, Register_EXP(), ParseExp); + } + + TfLiteStatus AddExpandDims() { + return AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS(), + ParseExpandDims); + } + + TfLiteStatus AddFftAutoScale() { + // TODO(b/286250473): 
change back name and remove namespace + return AddCustom("SignalFftAutoScale", + tflite::tflm_signal::Register_FFT_AUTO_SCALE()); + } + + TfLiteStatus AddFill() { + return AddBuiltin(BuiltinOperator_FILL, tflite::Register_FILL(), ParseFill); + } + + TfLiteStatus AddFilterBank() { + // TODO(b/286250473): change back name to "FilterBank" and remove namespace + return AddCustom("SignalFilterBank", + tflite::tflm_signal::Register_FILTER_BANK()); + } + TfLiteStatus AddFilterBankLog() { + // TODO(b/286250473): change back name to "FilterBankLog" and remove + // namespace + return AddCustom("SignalFilterBankLog", + tflite::tflm_signal::Register_FILTER_BANK_LOG()); + } + TfLiteStatus AddFilterBankSquareRoot() { + // TODO(b/286250473): change back name to "FilterBankSquareRoot" and remove + // namespace + return AddCustom("SignalFilterBankSquareRoot", + tflite::tflm_signal::Register_FILTER_BANK_SQUARE_ROOT()); + } + TfLiteStatus AddFilterBankSpectralSubtraction() { + // TODO(b/286250473): change back name to "FilterBankSpectralSubtraction" + // and remove namespace + return AddCustom( + "SignalFilterBankSpectralSubtraction", + tflite::tflm_signal::Register_FILTER_BANK_SPECTRAL_SUBTRACTION()); + } + TfLiteStatus AddFloor() { - return AddBuiltin(BuiltinOperator_FLOOR, - tflite::ops::micro::Register_FLOOR(), ParseFloor); + return AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR(), ParseFloor); + } + + TfLiteStatus AddFloorDiv() { + return AddBuiltin(BuiltinOperator_FLOOR_DIV, tflite::Register_FLOOR_DIV(), + ParseFloorDiv); + } + + TfLiteStatus AddFloorMod() { + return AddBuiltin(BuiltinOperator_FLOOR_MOD, tflite::Register_FLOOR_MOD(), + ParseFloorMod); + } + + TfLiteStatus AddFramer() { + // TODO(b/286250473): change back name to "Framer" and remove namespace + return AddCustom("SignalFramer", tflite::tflm_signal::Register_FRAMER()); } TfLiteStatus AddFullyConnected( - const TfLiteRegistration& registration = Register_FULLY_CONNECTED()) { + const TFLMRegistration& registration 
= Register_FULLY_CONNECTED()) { return AddBuiltin(BuiltinOperator_FULLY_CONNECTED, registration, ParseFullyConnected); } + TfLiteStatus AddGather() { + return AddBuiltin(BuiltinOperator_GATHER, tflite::Register_GATHER(), + ParseGather); + } + + TfLiteStatus AddGatherNd() { + return AddBuiltin(BuiltinOperator_GATHER_ND, tflite::Register_GATHER_ND(), + ParseGatherNd); + } + TfLiteStatus AddGreater() { - return AddBuiltin(BuiltinOperator_GREATER, - tflite::ops::micro::Register_GREATER(), ParseGreater); + return AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(), + ParseGreater); } TfLiteStatus AddGreaterEqual() { - return AddBuiltin(BuiltinOperator_GREATER_EQUAL, - tflite::ops::micro::Register_GREATER_EQUAL(), + return AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(), ParseGreaterEqual); } TfLiteStatus AddHardSwish() { - return AddBuiltin(BuiltinOperator_HARD_SWISH, - tflite::ops::micro::Register_HARD_SWISH(), + return AddBuiltin(BuiltinOperator_HARD_SWISH, tflite::Register_HARD_SWISH(), ParseHardSwish); } + TfLiteStatus AddIf() { + return AddBuiltin(BuiltinOperator_IF, tflite::Register_IF(), ParseIf); + } + + TfLiteStatus AddIrfft(const TFLMRegistration* registration = + tflite::tflm_signal::Register_IRFFT()) { + // TODO(b/286250473): change back name and remove namespace + return AddCustom("SignalIrfft", registration); + } + TfLiteStatus AddL2Normalization() { return AddBuiltin(BuiltinOperator_L2_NORMALIZATION, - tflite::ops::micro::Register_L2_NORMALIZATION(), - ParseL2Normalization); + Register_L2_NORMALIZATION(), ParseL2Normalization); + } + + TfLiteStatus AddL2Pool2D() { + return AddBuiltin(BuiltinOperator_L2_POOL_2D, tflite::Register_L2_POOL_2D(), + ParsePool); + } + + TfLiteStatus AddLeakyRelu() { + return AddBuiltin(BuiltinOperator_LEAKY_RELU, tflite::Register_LEAKY_RELU(), + ParseLeakyRelu); } TfLiteStatus AddLess() { - return AddBuiltin(BuiltinOperator_LESS, tflite::ops::micro::Register_LESS(), - ParseLess); + return 
AddBuiltin(BuiltinOperator_LESS, Register_LESS(), ParseLess); } TfLiteStatus AddLessEqual() { - return AddBuiltin(BuiltinOperator_LESS_EQUAL, - tflite::ops::micro::Register_LESS_EQUAL(), + return AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(), ParseLessEqual); } TfLiteStatus AddLog() { - return AddBuiltin(BuiltinOperator_LOG, tflite::ops::micro::Register_LOG(), - ParseLog); + return AddBuiltin(BuiltinOperator_LOG, Register_LOG(), ParseLog); } TfLiteStatus AddLogicalAnd() { return AddBuiltin(BuiltinOperator_LOGICAL_AND, - tflite::ops::micro::Register_LOGICAL_AND(), - ParseLogicalAnd); + tflite::Register_LOGICAL_AND(), ParseLogicalAnd); } TfLiteStatus AddLogicalNot() { - return AddBuiltin(BuiltinOperator_LOGICAL_NOT, - tflite::ops::micro::Register_LOGICAL_NOT(), + return AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT(), ParseLogicalNot); } TfLiteStatus AddLogicalOr() { - return AddBuiltin(BuiltinOperator_LOGICAL_OR, - tflite::ops::micro::Register_LOGICAL_OR(), + return AddBuiltin(BuiltinOperator_LOGICAL_OR, tflite::Register_LOGICAL_OR(), ParseLogicalOr); } TfLiteStatus AddLogistic() { - return AddBuiltin(BuiltinOperator_LOGISTIC, - tflite::ops::micro::Register_LOGISTIC(), ParseLogistic); + return AddBuiltin(BuiltinOperator_LOGISTIC, tflite::Register_LOGISTIC(), + ParseLogistic); + } + + TfLiteStatus AddLogSoftmax() { + return AddBuiltin(BuiltinOperator_LOG_SOFTMAX, + tflite::Register_LOG_SOFTMAX(), ParseLogSoftmax); } TfLiteStatus AddMaximum() { - return AddBuiltin(BuiltinOperator_MAXIMUM, - tflite::ops::micro::Register_MAXIMUM(), ParseMaximum); + return AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(), + ParseMaximum); + } + + TfLiteStatus AddMaxPool2D( + const TFLMRegistration& registration = Register_MAX_POOL_2D()) { + return AddBuiltin(BuiltinOperator_MAX_POOL_2D, registration, ParsePool); } - TfLiteStatus AddMaxPool2D() { - return AddBuiltin(BuiltinOperator_MAX_POOL_2D, - tflite::ops::micro::Register_MAX_POOL_2D(), ParsePool); + 
TfLiteStatus AddMirrorPad() { + return AddBuiltin(BuiltinOperator_MIRROR_PAD, tflite::Register_MIRROR_PAD(), + ParseMirrorPad); } TfLiteStatus AddMean() { - return AddBuiltin(BuiltinOperator_MEAN, tflite::ops::micro::Register_MEAN(), - ParseReducer); + return AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(), ParseReducer); } TfLiteStatus AddMinimum() { - return AddBuiltin(BuiltinOperator_MINIMUM, - tflite::ops::micro::Register_MINIMUM(), ParseMinimum); + return AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(), + ParseMinimum); } - TfLiteStatus AddMul() { - return AddBuiltin(BuiltinOperator_MUL, tflite::ops::micro::Register_MUL(), - ParseMul); + TfLiteStatus AddMul(const TFLMRegistration& registration = Register_MUL()) { + return AddBuiltin(BuiltinOperator_MUL, registration, ParseMul); } TfLiteStatus AddNeg() { - return AddBuiltin(BuiltinOperator_NEG, tflite::ops::micro::Register_NEG(), - ParseNeg); + return AddBuiltin(BuiltinOperator_NEG, Register_NEG(), ParseNeg); } TfLiteStatus AddNotEqual() { - return AddBuiltin(BuiltinOperator_NOT_EQUAL, - tflite::ops::micro::Register_NOT_EQUAL(), ParseNotEqual); + return AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(), + ParseNotEqual); + } + + TfLiteStatus AddOverlapAdd() { + // TODO(b/286250473): change back name to "OverlapAdd" and remove namespace + return AddCustom("SignalOverlapAdd", + tflite::tflm_signal::Register_OVERLAP_ADD()); } TfLiteStatus AddPack() { - return AddBuiltin(BuiltinOperator_PACK, tflite::ops::micro::Register_PACK(), - ParsePack); + return AddBuiltin(BuiltinOperator_PACK, Register_PACK(), ParsePack); } - TfLiteStatus AddPad() { - return AddBuiltin(BuiltinOperator_PAD, tflite::ops::micro::Register_PAD(), - ParsePad); + TfLiteStatus AddPad(const TFLMRegistration& registration = Register_PAD()) { + return AddBuiltin(BuiltinOperator_PAD, registration, ParsePad); } TfLiteStatus AddPadV2() { - return AddBuiltin(BuiltinOperator_PADV2, - tflite::ops::micro::Register_PADV2(), ParsePadV2); + 
return AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), ParsePadV2); + } + + TfLiteStatus AddPCAN() { + // TODO(b/286250473): change back name to "PCAN" and remove namespace + return AddCustom("SignalPCAN", tflite::tflm_signal::Register_PCAN()); } TfLiteStatus AddPrelu() { - return AddBuiltin(BuiltinOperator_PRELU, - tflite::ops::micro::Register_PRELU(), ParsePrelu); + return AddBuiltin(BuiltinOperator_PRELU, tflite::Register_PRELU(), + ParsePrelu); } TfLiteStatus AddQuantize() { @@ -323,40 +484,58 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseQuantize); } + TfLiteStatus AddReadVariable() { + return AddBuiltin(BuiltinOperator_READ_VARIABLE, + tflite::Register_READ_VARIABLE(), ParseReadVariable); + } + TfLiteStatus AddReduceMax() { - return AddBuiltin(BuiltinOperator_REDUCE_MAX, - tflite::ops::micro::Register_REDUCE_MAX(), ParseReducer); + return AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(), + ParseReducer); } TfLiteStatus AddRelu() { - return AddBuiltin(BuiltinOperator_RELU, tflite::ops::micro::Register_RELU(), - ParseRelu); + return AddBuiltin(BuiltinOperator_RELU, tflite::Register_RELU(), ParseRelu); } TfLiteStatus AddRelu6() { - return AddBuiltin(BuiltinOperator_RELU6, - tflite::ops::micro::Register_RELU6(), ParseRelu6); + return AddBuiltin(BuiltinOperator_RELU6, tflite::Register_RELU6(), + ParseRelu6); } TfLiteStatus AddReshape() { - return AddBuiltin(BuiltinOperator_RESHAPE, - tflite::ops::micro::Register_RESHAPE(), ParseReshape); + return AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE(), + ParseReshape); + } + + TfLiteStatus AddResizeBilinear() { + return AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, + Register_RESIZE_BILINEAR(), ParseResizeBilinear); } TfLiteStatus AddResizeNearestNeighbor() { return AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, - tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR(), + Register_RESIZE_NEAREST_NEIGHBOR(), ParseResizeNearestNeighbor); } + TfLiteStatus AddRfft(const 
TFLMRegistration* registration = + tflite::tflm_signal::Register_RFFT()) { + // TODO(b/286250473): change back name and remove namespace + return AddCustom("SignalRfft", registration); + } + TfLiteStatus AddRound() { - return AddBuiltin(BuiltinOperator_ROUND, - tflite::ops::micro::Register_ROUND(), ParseRound); + return AddBuiltin(BuiltinOperator_ROUND, Register_ROUND(), ParseRound); } TfLiteStatus AddRsqrt() { - return AddBuiltin(BuiltinOperator_RSQRT, - tflite::ops::micro::Register_RSQRT(), ParseRsqrt); + return AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT(), ParseRsqrt); + } + + TfLiteStatus AddSelectV2() { + return AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(), + ParseSelectV2); } TfLiteStatus AddShape() { @@ -364,94 +543,143 @@ class MicroMutableOpResolver : public MicroOpResolver { } TfLiteStatus AddSin() { - return AddBuiltin(BuiltinOperator_SIN, tflite::ops::micro::Register_SIN(), - ParseSin); + return AddBuiltin(BuiltinOperator_SIN, Register_SIN(), ParseSin); + } + + TfLiteStatus AddSlice() { + return AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(), ParseSlice); } - TfLiteStatus AddSoftmax() { - return AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(), - ParseSoftmax); + TfLiteStatus AddSoftmax( + const TFLMRegistration& registration = Register_SOFTMAX()) { + return AddBuiltin(BuiltinOperator_SOFTMAX, registration, ParseSoftmax); + } + + TfLiteStatus AddSpaceToBatchNd() { + return AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, + Register_SPACE_TO_BATCH_ND(), ParseSpaceToBatchNd); + } + + TfLiteStatus AddSpaceToDepth() { + return AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(), + ParseSpaceToDepth); } TfLiteStatus AddSplit() { - return AddBuiltin(BuiltinOperator_SPLIT, - tflite::ops::micro::Register_SPLIT(), ParseSplit); + return AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(), ParseSplit); } TfLiteStatus AddSplitV() { - return AddBuiltin(BuiltinOperator_SPLIT_V, - tflite::ops::micro::Register_SPLIT_V(), 
ParseSplitV); + return AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(), ParseSplitV); + } + + TfLiteStatus AddSqueeze() { + return AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(), + ParseSqueeze); } TfLiteStatus AddSqrt() { - return AddBuiltin(BuiltinOperator_SQRT, tflite::ops::micro::Register_SQRT(), - ParseSqrt); + return AddBuiltin(BuiltinOperator_SQRT, Register_SQRT(), ParseSqrt); } TfLiteStatus AddSquare() { - return AddBuiltin(BuiltinOperator_SQUARE, - tflite::ops::micro::Register_SQUARE(), ParseSquare); + return AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE(), ParseSquare); + } + + TfLiteStatus AddSquaredDifference() { + return AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, + tflite::Register_SQUARED_DIFFERENCE(), + ParseSquaredDifference); } TfLiteStatus AddStridedSlice() { - return AddBuiltin(BuiltinOperator_STRIDED_SLICE, - tflite::ops::micro::Register_STRIDED_SLICE(), + return AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(), ParseStridedSlice); } + TfLiteStatus AddStacker() { + // TODO(b/286250473): change back name to "Stacker" and remove namespace + return AddCustom("SignalStacker", tflite::tflm_signal::Register_STACKER()); + } + TfLiteStatus AddSub() { - return AddBuiltin(BuiltinOperator_SUB, tflite::ops::micro::Register_SUB(), - ParseSub); + return AddBuiltin(BuiltinOperator_SUB, tflite::Register_SUB(), ParseSub); } - TfLiteStatus AddSvdf() { - return AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(), ParseSvdf); + TfLiteStatus AddSum() { + return AddBuiltin(BuiltinOperator_SUM, Register_SUM(), ParseReducer); + } + + TfLiteStatus AddSvdf(const TFLMRegistration& registration = Register_SVDF()) { + return AddBuiltin(BuiltinOperator_SVDF, registration, ParseSvdf); } TfLiteStatus AddTanh() { - return AddBuiltin(BuiltinOperator_TANH, tflite::ops::micro::Register_TANH(), - ParseTanh); + return AddBuiltin(BuiltinOperator_TANH, Register_TANH(), ParseTanh); + } + + TfLiteStatus AddTransposeConv( + const TFLMRegistration& 
registration = Register_TRANSPOSE_CONV()) { + return AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, registration, + ParseTransposeConv); + } + + TfLiteStatus AddTranspose() { + return AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(), + ParseTranspose); } TfLiteStatus AddUnpack() { - return AddBuiltin(BuiltinOperator_UNPACK, - tflite::ops::micro::Register_UNPACK(), ParseUnpack); + return AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK(), ParseUnpack); + } + + TfLiteStatus AddUnidirectionalSequenceLSTM( + const TFLMRegistration& registration = + Register_UNIDIRECTIONAL_SEQUENCE_LSTM()) { + return AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, + registration, ParseUnidirectionalSequenceLSTM); + } + + TfLiteStatus AddVarHandle() { + return AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE(), + ParseVarHandle); + } + + TfLiteStatus AddWhile() { + return AddBuiltin(BuiltinOperator_WHILE, Register_WHILE(), ParseWhile); + } + + TfLiteStatus AddWindow() { + // TODO(b/286250473): change back name to "Window" and remove namespace + return AddCustom("SignalWindow", tflite::tflm_signal::Register_WINDOW()); + } + + TfLiteStatus AddZerosLike() { + return AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE(), + ParseZerosLike); } unsigned int GetRegistrationLength() { return registrations_len_; } private: - TF_LITE_REMOVE_VIRTUAL_DELETE - TfLiteStatus AddBuiltin(tflite::BuiltinOperator op, - const TfLiteRegistration& registration, - MicroOpResolver::BuiltinParseFunction parser) { + const TFLMRegistration& registration, + TfLiteBridgeBuiltinParseFunction parser) { if (op == BuiltinOperator_CUSTOM) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invalid parameter BuiltinOperator_CUSTOM to the " - "AddBuiltin function."); - } + MicroPrintf("Invalid parameter BuiltinOperator_CUSTOM to the "); + MicroPrintf("AddBuiltin function."); return kTfLiteError; } if (FindOp(op) != nullptr) { - if (error_reporter_ != nullptr) { 
- TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddBuiltin with the same op more than " - "once is not supported (Op: #%d).", - op); - } + MicroPrintf("Calling AddBuiltin with the same op more than "); + MicroPrintf("once is not supported (Op: #%d).", op); return kTfLiteError; } if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Couldn't register builtin op #%d, resolver size " - "is too small (%d).", - op, tOpCount); - } + MicroPrintf("Couldn't register builtin op #%d, resolver size ", op); + MicroPrintf("is too small (%d).", tOpCount); return kTfLiteError; } @@ -468,16 +696,14 @@ class MicroMutableOpResolver : public MicroOpResolver { return kTfLiteOk; } - TfLiteRegistration registrations_[tOpCount]; + TFLMRegistration registrations_[tOpCount]; unsigned int registrations_len_ = 0; // Arrays (and counter) to store the builtin codes and their corresponding // parse functions as these are registered with the Op Resolver. BuiltinOperator builtin_codes_[tOpCount]; - MicroOpResolver::BuiltinParseFunction builtin_parsers_[tOpCount]; + TfLiteBridgeBuiltinParseFunction builtin_parsers_[tOpCount]; unsigned int num_buitin_ops_ = 0; - - ErrorReporter* error_reporter_; }; }; // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_op_resolver.cpp b/src/tensorflow/lite/micro/micro_op_resolver.cpp new file mode 100644 index 0000000..8d9603c --- /dev/null +++ b/src/tensorflow/lite/micro/micro_op_resolver.cpp @@ -0,0 +1,55 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/micro_op_resolver.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/schema/schema_utils.h" + +namespace tflite { + +TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, + const MicroOpResolver& op_resolver, + const TFLMRegistration** registration) { + TfLiteStatus status = kTfLiteOk; + *registration = nullptr; + auto builtin_code = GetBuiltinCode(opcode); + + if (builtin_code > BuiltinOperator_MAX) { + MicroPrintf("Op builtin_code out of range: %d.", builtin_code); + status = kTfLiteError; + } else if (builtin_code != BuiltinOperator_CUSTOM) { + *registration = op_resolver.FindOp(builtin_code); + if (*registration == nullptr) { + MicroPrintf("Didn't find op for builtin opcode '%s'", + EnumNameBuiltinOperator(builtin_code)); + status = kTfLiteError; + } + } else if (!opcode->custom_code()) { + MicroPrintf("Operator with CUSTOM builtin_code has no custom_code.\n"); + status = kTfLiteError; + } else { + const char* name = opcode->custom_code()->c_str(); + *registration = op_resolver.FindOp(name); + if (*registration == nullptr) { + // Do not report error for unresolved custom op, we do the final check + // while preparing ops. + status = kTfLiteError; + } + } + return status; +} +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_op_resolver.h b/src/tensorflow/lite/micro/micro_op_resolver.h index 757b6b8..e9aac38 100644 --- a/src/tensorflow/lite/micro/micro_op_resolver.h +++ b/src/tensorflow/lite/micro/micro_op_resolver.h @@ -16,9 +16,8 @@ limitations under the License. 
#define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/core/api/op_resolver.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -32,42 +31,32 @@ namespace tflite { // We need an interface class instead of directly using MicroMutableOpResolver // because MicroMutableOpResolver is a class template with the number of // registered Ops as the template parameter. -class MicroOpResolver : public OpResolver { +class MicroOpResolver { public: - typedef TfLiteStatus (*BuiltinParseFunction)(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - // Returns the Op registration struct corresponding to the enum code from the // flatbuffer schema. Returns nullptr if the op is not found or if op == // BuiltinOperator_CUSTOM. - virtual const TfLiteRegistration* FindOp(BuiltinOperator op) const = 0; + virtual const TFLMRegistration* FindOp(BuiltinOperator op) const = 0; // Returns the Op registration struct corresponding to the custom operator by // name. - virtual const TfLiteRegistration* FindOp(const char* op) const = 0; - - // This implementation exists for compatibility with the OpResolver base class - // and disregards the version parameter. - const TfLiteRegistration* FindOp(BuiltinOperator op, - int version) const final { - return FindOp(op); - } - - // This implementation exists for compatibility with the OpResolver base class - // and disregards the version parameter. 
- const TfLiteRegistration* FindOp(const char* op, int version) const final { - return FindOp(op); - } + virtual const TFLMRegistration* FindOp(const char* op) const = 0; // Returns the operator specific parsing function for the OpData for a // BuiltinOperator (if registered), else nullptr. - virtual BuiltinParseFunction GetOpDataParser(BuiltinOperator op) const = 0; + virtual TfLiteBridgeBuiltinParseFunction GetOpDataParser( + BuiltinOperator op) const = 0; - ~MicroOpResolver() override {} + virtual ~MicroOpResolver() {} }; +// Handles the logic for converting between an OperatorCode structure extracted +// from a flatbuffer and information about a registered operator +// implementation. +TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, + const MicroOpResolver& op_resolver, + const TFLMRegistration** registration); + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ diff --git a/src/tensorflow/lite/micro/micro_profiler.cpp b/src/tensorflow/lite/micro/micro_profiler.cpp index 83fb9f6..e349bf7 100644 --- a/src/tensorflow/lite/micro/micro_profiler.cpp +++ b/src/tensorflow/lite/micro/micro_profiler.cpp @@ -12,31 +12,121 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ - #include "tensorflow/lite/micro/micro_profiler.h" +#include +#include +#include + #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/micro_log.h" #include "tensorflow/lite/micro/micro_time.h" namespace tflite { -MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) - : reporter_(reporter) {} +uint32_t MicroProfiler::BeginEvent(const char* tag) { + if (num_events_ == kMaxEvents) { + MicroPrintf( + "MicroProfiler errored out because total number of events exceeded the " + "maximum of %d.", + kMaxEvents); + TFLITE_ASSERT_FALSE; + } -uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) { - start_time_ = GetCurrentTimeTicks(); - TFLITE_DCHECK(tag != nullptr); - event_tag_ = tag; - return 0; + tags_[num_events_] = tag; + start_ticks_[num_events_] = GetCurrentTimeTicks(); + end_ticks_[num_events_] = start_ticks_[num_events_] - 1; + return num_events_++; } void MicroProfiler::EndEvent(uint32_t event_handle) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int32_t end_time = GetCurrentTimeTicks(); - TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, - end_time - start_time_); + TFLITE_DCHECK(event_handle < kMaxEvents); + end_ticks_[event_handle] = GetCurrentTimeTicks(); +} + +uint32_t MicroProfiler::GetTotalTicks() const { + int32_t ticks = 0; + for (int i = 0; i < num_events_; ++i) { + ticks += end_ticks_[i] - start_ticks_[i]; + } + return ticks; +} + +void MicroProfiler::Log() const { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + for (int i = 0; i < num_events_; ++i) { + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%s took %u ticks (%d ms).", tags_[i], ticks, TicksToMs(ticks)); + } +#endif +} + +void MicroProfiler::LogCsv() const { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + MicroPrintf("\"Event\",\"Tag\",\"Ticks\""); + for (int i = 0; i < 
num_events_; ++i) { +#if defined(HEXAGON) || defined(CMSIS_NN) + int ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%d,%s,%d", i, tags_[i], ticks); +#else + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%d,%s,%" PRIu32, i, tags_[i], ticks); +#endif + } #endif } + +void MicroProfiler::LogTicksPerTagCsv() { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + MicroPrintf( + "\"Unique Tag\",\"Total ticks across all events with that tag.\""); + int total_ticks = 0; + for (int i = 0; i < num_events_; ++i) { + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + TFLITE_DCHECK(tags_[i] != nullptr); + int position = FindExistingOrNextPosition(tags_[i]); + TFLITE_DCHECK(position >= 0); + total_ticks_per_tag_[position].tag = tags_[i]; + total_ticks_per_tag_[position].ticks = + total_ticks_per_tag_[position].ticks + ticks; + total_ticks += ticks; + } + + for (int i = 0; i < num_events_; ++i) { + TicksPerTag each_tag_entry = total_ticks_per_tag_[i]; + if (each_tag_entry.tag == nullptr) { + break; + } + MicroPrintf("%s, %d", each_tag_entry.tag, each_tag_entry.ticks); + } + MicroPrintf("\"total number of ticks\", %d", total_ticks); +#endif +} + +// This method finds a particular array element in the total_ticks_per_tag array +// with the matching tag_name passed in the method. If it can find a +// matching array element that has the same tag_name, then it will return the +// position of the matching element. But if it unable to find a matching element +// with the given tag_name, it will return the next available empty position +// from the array. +int MicroProfiler::FindExistingOrNextPosition(const char* tag_name) { + int pos = 0; + for (; pos < num_events_; pos++) { + TicksPerTag each_tag_entry = total_ticks_per_tag_[pos]; + if (each_tag_entry.tag == nullptr || + strcmp(each_tag_entry.tag, tag_name) == 0) { + return pos; + } + } + return pos < num_events_ ? 
pos : -1; +} + +void MicroProfiler::ClearEvents() { + for (int i = 0; i < num_events_; i++) { + total_ticks_per_tag_[i].tag = nullptr; + } + + num_events_ = 0; +} + } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_profiler.h b/src/tensorflow/lite/micro/micro_profiler.h index a3144b3..fd8bc42 100644 --- a/src/tensorflow/lite/micro/micro_profiler.h +++ b/src/tensorflow/lite/micro/micro_profiler.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,9 +16,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ #define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" #include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" namespace tflite { @@ -26,45 +25,115 @@ namespace tflite { // performance. Bottleck operators can be identified along with slow code // sections. This can be used in conjunction with running the relevant micro // benchmark to evaluate end-to-end performance. +class MicroProfiler : public MicroProfilerInterface { + public: + MicroProfiler() = default; + virtual ~MicroProfiler() = default; + + // Marks the start of a new event and returns an event handle that can be used + // to mark the end of the event via EndEvent. The lifetime of the tag + // parameter must exceed that of the MicroProfiler. + virtual uint32_t BeginEvent(const char* tag) override; + + // Marks the end of an event associated with event_handle. It is the + // responsibility of the caller to ensure than EndEvent is called once and + // only once per event_handle. 
+ // + // If EndEvent is called more than once for the same event_handle, the last + // call will be used as the end of event marker. If EndEvent is called 0 times + // for a particular event_handle, the duration of that event will be 0 ticks. + virtual void EndEvent(uint32_t event_handle) override; + + // Clears all the events that have been currently profiled. + void ClearEvents(); + + // Returns the sum of the ticks taken across all the events. This number + // is only meaningful if all of the events are disjoint (the end time of + // event[i] <= start time of event[i+1]). + uint32_t GetTotalTicks() const; + + // Prints the profiling information of each of the events in human readable + // form. + void Log() const; + + // Prints the profiling information of each of the events in CSV (Comma + // Separated Value) form. + void LogCsv() const; + + // Prints total ticks for each unique tag in CSV format. + // Output will have one row for each unique tag along with the + // total ticks summed across all events with that particular tag. + void LogTicksPerTagCsv(); + + private: + // Maximum number of events that this class can keep track of. The + // MicroProfiler will abort if AddEvent is called more than kMaxEvents number + // of times. Increase this number if you need more events. + static constexpr int kMaxEvents = 4096; + + const char* tags_[kMaxEvents]; + uint32_t start_ticks_[kMaxEvents]; + uint32_t end_ticks_[kMaxEvents]; + int num_events_ = 0; + + struct TicksPerTag { + const char* tag; + uint32_t ticks; + }; + // In practice, the number of tags will be much lower than the number of + // events. But it is theoretically possible that each event to be unique and + // hence we allow total_ticks_per_tag to have kMaxEvents entries. 
+ TicksPerTag total_ticks_per_tag_[kMaxEvents] = {}; + + int FindExistingOrNextPosition(const char* tag_name); + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +#if defined(TF_LITE_STRIP_ERROR_STRINGS) +// For release builds, the ScopedMicroProfiler is a noop. // +// This is done because the ScipedProfiler is used as part of the +// MicroInterpreter and we want to ensure zero overhead for the release builds. +class ScopedMicroProfiler { + public: + explicit ScopedMicroProfiler(const char* tag, + MicroProfilerInterface* profiler) {} +}; + +#else + +// This class can be used to add events to a MicroProfiler object that span the +// lifetime of the ScopedMicroProfiler object. // Usage example: -// MicroProfiler profiler(error_reporter); +// +// MicroProfiler profiler(); +// ... // { -// ScopedProfile scoped_profile(profiler, tag); +// ScopedMicroProfiler scoped_profiler("custom_tag", profiler); // work_to_profile(); // } -// -// This will call the following methods in order: -// int event_handle = profiler->BeginEvent(op_name, EventType::DEFAULT, 0) -// work_to_profile(); -// profiler->EndEvent(event_handle) -class MicroProfiler : public tflite::Profiler { +class ScopedMicroProfiler { public: - explicit MicroProfiler(tflite::ErrorReporter* reporter); - ~MicroProfiler() override = default; + explicit ScopedMicroProfiler(const char* tag, + MicroProfilerInterface* profiler) + : profiler_(profiler) { + if (profiler_ != nullptr) { + event_handle_ = profiler_->BeginEvent(tag); + } + } - // AddEvent is unused for Tf Micro. - void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) override{}; - - // BeginEvent followed by code followed by EndEvent will profile the code - // enclosed. Multiple concurrent events are unsupported, so the return value - // is always 0. Event_metadata1 and event_metadata2 are unused. The tag - // pointer must be valid until EndEvent is called. 
- uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) override; - - // Event_handle is ignored since TF Micro does not support concurrent events. - void EndEvent(uint32_t event_handle) override; + ~ScopedMicroProfiler() { + if (profiler_ != nullptr) { + profiler_->EndEvent(event_handle_); + } + } private: - tflite::ErrorReporter* reporter_; - int32_t start_time_; - const char* event_tag_; - TF_LITE_REMOVE_VIRTUAL_DELETE + uint32_t event_handle_ = 0; + MicroProfilerInterface* profiler_ = nullptr; }; +#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_profiler_interface.h b/src/tensorflow/lite/micro/micro_profiler_interface.h new file mode 100644 index 0000000..f839a74 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_profiler_interface.h @@ -0,0 +1,38 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ + +#include + +namespace tflite { + +// Interface class that the TFLM framework relies on for profiling. 
+class MicroProfilerInterface { + public: + virtual ~MicroProfilerInterface() {} + + // Marks the start of a new event and returns an event handle that can be used + // to mark the end of the event via EndEvent. + virtual uint32_t BeginEvent(const char* tag) = 0; + + // Marks the end of an event associated with event_handle. + virtual void EndEvent(uint32_t event_handle) = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ diff --git a/src/tensorflow/lite/micro/micro_resource_variable.cpp b/src/tensorflow/lite/micro/micro_resource_variable.cpp new file mode 100644 index 0000000..843aac6 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_resource_variable.cpp @@ -0,0 +1,159 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/micro_resource_variable.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace {} // namespace + +MicroResourceVariables* MicroResourceVariables::Create( + MicroAllocator* allocator, int max_num_variables) { + TFLITE_DCHECK(allocator != nullptr); + + uint8_t* allocator_buffer = static_cast( + allocator->AllocatePersistentBuffer(sizeof(MicroResourceVariables))); + MicroResourceVariable* variable_array = + static_cast(allocator->AllocatePersistentBuffer( + sizeof(MicroResourceVariable) * max_num_variables)); + MicroResourceVariables* variables = new (allocator_buffer) + MicroResourceVariables(variable_array, max_num_variables); + return variables; +} + +int MicroResourceVariables::CreateIdIfNoneFound(const char* container, + const char* shared_name) { + int resource_id = FindId(container, shared_name); + if (resource_id >= 0) { + return resource_id; + } + + // no existing variable found for the given container and shared name pair. + if (num_resource_variables_ >= max_variable_count_) { + MicroPrintf( + "Failed to allocate resource variable. 
Maximum resource variable count " + "(%d) " + "reached.", + max_variable_count_); + return -1; + } + + resource_id = num_resource_variables_++; + resource_variables_[resource_id].container = container; + resource_variables_[resource_id].shared_name = shared_name; + resource_variables_[resource_id].resource_buffer = nullptr; + resource_variables_[resource_id].bytes = 0; + resource_variables_[resource_id].default_value = 0; + return resource_id; +} + +TfLiteStatus MicroResourceVariables::Read(int id, + const TfLiteEvalTensor* tensor) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + MicroResourceVariable variable = resource_variables_[id]; + TFLITE_DCHECK(EvalTensorBytes(tensor) == variable.bytes); + TFLITE_DCHECK(variable.resource_buffer != nullptr); + memcpy(tensor->data.raw, variable.resource_buffer, variable.bytes); + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::Allocate(int id, TfLiteContext* context, + const TfLiteTensor* tensor) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + + MicroResourceVariable& variable = resource_variables_[id]; + + if (variable.resource_buffer == nullptr) { + variable.bytes = tensor->bytes; + variable.resource_buffer = + context->AllocatePersistentBuffer(context, tensor->bytes); + if (variable.resource_buffer == nullptr) { + MicroPrintf("Failed to allocate resource buffer."); + return kTfLiteError; + } + // Set resource buffers to the zero_point by default. Buffers can be + // initialized to nonzero values using ASSIGN_VARIABLE. + // See comment#2 in b/269648474 for more details why we use zero_point. 
+ if (tensor->quantization.params != nullptr) { + auto* quantization_data = reinterpret_cast( + tensor->quantization.params); + int8_t zero_point = quantization_data->zero_point[0].data[0]; + variable.default_value = zero_point; + } + // TODO(b/269669735): Explains why casting zero_point to int8 and memset. + memset(variable.resource_buffer, variable.default_value, variable.bytes); + } + + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::Assign(int id, size_t count_bytes, + const void* input_buffer) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + MicroResourceVariable variable = resource_variables_[id]; + + if (variable.resource_buffer == nullptr) { + MicroPrintf( + "Attempting to assign from a TfLiteEvalTensor before the resource " + "buffer has been allocated. Make sure to call AssignResourceVariable " + "with a TfLiteTensor first."); + return kTfLiteError; + } + TFLITE_DCHECK(count_bytes == variable.bytes); + TFLITE_DCHECK(input_buffer != nullptr); + memcpy(variable.resource_buffer, input_buffer, variable.bytes); + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::ResetAll() { + for (int i = 0; i < num_resource_variables_; i++) { + MicroResourceVariable variable = resource_variables_[i]; + // TODO(b/269669735): Explains why casting zero_point to int8 and memset. + memset(variable.resource_buffer, variable.default_value, variable.bytes); + } + return kTfLiteOk; +} + +int MicroResourceVariables::FindId(const char* container, + const char* shared_name) { + for (int i = 0; i < num_resource_variables_; i++) { + // Some TFLite flatbuffers contain null container names to save space. 
+ if ((container == nullptr || + !strcmp(container, resource_variables_[i].container)) && + !strcmp(shared_name, resource_variables_[i].shared_name)) { + return i; + } + } + return -1; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_resource_variable.h b/src/tensorflow/lite/micro/micro_resource_variable.h new file mode 100644 index 0000000..57da649 --- /dev/null +++ b/src/tensorflow/lite/micro/micro_resource_variable.h @@ -0,0 +1,89 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ +#define TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_allocator.h" + +namespace tflite { + +class MicroResourceVariables { + public: + // Create + static MicroResourceVariables* Create(MicroAllocator* allocator, + int num_variables); + + // Creates a resource variable if none is available for the given container + // and shared name pair. Returns the resource ID corresponding to the + // container and shared name pair. If allocation fails, the returned resource + // ID will be negative. The the container and shared_name must outlive this + // class. 
+ int CreateIdIfNoneFound(const char* container, const char* shared_name); + + // Read the resource buffer associated with the given ID into the given + // tensor. + TfLiteStatus Read(int id, const TfLiteEvalTensor* tensor); + + // Allocates the resource buffer if none has been allocated, based on the + // length of the input tensor. Copies input tensor contents to the resource + // buffer. + TfLiteStatus Allocate(int id, TfLiteContext* context, + const TfLiteTensor* tensor); + + // Copies input_buffer contents to the resource buffer. + // AllocateResourceVariable with a TFLite tensor must have been called first + // in order to allocate the resource buffer. + TfLiteStatus Assign(int id, size_t count_bytes, const void* input_buffer); + + // Zeros out all resource buffers. + TfLiteStatus ResetAll(); + + private: + int FindId(const char* container, const char* shared_name); + + // Micro resource contains the mapping between resource container/name strings + // and resouce IDs. Each resource ID corresponds to a resource buffer pointer. + // The resouce ID is created during the VAR_HANDLE operator preparation stage. + // The resource buffer pointer is created during ASSIGN_VARIABLE preparation + // stage based on the size of the TFLiteTensor being assigned. + struct MicroResourceVariable { + const char* container; + const char* shared_name; + void* resource_buffer; + + // This is only for verifying read size. 
+ size_t bytes; + // Initialization default value + int8_t default_value; + }; + + MicroResourceVariables(MicroResourceVariable* variables, + int max_variable_count) + : resource_variables_(variables), + max_variable_count_(max_variable_count), + num_resource_variables_(0) {} + + MicroResourceVariable* resource_variables_; + int max_variable_count_; + int num_resource_variables_; +}; + +} // namespace tflite + +#endif // TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ diff --git a/src/tensorflow/lite/micro/micro_string.cpp b/src/tensorflow/lite/micro/micro_string.cpp deleted file mode 100644 index ad769f6..0000000 --- a/src/tensorflow/lite/micro/micro_string.cpp +++ /dev/null @@ -1,309 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Implements debug logging for numbers by converting them into strings and then -// calling the main DebugLog(char*) function. These are separated into a -// different file so that platforms can just implement the string output version -// of DebugLog() and then get the numerical variations without requiring any -// more code. - -#include "tensorflow/lite/micro/micro_string.h" - -#include -#include -#include - -namespace { - -// Int formats can need up to 10 bytes for the value plus a single byte for the -// sign. 
-constexpr int kMaxIntCharsNeeded = 10 + 1; -// Hex formats can need up to 8 bytes for the value plus two bytes for the "0x". -constexpr int kMaxHexCharsNeeded = 8 + 2; - -// Float formats can need up to 7 bytes for the fraction plus 3 bytes for "x2^" -// plus 3 bytes for the exponent and a single sign bit. -constexpr float kMaxFloatCharsNeeded = 7 + 3 + 3 + 1; - -// All input buffers to the number conversion functions must be this long. -const int kFastToBufferSize = 48; - -// Reverses a zero-terminated string in-place. -char* ReverseStringInPlace(char* start, char* end) { - char* p1 = start; - char* p2 = end - 1; - while (p1 < p2) { - char tmp = *p1; - *p1++ = *p2; - *p2-- = tmp; - } - return start; -} - -// Appends a string to a string, in-place. You need to pass in the maximum -// string length as the second argument. -char* StrCatStr(char* main, int main_max_length, const char* to_append) { - char* current = main; - while (*current != 0) { - ++current; - } - char* current_end = main + (main_max_length - 1); - while ((*to_append != 0) && (current < current_end)) { - *current = *to_append; - ++current; - ++to_append; - } - *current = 0; - return current; -} - -// Populates the provided buffer with an ASCII representation of the number. -char* FastUInt32ToBufferLeft(uint32_t i, char* buffer, int base) { - char* start = buffer; - do { - int32_t digit = i % base; - char character; - if (digit < 10) { - character = '0' + digit; - } else { - character = 'a' + (digit - 10); - } - *buffer++ = character; - i /= base; - } while (i > 0); - *buffer = 0; - ReverseStringInPlace(start, buffer); - return buffer; -} - -// Populates the provided buffer with an ASCII representation of the number. -char* FastInt32ToBufferLeft(int32_t i, char* buffer) { - uint32_t u = i; - if (i < 0) { - *buffer++ = '-'; - u = -u; - } - return FastUInt32ToBufferLeft(u, buffer, 10); -} - -// Converts a number to a string and appends it to another. 
-char* StrCatInt32(char* main, int main_max_length, int32_t number) { - char number_string[kFastToBufferSize]; - FastInt32ToBufferLeft(number, number_string); - return StrCatStr(main, main_max_length, number_string); -} - -// Converts a number to a string and appends it to another. -char* StrCatUInt32(char* main, int main_max_length, uint32_t number, int base) { - char number_string[kFastToBufferSize]; - FastUInt32ToBufferLeft(number, number_string, base); - return StrCatStr(main, main_max_length, number_string); -} - -// Populates the provided buffer with ASCII representation of the float number. -// Avoids the use of any floating point instructions (since these aren't -// supported on many microcontrollers) and as a consequence prints values with -// power-of-two exponents. -char* FastFloatToBufferLeft(float f, char* buffer) { - char* current = buffer; - char* current_end = buffer + (kFastToBufferSize - 1); - // Access the bit fields of the floating point value to avoid requiring any - // float instructions. These constants are derived from IEEE 754. - const uint32_t sign_mask = 0x80000000; - const uint32_t exponent_mask = 0x7f800000; - const int32_t exponent_shift = 23; - const int32_t exponent_bias = 127; - const uint32_t fraction_mask = 0x007fffff; - uint32_t u; - memcpy(&u, &f, sizeof(int32_t)); - const int32_t exponent = - ((u & exponent_mask) >> exponent_shift) - exponent_bias; - const uint32_t fraction = (u & fraction_mask); - // Expect ~0x2B1B9D3 for fraction. - if (u & sign_mask) { - *current = '-'; - current += 1; - } - *current = 0; - // These are special cases for infinities and not-a-numbers. - if (exponent == 128) { - if (fraction == 0) { - current = StrCatStr(current, (current_end - current), "Inf"); - return current; - } else { - current = StrCatStr(current, (current_end - current), "NaN"); - return current; - } - } - // 0x007fffff (8388607) represents 0.99... 
for the fraction, so to print the - // correct decimal digits we need to scale our value before passing it to the - // conversion function. This scale should be 10000000/8388608 = 1.1920928955. - // We can approximate this using multiply-adds and right-shifts using the - // values in this array. The 1. portion of the number string is printed out - // in a fixed way before the fraction, below. - const int32_t scale_shifts_size = 13; - const int8_t scale_shifts[13] = {3, 4, 8, 11, 13, 14, 17, - 18, 19, 20, 21, 22, 23}; - uint32_t scaled_fraction = fraction; - for (int i = 0; i < scale_shifts_size; ++i) { - scaled_fraction += (fraction >> scale_shifts[i]); - } - *current = '1'; - current += 1; - *current = '.'; - current += 1; - *current = 0; - - // Prepend leading zeros to fill in all 7 bytes of the fraction. Truncate - // zeros off the end of the fraction. Every fractional value takes 7 bytes. - // For example, 2500 would be written into the buffer as 0002500 since it - // represents .00025. - constexpr int kMaxFractionalDigits = 7; - - // Abort early if there is not enough space in the buffer. - if (current_end - current <= kMaxFractionalDigits) { - return current; - } - - // Pre-fill buffer with zeros to ensure zero-truncation works properly. - for (int i = 1; i < kMaxFractionalDigits; i++) { - *(current + i) = '0'; - } - - // Track how large the fraction is to add leading zeros. - char* previous = current; - current = StrCatUInt32(current, (current_end - current), scaled_fraction, 10); - int fraction_digits = current - previous; - int leading_zeros = kMaxFractionalDigits - fraction_digits; - - // Overwrite the null terminator from StrCatUInt32 to ensure zero-trunctaion - // works properly. - *current = '0'; - - // Shift fraction values and prepend zeros if necessary. 
- if (leading_zeros != 0) { - for (int i = 0; i < fraction_digits; i++) { - current--; - *(current + leading_zeros) = *current; - *current = '0'; - } - current += kMaxFractionalDigits; - } - - // Truncate trailing zeros for cleaner logs. Ensure we leave at least one - // fractional character for the case when scaled_fraction is 0. - while (*(current - 1) == '0' && (current - 1) > previous) { - current--; - } - *current = 0; - current = StrCatStr(current, (current_end - current), "*2^"); - current = StrCatInt32(current, (current_end - current), exponent); - return current; -} - -int FormatInt32(char* output, int32_t i) { - return static_cast(FastInt32ToBufferLeft(i, output) - output); -} - -int FormatUInt32(char* output, uint32_t i) { - return static_cast(FastUInt32ToBufferLeft(i, output, 10) - output); -} - -int FormatHex(char* output, uint32_t i) { - return static_cast(FastUInt32ToBufferLeft(i, output, 16) - output); -} - -int FormatFloat(char* output, float i) { - return static_cast(FastFloatToBufferLeft(i, output) - output); -} - -} // namespace - -extern "C" int MicroVsnprintf(char* output, int len, const char* format, - va_list args) { - int output_index = 0; - const char* current = format; - // One extra character must be left for the null terminator. - const int usable_length = len - 1; - while (*current != '\0' && output_index < usable_length) { - if (*current == '%') { - current++; - switch (*current) { - case 'd': - // Cut off log message if format could exceed log buffer length. 
- if (usable_length - output_index < kMaxIntCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatInt32(&output[output_index], va_arg(args, int32_t)); - current++; - break; - case 'u': - if (usable_length - output_index < kMaxIntCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatUInt32(&output[output_index], va_arg(args, uint32_t)); - current++; - break; - case 'x': - if (usable_length - output_index < kMaxHexCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output[output_index++] = '0'; - output[output_index++] = 'x'; - output_index += - FormatHex(&output[output_index], va_arg(args, uint32_t)); - current++; - break; - case 'f': - if (usable_length - output_index < kMaxFloatCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatFloat(&output[output_index], va_arg(args, double)); - current++; - break; - case '%': - output[output_index++] = *current++; - break; - case 's': - char* string = va_arg(args, char*); - int string_idx = 0; - while (string_idx + output_index < usable_length && - string[string_idx] != '\0') { - output[output_index++] = string[string_idx++]; - } - current++; - } - } else { - output[output_index++] = *current++; - } - } - output[output_index++] = '\0'; - return output_index; -} - -extern "C" int MicroSnprintf(char* output, int len, const char* format, ...) { - va_list args; - va_start(args, format); - int bytes_written = MicroVsnprintf(output, len, format, args); - va_end(args); - return bytes_written; -} diff --git a/src/tensorflow/lite/micro/micro_string.h b/src/tensorflow/lite/micro/micro_string.h deleted file mode 100644 index 59303e8..0000000 --- a/src/tensorflow/lite/micro/micro_string.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ - -#include - -// Implements simple string formatting for numeric types. Returns the number of -// bytes written to output. -extern "C" { -// Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. -// MicroSnprintf() is implemented using MicroVsnprintf(). -int MicroVsnprintf(char* output, int len, const char* format, va_list args); -// Functionally equavalent to snprintf, trimmed down for TFLite Micro. -// For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string -// "int 10" in the buffer. -// Floating point values are logged in exponent notation (1.XXX*2^N). -int MicroSnprintf(char* output, int len, const char* format, ...); -} - -#endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ diff --git a/src/tensorflow/lite/micro/micro_time.cpp b/src/tensorflow/lite/micro/micro_time.cpp new file mode 100644 index 0000000..34298be --- /dev/null +++ b/src/tensorflow/lite/micro/micro_time.cpp @@ -0,0 +1,34 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Raspberry Pi Pico-specific implementation of timing functions. + +#include "tensorflow/lite/micro/micro_time.h" + +#include "tensorflow/lite/micro/debug_log.h" + +// These are headers from the RP2's SDK. +#include "hardware/timer.h" // NOLINT + +namespace tflite { +namespace { +// Pico's time_us_32() returns microseconds. +const uint32_t kClocksPerSecond = 1000000; +} // namespace + +uint32_t ticks_per_second() { return kClocksPerSecond; } + +uint32_t GetCurrentTimeTicks() { + return static_cast(time_us_32()); +} + +} // namespace tflite \ No newline at end of file diff --git a/src/tensorflow/lite/micro/micro_time.h b/src/tensorflow/lite/micro/micro_time.h index 465490a..5e8ad11 100644 --- a/src/tensorflow/lite/micro/micro_time.h +++ b/src/tensorflow/lite/micro/micro_time.h @@ -15,16 +15,24 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ -#include +#include namespace tflite { // These functions should be implemented by each target platform, and provide an // accurate tick count along with how many ticks there are per second. -int32_t ticks_per_second(); +uint32_t ticks_per_second(); // Return time in ticks. The meaning of a tick varies per platform. -int32_t GetCurrentTimeTicks(); +uint32_t GetCurrentTimeTicks(); + +inline uint32_t TicksToMs(int32_t ticks) { + uint32_t _ticks_per_second = ticks_per_second(); + _ticks_per_second = + _ticks_per_second > 0 ? 
_ticks_per_second : 1; // zero divide prevention + return static_cast(1000.0f * static_cast(ticks) / + static_cast(_ticks_per_second)); +} } // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_utils.cpp b/src/tensorflow/lite/micro/micro_utils.cpp index 9615236..7b0c9cf 100644 --- a/src/tensorflow/lite/micro/micro_utils.cpp +++ b/src/tensorflow/lite/micro/micro_utils.cpp @@ -20,7 +20,10 @@ limitations under the License. #include #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { @@ -32,17 +35,26 @@ int ElementCount(const TfLiteIntArray& dims) { return result; } -void SignedSymmetricPerChannelQuantize(const float* values, - TfLiteIntArray* dims, - int quantized_dimension, - int8_t* quantized_values, - float* scaling_factors) { +size_t EvalTensorBytes(const TfLiteEvalTensor* tensor) { + size_t bytes_per_element; + TFLITE_DCHECK(kTfLiteOk == + TfLiteTypeSizeOf(tensor->type, &bytes_per_element)); + return ElementCount(*tensor->dims) * bytes_per_element; +} + +void SignedSymmetricPerChannelQuantize( + const float* values, TfLiteIntArray* dims, int quantized_dimension, + int8_t* quantized_values, float* scaling_factors, TfLiteType type) { int input_size = ElementCount(*dims); int channel_count = dims->data[quantized_dimension]; int per_channel_size = input_size / channel_count; int stride; int channel_stride; + + int qmin = QMinFromTfLiteType(type); + int qmax = QMaxFromTfLiteType(type); + if (quantized_dimension == 0) { stride = 1; channel_stride = per_channel_size; @@ -50,7 +62,8 @@ void SignedSymmetricPerChannelQuantize(const float* values, stride = channel_count; channel_stride = 1; } else { - TF_LITE_FATAL("quantized dimension must be 0 or 3"); + MicroPrintf("quantized dimension must be 0 or 3"); + TFLITE_ABORT; } // Calculate scales for each 
channel. @@ -63,16 +76,13 @@ void SignedSymmetricPerChannelQuantize(const float* values, min = fminf(min, values[idx]); max = fmaxf(max, values[idx]); } - scaling_factors[channel] = - fmaxf(fabs(min), fabs(max)) / std::numeric_limits::max(); + scaling_factors[channel] = fmaxf(fabs(min), fabs(max)) / qmax; for (int i = 0; i < per_channel_size; i++) { int idx = channel * channel_stride + i * stride; const int32_t quantized_value = static_cast(roundf(values[idx] / scaling_factors[channel])); // Clamp: just in case some odd numeric offset. - quantized_values[idx] = - fminf(std::numeric_limits::max(), - fmaxf(std::numeric_limits::min() + 1, quantized_value)); + quantized_values[idx] = fminf(qmax, fmaxf(qmin + 1, quantized_value)); } } } diff --git a/src/tensorflow/lite/micro/micro_utils.h b/src/tensorflow/lite/micro/micro_utils.h index b9a3121..b362d34 100644 --- a/src/tensorflow/lite/micro/micro_utils.h +++ b/src/tensorflow/lite/micro/micro_utils.h @@ -19,6 +19,7 @@ limitations under the License. #include #include #include +#include #include "tensorflow/lite/c/common.h" @@ -28,6 +29,12 @@ namespace tflite { int ElementCount(const TfLiteIntArray& dims); +size_t EvalTensorBytes(const TfLiteEvalTensor* tensor); + +// C++11 does not support constexpr max; hence, use ternary conditional to +// create our own constexpr Max function. +constexpr int Max(int a, int b) { return a >= b ? a : b; } + // Converts a float value into a quantized value. Note that large values (close // to max int and min int) may see significant error due to a lack of floating // point granularity for large values. 
@@ -43,11 +50,13 @@ T FloatToQuantizedType(const float value, const float scale, int zero_point) { template T FloatToSymmetricQuantizedType(const float value, const float scale) { - int32_t result = round(value / scale); - result = - std::max(static_cast(std::numeric_limits::min() + 1), result); - result = - std::min(static_cast(std::numeric_limits::max()), result); + // 64-bit values are required since 8x16 conv accumulates to int64, meaning + // an int64 bias is required. + std::int64_t result = round(value / scale); + result = std::max( + static_cast(std::numeric_limits::min() + 1), result); + result = std::min(static_cast(std::numeric_limits::max()), + result); return result; } @@ -81,12 +90,19 @@ void SymmetricQuantize(const float* input, T* output, int num_elements, template void SymmetricPerChannelQuantize(const float* input, T* output, int num_elements, int num_channels, - float* scales) { + float* scales, + size_t quantized_dimension = 0) { int elements_per_channel = num_elements / num_channels; for (int i = 0; i < num_channels; i++) { for (int j = 0; j < elements_per_channel; j++) { - output[i * elements_per_channel + j] = FloatToSymmetricQuantizedType( - input[i * elements_per_channel + j], scales[i]); + size_t offset; + if (quantized_dimension == 0) { + offset = i * elements_per_channel + j; + } else { + offset = i + elements_per_channel * j; + } + output[offset] = + FloatToSymmetricQuantizedType(input[offset], scales[i]); } } } @@ -95,7 +111,8 @@ void SignedSymmetricPerChannelQuantize(const float* values, TfLiteIntArray* dims, int quantized_dimension, int8_t* quantized_values, - float* scaling_factor); + float* scaling_factor, + TfLiteType type = kTfLiteNoType); // Quantizes inputs based on the values provided, choosing the smallest range // which includes all input values. 
@@ -129,6 +146,24 @@ void Dequantize(const T* values, const int size, const float scale, } } +// based on TfLiteType passed in to these functions the corresponding max / min +// int for that type are returned +inline int QMinFromTfLiteType(TfLiteType type) { + if (type == kTfLiteInt4) { + return -8; + } else { + return std::numeric_limits::min(); + } +} + +inline int QMaxFromTfLiteType(TfLiteType type) { + if (type == kTfLiteInt4) { + return 7; + } else { + return std::numeric_limits::max(); + } +} + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ diff --git a/src/tensorflow/lite/micro/mock_micro_graph.cpp b/src/tensorflow/lite/micro/mock_micro_graph.cpp new file mode 100644 index 0000000..9c652fb --- /dev/null +++ b/src/tensorflow/lite/micro/mock_micro_graph.cpp @@ -0,0 +1,64 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/micro/mock_micro_graph.h" + +#include "tensorflow/lite/micro/test_helpers.h" + +namespace tflite { + +MockMicroGraph::MockMicroGraph(SingleArenaBufferAllocator* allocator) + : allocator_(allocator), init_count_(0), prepare_count_(0), free_count_(0) { + memset(invoke_counts_, 0, sizeof(invoke_counts_)); + mock_tensor_ = + reinterpret_cast(allocator_->AllocatePersistentBuffer( + sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); + int* dims_array = reinterpret_cast( + allocator_->AllocatePersistentBuffer(3 * sizeof(int), alignof(int))); + float* data_array = reinterpret_cast( + allocator_->AllocatePersistentBuffer(2 * sizeof(float), alignof(float))); + int dims[] = {2, 1, 2}; + memcpy(dims_array, dims, 3 * sizeof(int)); + mock_tensor_->dims = testing::IntArrayFromInts(dims_array); + mock_tensor_->data.f = data_array; + mock_tensor_->type = kTfLiteFloat32; +} + +TfLiteStatus MockMicroGraph::InvokeSubgraph(int subgraph_idx) { + invoke_counts_[subgraph_idx]++; + return kTfLiteOk; +} + +size_t MockMicroGraph::NumSubgraphInputs(int subgraph_idx) { return 1; } + +TfLiteEvalTensor* MockMicroGraph::GetSubgraphInput(int subgraph_idx, + int tensor_idx) { + return mock_tensor_; +} + +size_t MockMicroGraph::NumSubgraphOutputs(int subgraph_idx) { return 1; } + +TfLiteEvalTensor* MockMicroGraph::GetSubgraphOutput(int subgraph_idx, + int tensor_idx) { + return mock_tensor_; +} + +int MockMicroGraph::NumSubgraphs() { return kMaxSubgraphs; } + +MicroResourceVariables* MockMicroGraph::GetResourceVariables() { + return nullptr; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/mock_micro_graph.h b/src/tensorflow/lite/micro/mock_micro_graph.h new file mode 100644 index 0000000..745e8f0 --- /dev/null +++ b/src/tensorflow/lite/micro/mock_micro_graph.h @@ -0,0 +1,60 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ +#define TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_graph.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// MockMicroGraph stubs out all MicroGraph methods used during invoke. A count +// of the number of calls to invoke for each subgraph is maintained for +// validation of control flow operators. 
+class MockMicroGraph : public MicroGraph { + public: + explicit MockMicroGraph(SingleArenaBufferAllocator* allocator); + TfLiteStatus InvokeSubgraph(int subgraph_idx) override; + size_t NumSubgraphInputs(int subgraph_idx) override; + TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int tensor_idx) override; + size_t NumSubgraphOutputs(int subgraph_idx) override; + TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, + int tensor_idx) override; + int NumSubgraphs() override; + MicroResourceVariables* GetResourceVariables() override; + int get_init_count() const { return init_count_; } + int get_prepare_count() const { return prepare_count_; } + int get_free_count() const { return free_count_; } + int get_invoke_count(int subgraph_idx) const { + return invoke_counts_[subgraph_idx]; + } + + private: + static constexpr int kMaxSubgraphs = 10; + SingleArenaBufferAllocator* allocator_; + TfLiteEvalTensor* mock_tensor_; + int init_count_; + int prepare_count_; + int free_count_; + int invoke_counts_[kMaxSubgraphs]; + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ diff --git a/src/tensorflow/lite/micro/recording_micro_allocator.cpp b/src/tensorflow/lite/micro/recording_micro_allocator.cpp index 6bb5297..18addae 100644 --- a/src/tensorflow/lite/micro/recording_micro_allocator.cpp +++ b/src/tensorflow/lite/micro/recording_micro_allocator.cpp @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,33 +15,49 @@ limitations under the License. 
#include "tensorflow/lite/micro/recording_micro_allocator.h" -#include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h" #include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" #include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" +#include "tensorflow/lite/micro/micro_log.h" namespace tflite { +size_t RecordingMicroAllocator::GetDefaultTailUsage() { + // RecordingMicroAllocator inherits from MicroAllocator and its tail usage is + // similar with MicroAllocator with SingleArenaBufferAllocator and + // MicroAllocator being replaced. + return MicroAllocator::GetDefaultTailUsage( + /*is_memory_planner_given=*/false) + + AlignSizeUp() - + AlignSizeUp() + + AlignSizeUp() - AlignSizeUp(); +} + RecordingMicroAllocator::RecordingMicroAllocator( - RecordingSimpleMemoryAllocator* recording_memory_allocator, - ErrorReporter* error_reporter) - : MicroAllocator(recording_memory_allocator, error_reporter), + RecordingSingleArenaBufferAllocator* recording_memory_allocator, + MicroMemoryPlanner* memory_planner) + : MicroAllocator(recording_memory_allocator, memory_planner), recording_memory_allocator_(recording_memory_allocator) {} -RecordingMicroAllocator* RecordingMicroAllocator::Create( - uint8_t* tensor_arena, size_t arena_size, ErrorReporter* error_reporter) { - TFLITE_DCHECK(error_reporter != nullptr); - - RecordingSimpleMemoryAllocator* simple_memory_allocator = - RecordingSimpleMemoryAllocator::Create(error_reporter, tensor_arena, - arena_size); +RecordingMicroAllocator* RecordingMicroAllocator::Create(uint8_t* tensor_arena, + size_t arena_size) { + RecordingSingleArenaBufferAllocator* simple_memory_allocator = + 
RecordingSingleArenaBufferAllocator::Create(tensor_arena, arena_size); TFLITE_DCHECK(simple_memory_allocator != nullptr); - uint8_t* allocator_buffer = simple_memory_allocator->AllocateFromTail( + uint8_t* memory_planner_buffer = + simple_memory_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + GreedyMemoryPlanner* memory_planner = + new (memory_planner_buffer) GreedyMemoryPlanner(); + + uint8_t* allocator_buffer = simple_memory_allocator->AllocatePersistentBuffer( sizeof(RecordingMicroAllocator), alignof(RecordingMicroAllocator)); RecordingMicroAllocator* allocator = new (allocator_buffer) - RecordingMicroAllocator(simple_memory_allocator, error_reporter); + RecordingMicroAllocator(simple_memory_allocator, memory_planner); return allocator; } @@ -62,30 +78,29 @@ RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation( return recorded_node_and_registration_array_data_; case RecordedAllocationType::kOpData: return recorded_op_data_; +#ifdef USE_TFLM_COMPRESSION + case RecordedAllocationType::kCompressionData: + return recorded_compression_data_; +#endif // USE_TFLM_COMPRESSION + default: + break; } - TF_LITE_REPORT_ERROR(error_reporter(), "Invalid allocation type supplied: %d", - allocation_type); + MicroPrintf("Invalid allocation type supplied: %d", allocation_type); return RecordedAllocation(); } -const RecordingSimpleMemoryAllocator* +const RecordingSingleArenaBufferAllocator* RecordingMicroAllocator::GetSimpleMemoryAllocator() const { return recording_memory_allocator_; } void RecordingMicroAllocator::PrintAllocations() const { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation total %d bytes", - recording_memory_allocator_->GetUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation head %d bytes", - recording_memory_allocator_->GetHeadUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - 
"[RecordingMicroAllocator] Arena allocation tail %d bytes", - recording_memory_allocator_->GetTailUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation total %d bytes", + recording_memory_allocator_->GetUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation head %d bytes", + recording_memory_allocator_->GetNonPersistentUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation tail %d bytes", + recording_memory_allocator_->GetPersistentUsedBytes()); PrintRecordedAllocation(RecordedAllocationType::kTfLiteEvalTensorData, "TfLiteEvalTensor data", "allocations"); PrintRecordedAllocation(RecordedAllocationType::kPersistentTfLiteTensorData, @@ -103,6 +118,13 @@ void RecordingMicroAllocator::PrintAllocations() const { "NodeAndRegistration structs"); PrintRecordedAllocation(RecordedAllocationType::kOpData, "Operator runtime data", "OpData structs"); + +#ifdef USE_TFLM_COMPRESSION + + PrintRecordedAllocation(RecordedAllocationType::kCompressionData, + "Persistent compression data", "allocations"); + +#endif // USE_TFLM_COMPRESSION } void* RecordingMicroAllocator::AllocatePersistentBuffer(size_t bytes) { @@ -119,8 +141,7 @@ void RecordingMicroAllocator::PrintRecordedAllocation( #ifndef TF_LITE_STRIP_ERROR_STRINGS RecordedAllocation allocation = GetRecordedAllocation(allocation_type); if (allocation.used_bytes > 0 || allocation.requested_bytes > 0) { - TF_LITE_REPORT_ERROR( - error_reporter(), + MicroPrintf( "[RecordingMicroAllocator] '%s' used %d bytes with alignment overhead " "(requested %d bytes for %d %s)", allocation_name, allocation.used_bytes, allocation.requested_bytes, @@ -130,97 +151,111 @@ void RecordingMicroAllocator::PrintRecordedAllocation( } TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { + const Model* model, SubgraphAllocations* subgraph_allocations) { RecordedAllocation allocations = SnapshotAllocationUsage(); - 
TfLiteStatus status = MicroAllocator::AllocateNodeAndRegistrations( - model, node_and_registrations); + TfLiteStatus status = + MicroAllocator::AllocateNodeAndRegistrations(model, subgraph_allocations); RecordAllocationUsage(allocations, recorded_node_and_registration_array_data_); - // The allocation count in SimpleMemoryAllocator will only be 1. To provide - // better logging, decrement by 1 and add in the actual number of operators - // used in the graph: - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of nodes in the - // graph (e.g. sizeof(NodeAndRegistration) * num_nodes). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of nodes - // used in the graph: - recorded_node_and_registration_array_data_.count += - GetSubGraphFromModel(model)->operators()->size() - 1; - return status; -} - -TfLiteStatus -RecordingMicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, node_and_registrations); - RecordAllocationUsage(allocations, recorded_op_data_); + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + // The allocation count in SingleArenaBufferAllocator will only be 1. To + // provide better logging, decrement by 1 and add in the actual number of + // operators used in the graph: The allocation for this recording will + // always be 1. This is because the parent class mallocs one large + // allocation for the number of nodes in the graph (e.g. + // sizeof(NodeAndRegistration) * num_nodes). 
To prevent extra overhead and + // potential for fragmentation, manually adjust the accounting by + // decrementing by 1 and adding the actual number of nodes used in the + // graph: + if (model->subgraphs()->Get(subgraph_idx)->operators()) { + recorded_node_and_registration_array_data_.count += + model->subgraphs()->Get(subgraph_idx)->operators()->size() - 1; + } else { + recorded_node_and_registration_array_data_.count -= 1; + } + } return status; } TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { + const Model* model, SubgraphAllocations* subgraph_allocations) { RecordedAllocation allocations = SnapshotAllocationUsage(); TfLiteStatus status = - MicroAllocator::AllocateTfLiteEvalTensors(model, eval_tensors); + MicroAllocator::AllocateTfLiteEvalTensors(model, subgraph_allocations); RecordAllocationUsage(allocations, recorded_tflite_eval_tensor_data_); - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of tensors in the - // graph (e.g. sizeof(TfLiteEvalTensor) * num_tensors). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of tensors - // used in the graph: - recorded_tflite_eval_tensor_data_.count += - GetSubGraphFromModel(model)->tensors()->size() - 1; + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + // The allocation for this recording will always be 1. This is because the + // parent class mallocs one large allocation for the number of tensors in + // the graph (e.g. sizeof(TfLiteEvalTensor) * num_tensors). 
To prevent extra + // overhead and potential for fragmentation, manually adjust the accounting + // by decrementing by 1 and adding the actual number of tensors used in the + // graph: + recorded_tflite_eval_tensor_data_.count += + model->subgraphs()->Get(subgraph_idx)->tensors()->size() - 1; + } return status; } TfLiteStatus RecordingMicroAllocator::AllocateVariables( - const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) { RecordedAllocation allocations = SnapshotAllocationUsage(); - TfLiteStatus status = - MicroAllocator::AllocateVariables(subgraph, eval_tensors); + TfLiteStatus status = MicroAllocator::AllocateVariables( + subgraph, eval_tensors, offline_planner_offsets); RecordAllocationUsage(allocations, recorded_tflite_tensor_variable_buffer_data_); return status; } -TfLiteTensor* RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { +TfLiteTensor* +RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal() { RecordedAllocation allocations = SnapshotAllocationUsage(); - TfLiteTensor* result = MicroAllocator::AllocatePersistentTfLiteTensorInternal( - model, eval_tensors, tensor_index); + TfLiteTensor* result = + MicroAllocator::AllocatePersistentTfLiteTensorInternal(); RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_data_); return result; } TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { + const Model* model, TfLiteTensor* tensor, int tensor_index, + int subgraph_index, bool allocate_temp) { RecordedAllocation allocations = SnapshotAllocationUsage(); TfLiteStatus status = MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - model, subgraph, tensor, tensor_index, allocate_temp); + model, tensor, 
tensor_index, subgraph_index, allocate_temp); RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_quantization_data_); return status; } +#ifdef USE_TFLM_COMPRESSION + +TfLiteStatus RecordingMicroAllocator::AllocateCompressedTensorsList( + const Model* model, SubgraphAllocations* subgraph_allocations) { + RecordedAllocation allocations = SnapshotAllocationUsage(); + + TfLiteStatus status = MicroAllocator::AllocateCompressedTensorsList( + model, subgraph_allocations); + + RecordAllocationUsage(allocations, recorded_compression_data_); + return status; +} + +#endif // USE_TFLM_COMPRESSION + RecordedAllocation RecordingMicroAllocator::SnapshotAllocationUsage() const { return {/*requested_bytes=*/recording_memory_allocator_->GetRequestedBytes(), /*used_bytes=*/recording_memory_allocator_->GetUsedBytes(), diff --git a/src/tensorflow/lite/micro/recording_micro_allocator.h b/src/tensorflow/lite/micro/recording_micro_allocator.h index 47246e1..80f1632 100644 --- a/src/tensorflow/lite/micro/recording_micro_allocator.h +++ b/src/tensorflow/lite/micro/recording_micro_allocator.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ +#include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h" #include "tensorflow/lite/micro/compatibility.h" #include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" namespace tflite { @@ -33,6 +33,11 @@ enum class RecordedAllocationType { kTfLiteTensorVariableBufferData, kNodeAndRegistrationArray, kOpData, +#ifdef USE_TFLM_COMPRESSION + kCompressionData, +#endif // USE_TFLM_COMPRESSION + + kNumAllocationTypes, // must be last }; // Container for holding information about allocation recordings by a given @@ -48,21 +53,23 @@ struct RecordedAllocation { // Utility subclass of MicroAllocator that records all allocations // inside the arena. A summary of allocations can be logged through the // ErrorReporter by invoking LogAllocations(). This special allocator requires -// an instance of RecordingSimpleMemoryAllocator to capture allocations in the -// head and tail. Arena allocation recording can be retrieved by type through -// the GetRecordedAllocation() function. This class should only be used for -// auditing memory usage or integration testing. +// an instance of RecordingSingleArenaBufferAllocator to capture allocations in +// the head and tail. Arena allocation recording can be retrieved by type +// through the GetRecordedAllocation() function. This class should only be used +// for auditing memory usage or integration testing. class RecordingMicroAllocator : public MicroAllocator { public: static RecordingMicroAllocator* Create(uint8_t* tensor_arena, - size_t arena_size, - ErrorReporter* error_reporter); + size_t arena_size); + + // Returns the fixed amount of memory overhead of RecordingMicroAllocator. + static size_t GetDefaultTailUsage(); // Returns the recorded allocations information for a given allocation type. 
RecordedAllocation GetRecordedAllocation( RecordedAllocationType allocation_type) const; - const RecordingSimpleMemoryAllocator* GetSimpleMemoryAllocator() const; + const RecordingSingleArenaBufferAllocator* GetSimpleMemoryAllocator() const; // Logs out through the ErrorReporter all allocation recordings by type // defined in RecordedAllocationType. @@ -72,32 +79,35 @@ class RecordingMicroAllocator : public MicroAllocator { protected: TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, - NodeAndRegistration** node_and_registrations) override; - TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) override; + const Model* model, SubgraphAllocations* subgraph_allocations) override; TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) override; - TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) override; + const Model* model, SubgraphAllocations* subgraph_allocations) override; + TfLiteStatus AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) override; // TODO(b/162311891): Once all kernels have been updated to the new API drop // this method. It is only used to record TfLiteTensor persistent allocations. - TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, - int tensor_index) override; + TfLiteTensor* AllocatePersistentTfLiteTensorInternal() override; + // TODO(b/162311891): Once all kernels have been updated to the new API drop // this function since all allocations for quantized data will take place in // the temp section. 
TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, - const SubGraph* subgraph, TfLiteTensor* tensor, int tensor_index, + int subgraph_index, bool allocate_temp) override; +#ifdef USE_TFLM_COMPRESSION + + TfLiteStatus AllocateCompressedTensorsList( + const Model* model, SubgraphAllocations* subgraph_allocations) override; + +#endif // USE_TFLM_COMPRESSION + private: - RecordingMicroAllocator(RecordingSimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); + RecordingMicroAllocator(RecordingSingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); void PrintRecordedAllocation(RecordedAllocationType allocation_type, const char* allocation_name, @@ -107,7 +117,7 @@ class RecordingMicroAllocator : public MicroAllocator { void RecordAllocationUsage(const RecordedAllocation& snapshotted_allocation, RecordedAllocation& recorded_allocation); - const RecordingSimpleMemoryAllocator* recording_memory_allocator_; + const RecordingSingleArenaBufferAllocator* recording_memory_allocator_; RecordedAllocation recorded_tflite_eval_tensor_data_ = {}; RecordedAllocation recorded_persistent_tflite_tensor_data_ = {}; @@ -115,6 +125,11 @@ class RecordingMicroAllocator : public MicroAllocator { RecordedAllocation recorded_persistent_buffer_data_ = {}; RecordedAllocation recorded_tflite_tensor_variable_buffer_data_ = {}; RecordedAllocation recorded_node_and_registration_array_data_ = {}; +#ifdef USE_TFLM_COMPRESSION + RecordedAllocation recorded_compression_data_ = {}; +#endif // USE_TFLM_COMPRESSION + + // TODO(b/187993291): Re-enable OpData allocating tracking. 
RecordedAllocation recorded_op_data_ = {}; TF_LITE_REMOVE_VIRTUAL_DELETE diff --git a/src/tensorflow/lite/micro/recording_micro_interpreter.h b/src/tensorflow/lite/micro/recording_micro_interpreter.h index 0a579b0..24987c2 100644 --- a/src/tensorflow/lite/micro/recording_micro_interpreter.h +++ b/src/tensorflow/lite/micro/recording_micro_interpreter.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ #include "tensorflow/lite/micro/micro_interpreter.h" +#include "tensorflow/lite/micro/micro_profiler_interface.h" #include "tensorflow/lite/micro/recording_micro_allocator.h" namespace tflite { @@ -37,19 +38,22 @@ class RecordingMicroInterpreter : public MicroInterpreter { RecordingMicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, - RecordingMicroAllocator::Create( - tensor_arena, tensor_arena_size, error_reporter), - error_reporter), + MicroResourceVariables* resource_variable = nullptr, + MicroProfilerInterface* profiler = nullptr) + : MicroInterpreter( + model, op_resolver, + RecordingMicroAllocator::Create(tensor_arena, tensor_arena_size), + resource_variable, profiler), recording_micro_allocator_( static_cast(allocator())) {} RecordingMicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, RecordingMicroAllocator* allocator, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, allocator, error_reporter), + MicroResourceVariables* resource_variable = nullptr, + MicroProfilerInterface* profiler = nullptr) + : MicroInterpreter(model, op_resolver, allocator, 
resource_variable, + profiler), recording_micro_allocator_(*allocator) {} const RecordingMicroAllocator& GetMicroAllocator() const { diff --git a/src/tensorflow/lite/micro/recording_simple_memory_allocator.cpp b/src/tensorflow/lite/micro/recording_simple_memory_allocator.cpp deleted file mode 100644 index ef30aca..0000000 --- a/src/tensorflow/lite/micro/recording_simple_memory_allocator.cpp +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" - -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" - -namespace tflite { - -RecordingSimpleMemoryAllocator::RecordingSimpleMemoryAllocator( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size), - requested_head_bytes_(0), - requested_tail_bytes_(0), - used_bytes_(0), - alloc_count_(0) {} - -RecordingSimpleMemoryAllocator::~RecordingSimpleMemoryAllocator() {} - -RecordingSimpleMemoryAllocator* RecordingSimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(buffer_head != nullptr); - RecordingSimpleMemoryAllocator tmp = - RecordingSimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); - - uint8_t* allocator_buffer = - tmp.AllocateFromTail(sizeof(RecordingSimpleMemoryAllocator), - alignof(RecordingSimpleMemoryAllocator)); - // Use the default copy constructor to populate internal states. 
- return new (allocator_buffer) RecordingSimpleMemoryAllocator(tmp); -} - -size_t RecordingSimpleMemoryAllocator::GetRequestedBytes() const { - return requested_head_bytes_ + requested_tail_bytes_; -} - -size_t RecordingSimpleMemoryAllocator::GetUsedBytes() const { - return used_bytes_; -} - -size_t RecordingSimpleMemoryAllocator::GetAllocatedCount() const { - return alloc_count_; -} - -TfLiteStatus RecordingSimpleMemoryAllocator::SetHeadBufferSize( - size_t size, size_t alignment) { - const uint8_t* previous_head = head(); - TfLiteStatus status = - SimpleMemoryAllocator::SetHeadBufferSize(size, alignment); - if (status == kTfLiteOk) { - used_bytes_ += head() - previous_head; - requested_head_bytes_ = size; - } - return status; -} - -uint8_t* RecordingSimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { - const uint8_t* previous_tail = tail(); - uint8_t* result = SimpleMemoryAllocator::AllocateFromTail(size, alignment); - if (result != nullptr) { - used_bytes_ += previous_tail - tail(); - requested_tail_bytes_ += size; - alloc_count_++; - } - return result; -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/recording_simple_memory_allocator.h b/src/tensorflow/lite/micro/recording_simple_memory_allocator.h deleted file mode 100644 index 3526716..0000000 --- a/src/tensorflow/lite/micro/recording_simple_memory_allocator.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" - -namespace tflite { - -// Utility class used to log allocations of a SimpleMemoryAllocator. Should only -// be used in debug/evaluation settings or unit tests to evaluate allocation -// usage. -class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { - public: - RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, size_t buffer_size); - // TODO(b/157615197): Cleanup constructors/destructor and use factory - // functions. - ~RecordingSimpleMemoryAllocator() override; - - static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); - - // Returns the number of bytes requested from the head or tail. - size_t GetRequestedBytes() const; - - // Returns the number of bytes actually allocated from the head or tail. This - // value will be >= to the number of requested bytes due to padding and - // alignment. - size_t GetUsedBytes() const; - - // Returns the number of alloc calls from the head or tail. 
- size_t GetAllocatedCount() const; - - TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment) override; - uint8_t* AllocateFromTail(size_t size, size_t alignment) override; - - private: - size_t requested_head_bytes_; - size_t requested_tail_bytes_; - size_t used_bytes_; - size_t alloc_count_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/rp2/debug_log.cpp b/src/tensorflow/lite/micro/rp2/debug_log.cpp deleted file mode 100644 index 18f1b8e..0000000 --- a/src/tensorflow/lite/micro/rp2/debug_log.cpp +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Reference implementation of the DebugLog() function that's required for a -// platform to support the TensorFlow Lite for Microcontrollers library. This is -// the only function that's absolutely required to be available on a target -// device, since it's used for communicating test results back to the host so -// that we can verify the implementation is working correctly. -// It's designed to be as easy as possible to supply an implementation though. 
-// On platforms that have a POSIX stack or C library, it can be written as a -// single call to `fprintf(stderr, "%s", s)` to output a string to the error -// stream of the console, but if there's no OS or C library available, there's -// almost always an equivalent way to write out a string to some serial -// interface that can be used instead. For example on Arm M-series MCUs, calling -// the `bkpt #0xAB` assembler instruction will output the string in r1 to -// whatever debug serial connection is available. If you're running mbed, you -// can do the same by creating `Serial pc(USBTX, USBRX)` and then calling -// `pc.printf("%s", s)`. -// To add an equivalent function for your own platform, create your own -// implementation file, and place it in a subfolder with named after the OS -// you're targeting. For example, see the Cortex M bare metal version in -// tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on -// tensorflow/lite/micro/mbed/debug_log.cc. - -#include "tensorflow/lite/micro/debug_log.h" - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#include - -#include "pico/stdlib.h" -#endif - -extern "C" void DebugLog(const char* s) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - static bool has_uart_been_set_up = false; - if (!has_uart_been_set_up) { - setup_default_uart(); - has_uart_been_set_up = true; - } - // Reusing TF_LITE_STRIP_ERROR_STRINGS to disable DebugLog completely to get - // maximum reduction in binary size. This is because we have DebugLog calls - // via TF_LITE_CHECK that are not stubbed out by TF_LITE_REPORT_ERROR. - printf("%s", s); -#endif -} diff --git a/src/tensorflow/lite/micro/rp2/micro_time.cpp b/src/tensorflow/lite/micro/rp2/micro_time.cpp deleted file mode 100644 index ef580c0..0000000 --- a/src/tensorflow/lite/micro/rp2/micro_time.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Raspberry Pi Pico-specific implementation of timing functions. - -#include "tensorflow/lite/micro/micro_time.h" - -#include "tensorflow/lite/micro/debug_log.h" - -// These are headers from the RP2's SDK. -#include "hardware/timer.h" // NOLINT - -namespace tflite { -namespace { -// Pico's time_us_32() returns microseconds. -const int32_t kClocksPerSecond = 1000000; -} // namespace - -int32_t ticks_per_second() { return kClocksPerSecond; } - -int32_t GetCurrentTimeTicks() { - return static_cast(time_us_32()); -} - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/simple_memory_allocator.cpp b/src/tensorflow/lite/micro/simple_memory_allocator.cpp deleted file mode 100644 index 08b6789..0000000 --- a/src/tensorflow/lite/micro/simple_memory_allocator.cpp +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/simple_memory_allocator.h" - -#include -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, - uint8_t* buffer_tail) - : error_reporter_(error_reporter), - buffer_head_(buffer_head), - buffer_tail_(buffer_tail), - head_(buffer_head), - tail_(buffer_tail), - temp_(buffer_head_) {} - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer, - size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer, buffer + buffer_size) {} - -/* static */ -SimpleMemoryAllocator* SimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(buffer_head != nullptr); - SimpleMemoryAllocator tmp = - SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); - - // Allocate enough bytes from the buffer to create a SimpleMemoryAllocator. - // The new instance will use the current adjusted tail buffer from the tmp - // allocator instance. - uint8_t* allocator_buffer = tmp.AllocateFromTail( - sizeof(SimpleMemoryAllocator), alignof(SimpleMemoryAllocator)); - // Use the default copy constructor to populate internal states. 
- return new (allocator_buffer) SimpleMemoryAllocator(tmp); -} - -SimpleMemoryAllocator::~SimpleMemoryAllocator() {} - -TfLiteStatus SimpleMemoryAllocator::SetHeadBufferSize(size_t size, - size_t alignment) { - if (head_ != temp_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Internal error: SetHeadBufferSize() needs to be called " - "after ResetTempAllocations()."); - return kTfLiteError; - } - - uint8_t* const aligned_result = AlignPointerUp(buffer_head_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to set head size. Requested: %u, available %u, missing: %u", - size, available_memory, size - available_memory); - return kTfLiteError; - } - head_ = aligned_result + size; - temp_ = head_; - - return kTfLiteOk; -} - -uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { - uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); - if (aligned_result < head_) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - const size_t missing_memory = head_ - aligned_result; - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate tail memory. Requested: %u, " - "available %u, missing: %u", - size, size - missing_memory, missing_memory); -#endif - return nullptr; - } - tail_ = aligned_result; - return aligned_result; -} - -uint8_t* SimpleMemoryAllocator::AllocateTemp(size_t size, size_t alignment) { - uint8_t* const aligned_result = AlignPointerUp(temp_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate temp memory. 
Requested: %u, " - "available %u, missing: %u", - size, available_memory, size - available_memory); - return nullptr; - } - temp_ = aligned_result + size; - return aligned_result; -} - -void SimpleMemoryAllocator::ResetTempAllocations() { temp_ = head_; } - -uint8_t* SimpleMemoryAllocator::GetHeadBuffer() const { return buffer_head_; } - -size_t SimpleMemoryAllocator::GetHeadUsedBytes() const { - return head_ - buffer_head_; -} - -size_t SimpleMemoryAllocator::GetTailUsedBytes() const { - return buffer_tail_ - tail_; -} - -size_t SimpleMemoryAllocator::GetAvailableMemory(size_t alignment) const { - uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment); - uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment); - return aligned_tail - aligned_temp; -} - -size_t SimpleMemoryAllocator::GetUsedBytes() const { - return GetBufferSize() - (tail_ - temp_); -} - -size_t SimpleMemoryAllocator::GetBufferSize() const { - return buffer_tail_ - buffer_head_; -} - -uint8_t* SimpleMemoryAllocator::head() const { return head_; } - -uint8_t* SimpleMemoryAllocator::tail() const { return tail_; } - -} // namespace tflite diff --git a/src/tensorflow/lite/micro/simple_memory_allocator.h b/src/tensorflow/lite/micro/simple_memory_allocator.h deleted file mode 100644 index 35adaf1..0000000 --- a/src/tensorflow/lite/micro/simple_memory_allocator.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/micro/compatibility.h" - -namespace tflite { - -// TODO(petewarden): This allocator never frees up or reuses any memory, even -// though we have enough information about lifetimes of the tensors to do so. -// This makes it pretty wasteful, so we should use a more intelligent method. -class SimpleMemoryAllocator { - public: - // TODO(b/157615197): Cleanup constructors/destructor and use factory - // functions. - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer_head, - uint8_t* buffer_tail); - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer, - size_t buffer_size); - virtual ~SimpleMemoryAllocator(); - - // Creates a new SimpleMemoryAllocator from a given buffer head and size. - static SimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); - - // Adjusts the head (lowest address and moving upwards) memory allocation to a - // given size. Calls to this method will also invalidate all temporary - // allocation values (it sets the location of temp space at the end of the - // head section). This call will fail if a chain of allocations through - // AllocateTemp() have not been cleaned up with a call to - // ResetTempAllocations(). - virtual TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment); - - // Allocates memory starting at the tail of the arena (highest address and - // moving downwards). - virtual uint8_t* AllocateFromTail(size_t size, size_t alignment); - - // Allocates a temporary buffer from the head of the arena (lowest address and - // moving upwards) but does not update the actual head allocation size or - // position. 
The returned buffer is guaranteed until either - // ResetTempAllocations() is called or another call to AllocateFromHead(). - // Repeat calls to this function will create a chain of temp allocations. All - // calls to AllocateTemp() must end with a call to ResetTempAllocations(). If - // AllocateFromHead() is called before a call to ResetTempAllocations(), it - // will fail with an error message. - virtual uint8_t* AllocateTemp(size_t size, size_t alignment); - - // Resets a chain of temporary allocations back to the current head of the - // arena (lowest address). - virtual void ResetTempAllocations(); - - // Returns a pointer to the buffer currently assigned to the head section. - // This buffer is set by calling SetHeadSize(). - uint8_t* GetHeadBuffer() const; - - // Returns the size of the head section in bytes. - size_t GetHeadUsedBytes() const; - - // Returns the size of all allocations in the tail section in bytes. - size_t GetTailUsedBytes() const; - - // Returns the number of bytes available with a given alignment. This number - // takes in account any temporary allocations. - size_t GetAvailableMemory(size_t alignment) const; - - // Returns the number of used bytes in the allocator. This number takes in - // account any temporary allocations. - size_t GetUsedBytes() const; - - protected: - // Returns a pointer to the current end of the head buffer. - uint8_t* head() const; - - // Returns a pointer to the current end of the tail buffer. 
- uint8_t* tail() const; - - private: - size_t GetBufferSize() const; - - ErrorReporter* error_reporter_; - uint8_t* buffer_head_; - uint8_t* buffer_tail_; - uint8_t* head_; - uint8_t* tail_; - uint8_t* temp_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ diff --git a/src/tensorflow/lite/micro/span.h b/src/tensorflow/lite/micro/span.h new file mode 100644 index 0000000..9399f1d --- /dev/null +++ b/src/tensorflow/lite/micro/span.h @@ -0,0 +1,69 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef TENSORFLOW_LITE_MICRO_SPAN_H_ +#define TENSORFLOW_LITE_MICRO_SPAN_H_ + +#include +#include + +namespace tflite { + +// A poor man's std::span, we should consider using the Pigweed span instead. 
+template +class Span { + public: + constexpr Span(T* data, size_t size) noexcept : data_(data), size_(size) {} + + template + constexpr Span(T (&data)[N]) noexcept : data_(data), size_(N) {} + + template + constexpr Span(std::array& array) noexcept + : data_(array.data()), size_(N) {} + + constexpr T& operator[](size_t idx) const noexcept { return *(data_ + idx); } + + constexpr T* data() const noexcept { return data_; } + constexpr size_t size() const noexcept { return size_; } + + private: + T* data_; + size_t size_; +}; + +template +bool operator==(const Span& a, const Span& b) { + if (a.size() != b.size()) { + return false; + } + + for (size_t i = 0; i < a.size(); ++i) { + if (a[i] != b[i]) { + return false; + } + } + + return true; +} + +template +bool operator!=(const Span& a, const Span& b) { + return !(a == b); +} + +} // end namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_SPAN_H_ diff --git a/src/tensorflow/lite/micro/static_vector.h b/src/tensorflow/lite/micro/static_vector.h new file mode 100644 index 0000000..d332093 --- /dev/null +++ b/src/tensorflow/lite/micro/static_vector.h @@ -0,0 +1,83 @@ +// Copyright 2024 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_ +#define TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_ + +#include +#include +#include + +#include "tensorflow/lite/kernels/op_macros.h" + +namespace tflite { + +template +class StaticVector { + // A staticlly-allocated vector. Add to the interface as needed. + + private: + std::array array_; + std::size_t size_{0}; + + public: + using iterator = typename decltype(array_)::iterator; + using const_iterator = typename decltype(array_)::const_iterator; + using pointer = typename decltype(array_)::pointer; + using reference = typename decltype(array_)::reference; + using const_reference = typename decltype(array_)::const_reference; + + StaticVector() {} + + StaticVector(std::initializer_list values) { + for (const T& v : values) { + push_back(v); + } + } + + static constexpr std::size_t max_size() { return MaxSize; } + std::size_t size() const { return size_; } + bool full() const { return size() == max_size(); } + iterator begin() { return array_.begin(); } + const_iterator begin() const { return array_.begin(); } + iterator end() { return begin() + size(); } + const_iterator end() const { return begin() + size(); } + pointer data() { return array_.data(); } + reference operator[](int i) { return array_[i]; } + const_reference operator[](int i) const { return array_[i]; } + void clear() { size_ = 0; } + + template + bool operator==(const StaticVector& other) const { + return std::equal(begin(), end(), other.begin(), other.end()); + } + + template + bool operator!=(const StaticVector& other) const { + return !(*this == other); + } + + void push_back(const T& t) { + TF_LITE_ASSERT(!full()); + *end() = t; + ++size_; + } +}; + +template +StaticVector(T, U...) 
-> StaticVector; + +} // end namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_STATIC_VECTOR_H_ diff --git a/src/tensorflow/lite/micro/system_setup.cpp b/src/tensorflow/lite/micro/system_setup.cpp new file mode 100644 index 0000000..81f6071 --- /dev/null +++ b/src/tensorflow/lite/micro/system_setup.cpp @@ -0,0 +1,67 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Reference implementation of the DebugLog() function that's required for a +// platform to support the TensorFlow Lite for Microcontrollers library. This is +// the only function that's absolutely required to be available on a target +// device, since it's used for communicating test results back to the host so +// that we can verify the implementation is working correctly. +// It's designed to be as easy as possible to supply an implementation though. +// On platforms that have a POSIX stack or C library, it can be written as a +// single call to `fprintf(stderr, "%s", s)` to output a string to the error +// stream of the console, but if there's no OS or C library available, there's +// almost always an equivalent way to write out a string to some serial +// interface that can be used instead. For example on Arm M-series MCUs, calling +// the `bkpt #0xAB` assembler instruction will output the string in r1 to +// whatever debug serial connection is available. 
If you're running mbed, you +// can do the same by creating `Serial pc(USBTX, USBRX)` and then calling +// `pc.printf("%s", s)`. +// To add an equivalent function for your own platform, create your own +// implementation file, and place it in a subfolder with named after the OS +// you're targeting. For example, see the Cortex M bare metal version in +// tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on +// tensorflow/lite/micro/mbed/debug_log.cc. + +#include "tensorflow/lite/micro/debug_log.h" + +#ifndef TF_LITE_STRIP_ERROR_STRINGS +#include + +#include "pico/stdlib.h" +#endif // TF_LITE_STRIP_ERROR_STRINGS + +namespace tflite { + +void InitializeTarget() { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + stdio_init_all(); +#endif // TF_LITE_STRIP_ERROR_STRINGS +} + +} // namespace tflite + +extern "C" void DebugLog(const char* format, va_list args) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + // Reusing TF_LITE_STRIP_ERROR_STRINGS to disable DebugLog completely to get + // maximum reduction in binary size. This is because we have DebugLog calls + // via TF_LITE_CHECK that are not stubbed out by TF_LITE_REPORT_ERROR. + vfprintf(stderr, format, args); +#endif +} + +#ifndef TF_LITE_STRIP_ERROR_STRINGS +// Only called from MicroVsnprintf (micro_log.h) +int DebugVsnprintf(char* buffer, size_t buf_size, const char* format, + va_list vlist) { + return vsnprintf(buffer, buf_size, format, vlist); +} +#endif diff --git a/src/tensorflow/lite/micro/system_setup.h b/src/tensorflow/lite/micro/system_setup.h new file mode 100644 index 0000000..ce49ddb --- /dev/null +++ b/src/tensorflow/lite/micro/system_setup.h @@ -0,0 +1,59 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ +#define TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ + +#include +#include + +namespace tflite { + +// This should called during initialization of TFLM binaries and tests. It can +// be specialized if there is a need for custom target-specific intialization. +// For more information, see tensorflow/lite/micro/system_setup.cc. +void InitializeTarget(); + +} // namespace tflite + +namespace test_over_serial { + +// computed for Arduino Nano 33 BLE Sense +constexpr size_t kSerialMaxInputLength = (64); + +// Change baud rate on default serial port +void SerialChangeBaudRate(const int baud); + +// SerialReadLine +// Read a set of ASCII characters from the default +// serial port. Data is read up to the first newline ('\\n') character. +// This function uses an internal buffer which is automatically reset. +// The buffer will not contain the newline character. +// The buffer will be zero ('\\0') terminated. +// The value is in milliseconds. Any negative value means that +// the wait for data will be forever. +// Returns std::pair. +// The first pair element is the number of characters in buffer not including +// the newline character or zero terminator. +// Returns {0, NULL} if the timeout occurs. +std::pair SerialReadLine(int timeout); + +// SerialWrite +// Write the ASCII characters in to the default serial port. +// The must be zero terminated. 
+void SerialWrite(const char* buffer); + +} // namespace test_over_serial + +#endif // TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ diff --git a/src/tensorflow/lite/micro/test_helper_custom_ops.cpp b/src/tensorflow/lite/micro/test_helper_custom_ops.cpp new file mode 100644 index 0000000..9eac31c --- /dev/null +++ b/src/tensorflow/lite/micro/test_helper_custom_ops.cpp @@ -0,0 +1,297 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/test_helper_custom_ops.h" + +#include +#include +#include +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/micro_utils.h" +#include "tensorflow/lite/schema/schema_generated.h" + +// TODO(b/170464050): Use TFLM test only version of schema_utils. 
+ +namespace tflite { +namespace testing { + +namespace { + +template +void BroadcastAdd(const T input_scalar, const T* weights, T* output, + const size_t count) { + for (size_t i = 0; i < count; i++) { + output[i] = input_scalar + weights[i]; + } +} + +} // namespace + +const TFLMRegistration* PackerOp::getRegistration() { + return GetMutableRegistration(); +} + +TFLMRegistration* PackerOp::GetMutableRegistration() { + static TFLMRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* PackerOp::Init(TfLiteContext* context, const char* buffer, + size_t length) { + freed_ = false; + // Do nothing. + return nullptr; +} + +void PackerOp::Free(TfLiteContext* context, void* buffer) { freed_ = true; } + +TfLiteStatus PackerOp::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus PackerOp::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input1 != nullptr); + const int32_t* input1_data = input1->data.i32; + TF_LITE_ENSURE_EQ(context, input1->dims->size, 1); + const int32_t input1_len = input1->dims->data[0]; + + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, input2 != nullptr); + const int32_t* input2_data = input2->data.i32; + TF_LITE_ENSURE_EQ(context, input2->dims->size, 1); + const int32_t input2_len = input2->dims->data[0]; + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + int32_t* output_data = output->data.i32; + int32_t output_len = output->dims->data[0]; + + // Fill output with input: first with the first tensor, then with the second + // tensor up to the size of the output tensor. 
+ int cnt = 0; + int i; + for (i = 0; i < input1_len && cnt < output_len; i++, cnt++) { + output_data[cnt] = input1_data[i]; + } + if (cnt >= output_len) { + return kTfLiteOk; + } + + for (i = 0; i < input2_len && cnt < output_len; i++, cnt++) { + output_data[cnt] = input2_data[i]; + } + if (cnt >= output_len) { + return kTfLiteOk; + } + + for (; cnt < output_len; cnt++) { + output_data[cnt] = 0; + } + return kTfLiteOk; +} + +bool PackerOp::freed_ = false; + +const TFLMRegistration* BroadcastAddOp::getRegistration() { + return GetMutableRegistration(); +} + +TFLMRegistration* BroadcastAddOp::GetMutableRegistration() { + static TFLMRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + return &r; +} + +void* BroadcastAddOp::Init(TfLiteContext* context, const char* buffer, + size_t length) { +#ifdef USE_TFLM_COMPRESSION + + weight_scratch_index_ = -1; + +#endif // USE_TFLM_COMPRESSION + + // Do nothing. + return nullptr; +} + +TfLiteStatus BroadcastAddOp::Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* weights = micro_context->AllocateTempInputTensor(node, 1); + TF_LITE_ENSURE(context, weights != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, weights->type); + TF_LITE_ENSURE( + context, input->type == kTfLiteFloat32 || input->type == kTfLiteInt8 || + input->type == kTfLiteInt16 || input->type == kTfLiteInt32 || + input->type == kTfLiteInt64); + TF_LITE_ENSURE(context, input->quantization.type == kTfLiteNoQuantization); + TF_LITE_ENSURE(context, weights->quantization.type == kTfLiteNoQuantization); + TF_LITE_ENSURE(context, output->quantization.type == 
kTfLiteNoQuantization); + TF_LITE_ENSURE(context, + ElementCount(*weights->dims) == ElementCount(*output->dims)); + TF_LITE_ENSURE(context, ElementCount(*input->dims) == 1); + TF_LITE_ENSURE(context, input->dims->size == 1); + TF_LITE_ENSURE(context, weights->dims->size == 1); + +#ifdef USE_TFLM_COMPRESSION + + // Compression scratch buffers. + // These will only be allocated if the tensor is compressed. + weight_scratch_index_ = + micro_context->AllocateDecompressionScratchBuffer(node, 1); + if (!micro_context->IsTensorCompressed(node, 1)) { + TF_LITE_ENSURE(context, weight_scratch_index_ == -1); + } + +#endif // USE_TFLM_COMPRESSION + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(weights); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus BroadcastAddOp::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteEvalTensor* weights = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, weights != nullptr); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + +#ifdef USE_TFLM_COMPRESSION + + MicroContext* micro_context = GetMicroContext(context); + + const CompressionTensorData* weights_comp_td = + micro_context->GetTensorCompressionData(node, 1); + if (micro_context->IsTensorCompressed(node, 1)) { + TF_LITE_ENSURE(context, weights_comp_td != nullptr); + } else { + TF_LITE_ENSURE(context, weights_comp_td == nullptr); + } + +#endif // USE_TFLM_COMPRESSION + + switch (input->type) { + case kTfLiteFloat32: { + BroadcastAdd( + tflite::micro::GetTensorData(input)[0], +#ifdef USE_TFLM_COMPRESSION + tflite::micro::GetTensorData( + micro_context, weights, weights_comp_td, weight_scratch_index_), +#else // USE_TFLM_COMPRESSION + 
tflite::micro::GetTensorData(weights), +#endif // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(output), + ElementCount(*output->dims)); + } break; + + case kTfLiteInt8: { + BroadcastAdd( + tflite::micro::GetTensorData(input)[0], +#ifdef USE_TFLM_COMPRESSION + tflite::micro::GetTensorData( + micro_context, weights, weights_comp_td, weight_scratch_index_), +#else // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(weights), +#endif // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(output), + ElementCount(*output->dims)); + } break; + + case kTfLiteInt16: { + BroadcastAdd( + tflite::micro::GetTensorData(input)[0], +#ifdef USE_TFLM_COMPRESSION + tflite::micro::GetTensorData( + micro_context, weights, weights_comp_td, weight_scratch_index_), +#else // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(weights), +#endif // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(output), + ElementCount(*output->dims)); + } break; + + case kTfLiteInt32: { + BroadcastAdd( + tflite::micro::GetTensorData(input)[0], +#ifdef USE_TFLM_COMPRESSION + tflite::micro::GetTensorData( + micro_context, weights, weights_comp_td, weight_scratch_index_), +#else // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(weights), +#endif // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(output), + ElementCount(*output->dims)); + } break; + + case kTfLiteInt64: { + BroadcastAdd( + tflite::micro::GetTensorData(input)[0], +#ifdef USE_TFLM_COMPRESSION + tflite::micro::GetTensorData( + micro_context, weights, weights_comp_td, weight_scratch_index_), +#else // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(weights), +#endif // USE_TFLM_COMPRESSION + tflite::micro::GetTensorData(output), + ElementCount(*output->dims)); + } break; + + default: { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + } + + return kTfLiteOk; +} + +#ifdef USE_TFLM_COMPRESSION + +int BroadcastAddOp::weight_scratch_index_ = -1; + 
+#endif // USE_TFLM_COMPRESSION + +} // namespace testing +} // namespace tflite diff --git a/src/tensorflow/lite/micro/test_helper_custom_ops.h b/src/tensorflow/lite/micro/test_helper_custom_ops.h new file mode 100644 index 0000000..b67a9fc --- /dev/null +++ b/src/tensorflow/lite/micro/test_helper_custom_ops.h @@ -0,0 +1,66 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ +#define TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ + +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/micro/micro_common.h" +#include "tensorflow/lite/micro/micro_utils.h" +#include "tensorflow/lite/portable_type_to_tflitetype.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +namespace testing { + +class PackerOp { + public: + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static void Free(TfLiteContext* context, void* buffer); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, 
TfLiteNode* node); + + private: + static bool freed_; +}; + +// This op optionally supports compressed weights +class BroadcastAddOp { + public: + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + + private: +#ifdef USE_TFLM_COMPRESSION + + static int weight_scratch_index_; // decompression scratch buffer index + +#endif // USE_TFLM_COMPRESSION +}; + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ diff --git a/src/tensorflow/lite/micro/test_helpers.cpp b/src/tensorflow/lite/micro/test_helpers.cpp index f73073f..9e2267f 100644 --- a/src/tensorflow/lite/micro/test_helpers.cpp +++ b/src/tensorflow/lite/micro/test_helpers.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,22 +15,31 @@ limitations under the License. 
#include "tensorflow/lite/micro/test_helpers.h" +#include #include #include #include #include #include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/all_ops_resolver.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_arena_constants.h" #include "tensorflow/lite/micro/micro_utils.h" +#include "tensorflow/lite/micro/test_helper_custom_ops.h" #include "tensorflow/lite/schema/schema_generated.h" +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/compression/metadata_saved.h" + +#endif // USE_TFLM_COMPRESSION + // TODO(b/170464050): Use TFLM test only version of schema_utils. namespace tflite { @@ -39,7 +48,9 @@ namespace { class StackAllocator : public flatbuffers::Allocator { public: - StackAllocator() : data_(data_backing_), data_size_(0) {} + StackAllocator(size_t alignment) : data_size_(0) { + data_ = AlignPointerUp(data_backing_, alignment); + } uint8_t* allocate(size_t size) override { TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize); @@ -51,10 +62,10 @@ class StackAllocator : public flatbuffers::Allocator { void deallocate(uint8_t* p, size_t) override {} - static StackAllocator& instance() { + static StackAllocator& instance(size_t alignment = 1) { // Avoid using true dynamic memory allocation to be portable to bare metal. 
static char inst_memory[sizeof(StackAllocator)]; - static StackAllocator* inst = new (inst_memory) StackAllocator; + static StackAllocator* inst = new (inst_memory) StackAllocator(alignment); return *inst; } @@ -64,13 +75,16 @@ class StackAllocator : public flatbuffers::Allocator { uint8_t data_backing_[kStackAllocatorSize]; uint8_t* data_; int data_size_; + + TF_LITE_REMOVE_VIRTUAL_DELETE }; flatbuffers::FlatBufferBuilder* BuilderInstance() { static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)]; static flatbuffers::FlatBufferBuilder* inst = new (inst_memory) flatbuffers::FlatBufferBuilder( - StackAllocator::kStackAllocatorSize, &StackAllocator::instance()); + StackAllocator::kStackAllocatorSize, + &StackAllocator::instance(MicroArenaBufferAlignment())); return inst; } @@ -101,7 +115,9 @@ class ModelBuilder { // Adds a node to the model with given input and output Tensors. Node AddNode(Operator op, std::initializer_list inputs, - std::initializer_list outputs); + std::initializer_list outputs, + std::initializer_list intermediates = + std::initializer_list{}); void AddMetadata(const char* description_string, const int32_t* metadata_buffer_data, size_t num_elements); @@ -156,12 +172,17 @@ ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op, ModelBuilder::Node ModelBuilder::AddNode( ModelBuilder::Operator op, std::initializer_list inputs, - std::initializer_list outputs) { + std::initializer_list outputs, + std::initializer_list intermediates) { TFLITE_DCHECK(next_operator_id_ <= kMaxOperators); operators_[next_operator_id_] = tflite::CreateOperator( *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()), builder_->CreateVector(outputs.begin(), outputs.size()), - BuiltinOptions_NONE); + BuiltinOptions_NONE, + /*builtin_options=*/0, + /*custom_options=*/0, tflite::CustomOptionsFormat_FLEXBUFFERS, + /*mutating_variable_inputs =*/0, + builder_->CreateVector(intermediates.begin(), intermediates.size())); next_operator_id_++; return 
next_operator_id_ - 1; } @@ -195,7 +216,7 @@ const Model* ModelBuilder::BuildModel( buffers[i] = metadata_buffers_[i - 1]; } - // TFLM only supports single subgraph. + // Default to single subgraph model. constexpr size_t subgraphs_size = 1; // Find out number of subgraph inputs. @@ -222,7 +243,7 @@ const Model* ModelBuilder::BuildModel( *builder_, 0, builder_->CreateVector(operator_codes_, next_operator_code_id_), builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), + builder_->CreateString("test_model"), builder_->CreateVector(buffers, buffer_size), 0, builder_->CreateVector(metadata_, ModelBuilder::nbr_of_metadata_buffers_)); @@ -231,7 +252,7 @@ const Model* ModelBuilder::BuildModel( *builder_, 0, builder_->CreateVector(operator_codes_, next_operator_code_id_), builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), + builder_->CreateString("test_model"), builder_->CreateVector(buffers, buffer_size)); } @@ -261,13 +282,16 @@ const Model* BuildSimpleStatefulModel() { const int op_id = model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op"); - const int input_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); - const int median_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); + const int input_tensor = model_builder.AddTensor(TensorType_INT8, {3}); + const int median_tensor = model_builder.AddTensor(TensorType_INT8, {3}); const int invoke_count_tensor = model_builder.AddTensor(TensorType_INT32, {1}); + const int intermediate_tensor = + model_builder.AddTensor(TensorType_FLOAT32, {0}); model_builder.AddNode(op_id, {input_tensor}, - {median_tensor, invoke_count_tensor}); + {median_tensor, invoke_count_tensor}, + {intermediate_tensor}); return model_builder.BuildModel({input_tensor}, {median_tensor, invoke_count_tensor}); } @@ -341,6 +365,149 @@ const Model* BuildModelWithOfflinePlanning(int number_of_tensors, node_conn[0].input, node_conn[num_conns - 1].output, 
num_subgraph_inputs); } +const Model* BuildModelWithUnusedInputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = {CreateBuffer(*builder)}; + constexpr size_t tensor_shape_size = 2; + const int32_t tensor_shape[tensor_shape_size] = {1, 64}; + constexpr size_t tensors_size = 4; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_input_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_output_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_tensor"), 0, false), + }; + constexpr size_t inputs_size = 2; + const int32_t inputs[inputs_size] = {0, 1}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {2}; + constexpr size_t operator_inputs_size = 1; + const int32_t operator_inputs[operator_inputs_size] = {0}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, 
outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildModelWithUnusedOperatorOutputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = {CreateBuffer(*builder)}; + constexpr size_t tensor_shape_size = 2; + const int32_t tensor_shape[tensor_shape_size] = {1, 64}; + constexpr size_t tensors_size = 2; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_output_tensor"), 0, false)}; + constexpr size_t inputs_size = 0; + const int32_t inputs[inputs_size] = {}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {0}; + constexpr size_t operator_inputs_size = 0; + const int32_t operator_inputs[operator_inputs_size] = {}; + constexpr size_t operator_outputs_size = 2; + const int32_t operator_outputs[operator_outputs_size] = {0, 1}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { 
+ CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildModelWith256x256Tensor() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder model_builder(fb_builder); + + const int op_id = + model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); + const int input1_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + const int input2_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + const int output_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + + model_builder.AddNode(op_id, {input1_tensor, input2_tensor}, {output_tensor}); + return model_builder.BuildModel({input1_tensor, input2_tensor}, + {output_tensor}); +} + const Model* BuildSimpleMockModel() { using flatbuffers::Offset; flatbuffers::FlatBufferBuilder* 
builder = BuilderInstance(); @@ -362,7 +529,7 @@ const Model* BuildSimpleMockModel() { builder->CreateString("test_input_tensor"), 0, false), CreateTensor(*builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 1, + TensorType_INT8, 1, builder->CreateString("test_weight_tensor"), 0, false), CreateTensor(*builder, builder->CreateVector(tensor_shape, tensor_shape_size), @@ -418,6 +585,117 @@ const Model* BuildSimpleMockModel() { return model; } +#ifdef USE_TFLM_COMPRESSION + +const flatbuffers::span BuildLutMetadata( + uint32_t tensor_index, uint32_t value_table_buffer_index, + uint32_t bit_width) { + using flatbuffers::Offset; + namespace compression = tflite::micro::compression; + + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + auto lut_tensor = compression::CreateLutTensor( + *builder, tensor_index, value_table_buffer_index, bit_width); + auto subgraph = compression::CreateSubgraph( + *builder, builder->CreateVector(&lut_tensor, 1)); + constexpr uint32_t schema_version = 1; + auto metadata = compression::CreateMetadata( + *builder, schema_version, builder->CreateVector(&subgraph, 1)); + compression::FinishMetadataBuffer(*builder, metadata); + return builder->GetBufferSpan(); +} + +const Model* BuildSimpleMockModelCompressed() { + using flatbuffers::Offset; + using flatbuffers::Vector; + using tflite::micro::compression::LutTensor; + constexpr uint32_t kEmptyBuffer = 0; + constexpr uint32_t kMetadataBuffer = 1; + constexpr uint32_t kWeightsBuffer = 2; + constexpr uint32_t kValueTableBuffer = 3; + // constexpr uint32_t kInputTensor = 0; + constexpr uint32_t kWeightsTensor = 1; + // constexpr uint32_t kOutputTensor = 2; + constexpr uint32_t kCompressedBitWidth = 4; + + auto lut_tensors_span = + BuildLutMetadata(kWeightsTensor, kValueTableBuffer, kCompressedBitWidth); + + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + // [1, 2, 3, 4, 5, -1, -2, -3, -4, -5, 1, 2, 3, 4, 5] + const std::initializer_list 
weights_data = {0x01, 0x23, 0x45, 0x98, + 0x76, 0x01, 0x23, 0x40}; + const std::initializer_list value_table_data = {1, 2, 3, 4, 5, + -1, -5, -4, -3, -2}; + auto value_table_offset = builder->CreateVector(value_table_data).o; + const std::initializer_list> buffers = { + CreateBuffer(*builder), + CreateBuffer(*builder, builder->CreateVector(lut_tensors_span)), + CreateBuffer(*builder, builder->CreateVector(weights_data)), + CreateBuffer(*builder, Offset>(value_table_offset)), + }; + + const std::initializer_list input_shape = {1}; + const std::initializer_list weights_shape = {15}; + const std::initializer_list output_shape = weights_shape; + const std::initializer_list> tensors = { + CreateTensor(*builder, builder->CreateVector(input_shape), + TensorType_INT16, kEmptyBuffer, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(weights_shape), + TensorType_INT16, kWeightsBuffer, + builder->CreateString("test_weight_tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(output_shape), + TensorType_INT16, kEmptyBuffer, + builder->CreateString("test_output_tensor"), 0, false), + }; + + const std::initializer_list subgraph_inputs = {0}; + const std::initializer_list subgraph_outputs = {2}; + const std::initializer_list operator_inputs = {0, 1}; + const std::initializer_list operator_outputs = {2}; + const std::initializer_list> operators = { + CreateOperator(*builder, 0, builder->CreateVector(operator_inputs), + builder->CreateVector(operator_outputs), + BuiltinOptions_NONE), + }; + + const std::initializer_list> subgraphs = { + CreateSubGraph(*builder, builder->CreateVector(tensors), + builder->CreateVector(subgraph_inputs), + builder->CreateVector(subgraph_outputs), + builder->CreateVector(operators), + builder->CreateString("test_subgraph")), + }; + + const std::initializer_list> operator_codes = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "broadcast_add_op", + /*version=*/0, 
BuiltinOperator_CUSTOM), + }; + + const std::initializer_list> metadata = { + CreateMetadata(*builder, + builder->CreateString(kCompressionMetadataString), + kMetadataBuffer), + }; + + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes), + builder->CreateVector(subgraphs), builder->CreateString("test_model"), + builder->CreateVector(buffers), 0, builder->CreateVector(metadata)); + + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + + return model; +} + +#endif // USE_TFLM_COMPRESSION + const Model* BuildComplexMockModel() { using flatbuffers::Offset; flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); @@ -458,7 +736,7 @@ const Model* BuildComplexMockModel() { 0, true /* is_variable */), CreateTensor( *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0, + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_1"), 0, false /* is_variable */), // Op 1 output / Op 2 input: CreateTensor( @@ -472,7 +750,7 @@ const Model* BuildComplexMockModel() { 0, true /* is_variable */), CreateTensor( *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0, + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_2"), 0, false /* is_variable */), // Op 2 output / Op 3 input: CreateTensor( @@ -486,7 +764,7 @@ const Model* BuildComplexMockModel() { 0, true /* is_variable */), CreateTensor( *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0, + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_3"), 0, false /* is_variable */), // Op 3 output: CreateTensor( @@ -638,14 +916,644 @@ const Model* BuildSimpleMultipleInputsModel() { return model; } +const Model* 
BuildSimpleModelWithSubgraphsAndIf() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t condition_tensor_shape[] = {1}; + const int32_t data_tensor_shape[] = {1, 2}; + constexpr size_t tensors_size = 4; + const Offset subgraph1_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph2_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph3_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + 
builder->CreateString("output_tensor"), 0, false), + }; + + constexpr size_t if_inputs_size = 3; + const int32_t if_inputs[if_inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t if_outputs[outputs_size] = {3}; + constexpr size_t operator_inputs_size = 2; + const int32_t operator_inputs[operator_inputs_size] = {0, 1}; + const int32_t operator_outputs[outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset subgraph1_operators[operators_size] = { + CreateOperator( + *builder, 0, builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator( + *builder, 1, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + BuiltinOptions_NONE), + }; + const Offset subgraph3_operators[operators_size] = { + CreateOperator( + *builder, 2, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4), + builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("then_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph3_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + 
builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph3_operators, operators_size), + builder->CreateString("else_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_IF), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_MUL), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleModelWithIfAndEmptySubgraph() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t condition_tensor_shape[] = {1}; + const int32_t data_tensor_shape[] = {1, 2}; + constexpr size_t tensors_size = 4; + const Offset subgraph1_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + 
CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph2_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph3_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + + constexpr size_t if_inputs_size = 3; + const int32_t if_inputs[if_inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t if_outputs[outputs_size] = {3}; + constexpr size_t operator_inputs_size = 2; + const int32_t operator_inputs[operator_inputs_size] = {0, 1}; + const int32_t operator_outputs[outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset subgraph1_operators[operators_size] = { + CreateOperator( + *builder, 0, builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator( + *builder, 1, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, 
outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4), + builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("then_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph3_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), 0, + builder->CreateString("else_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_IF), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_MUL), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleModelWithSubgraphsAndWhile() { + using flatbuffers::Offset; + 
flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t data_tensor_shape[] = {1, 1}; + constexpr size_t while_tensors_size = 4; + constexpr size_t op_tensors_size = 3; + const Offset subgraph0_tensors[while_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor1"), 0, false), + }; + const Offset subgraph1_tensors[op_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition_tensor"), 0, false), + }; + const Offset subgraph2_tensors[op_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor0"), 0, false), + }; + + constexpr size_t inputs_size = 
2; + const int32_t inputs[inputs_size] = {0, 1}; + constexpr size_t while_outputs_size = 2; + const int32_t while_outputs[while_outputs_size] = {2, 3}; + constexpr size_t cond_outputs_size = 1; + const int32_t cond_outputs[cond_outputs_size] = {2}; + constexpr size_t add_outputs_size = 1; + const int32_t add_outputs[add_outputs_size] = {2}; + constexpr size_t add_subgraph_outputs_size = 2; + const int32_t add_subgraph_outputs[add_subgraph_outputs_size] = {2, 1}; + constexpr size_t operators_size = 1; + const Offset subgraph0_operators[operators_size] = { + CreateOperator(*builder, 0, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(while_outputs, while_outputs_size), + BuiltinOptions_WhileOptions, + CreateWhileOptions(*builder, 1, 2).Union()), + }; + const Offset subgraph1_operators[operators_size] = { + CreateOperator(*builder, 1, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(cond_outputs, cond_outputs_size), + BuiltinOptions_NONE), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator(*builder, 2, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(add_outputs, add_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph0_tensors, 4), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(while_outputs, while_outputs_size), + builder->CreateVector(subgraph0_operators, operators_size), + builder->CreateString("while_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 3), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(cond_outputs, cond_outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("cond_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(inputs, inputs_size), + 
builder->CreateVector(add_subgraph_outputs, + add_subgraph_outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("body_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_WHILE), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_LESS), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +// Build a model with If and two subgraphs: two data tensors A1 of size 2, A2 of +// size 4 are first concatenated, then cut to a new tensor A3 of size 3; the new +// tensor A3 of size 3 is then concatenated with A2 tensor of size 4 to produce +// a final output tensor A4. This model is specially crafted to capture the +// corner case outlined in go/avoid-memory-corruption-in-if-operator. 
+// +// Subgraph0 +// A0(1) A2_0(4) A1_0(2) +// | | | ---+ +// v v v | +// +--------------+ | +// | IF | | +// +------+-------+ | +// | A3_0(3) | +// v | +// +--------------+ | +// | CUSTOM |<---+ +// +------+-------+ +// | +// v +// A4_0(8) +// +// Subgraph1/2 +// A1_1(2) A2_1(4) +// | | +// v v +// +---------------+ +// | CUSTOM | +// +-------+-------+ +// | +// v A3_1(3) +// +// And it leads to memory plan as below +// +// Subgraph0 Layout +// +// +// <------------A4_0 -------------> <----- A2_0-------> <----A3_0 ---> +// +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+ +// | | | | | | | | | 3 | 4 | 5 | 6 | | | | +// +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+ +// +// +----+----+----+ +// | 1 | 2 | A0 | +// +----+----+----+ +// <---A1_0--> +// +// Subgraph 1 Layout +// +// +----+----+----+----+----+----+----+----+----+ +// | | | | | | | | | | +// +----+----+----+----+----+----+----+----+----+ +// +// +// <------A2_1 -------><----A3_1 ---><--A1_1---> +// +// +// A1_1 of subgraph 1 will overlap with A2_0 of subgraph 0. +// In a buggy implementation of IF, two overwrite may happen: +// 1. copying input from A1_0 to A1_1 overwrites A2_0 before A2_0 is copied to +// A2_1; thus subgraph 1 produce incorrect output. +// 2. 
copying output from A3_1 to A4_0 overwrites A1_0, which should remain +// intact so that it can be used by the OP after the IF operator in subgraph 0 +// + +const Model* BuildModelWithIfAndSubgraphInputTensorOverlap() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr TensorType kTensorType = TensorType_INT32; + constexpr int kBlockSize = + tflite::MicroArenaBufferAlignment() / sizeof(int32_t); + constexpr size_t kBuffersCount = 1; + const Offset buffers[kBuffersCount] = { + CreateBuffer(*builder), + }; + const int32_t kConditionTensorShape[] = {1}; + const int32_t kIfInput1TensorShape[] = {2 * kBlockSize}; + const int32_t kIfInput2TensorShape[] = {4 * kBlockSize}; + const int32_t kIfOutputTensorShape[] = {3 * kBlockSize}; + const int32_t kFinalOutputTensorShape[] = {8 * kBlockSize}; + constexpr size_t kSubgraph0TensorsCount = 5; + const Offset kSubgraph0Tensors[kSubgraph0TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kConditionTensorShape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor1"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor2"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("if_output_tensor"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kFinalOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("final_output_tensor"), + 0, false), + }; + + // Subgraph 1 is the chosen path if condition tensor in IF is true. 
+ constexpr size_t kSubgraph1TensorsCount = 3; + const Offset kSubgraph1Tensors[kSubgraph1TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_output_tensor"), 0, false), + }; + + // Subgraph 2 is the chosen path if condition tensor in IF is false + constexpr size_t kSubgraph2TensorsCount = 3; + const Offset kSubgraph2Tensors[kSubgraph2TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor1"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor2"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("if_output_tensor"), 0, + false), + }; + + constexpr int kIfOpCodeIndex = 0; + constexpr int kCustomOpCodeIndex = 1; + + constexpr size_t kIfInputsCount = 3; + const int32_t kIfInputs[kIfInputsCount] = {0, 1, 2}; + constexpr size_t kOutputsCount = 1; + const int32_t kIfOutputs[kOutputsCount] = {3}; + constexpr size_t kOpAfterIfInputsCount = 2; + const int32_t kOpAfterIfInputs[kOpAfterIfInputsCount] = {3, 2}; + const int32_t kOpAfterIfOutputs[kOutputsCount] = {4}; + constexpr size_t kOperatorsCount = 2; + const Offset kSubgraph0Operators[kOperatorsCount] = { + CreateOperator(*builder, kIfOpCodeIndex, + builder->CreateVector(kIfInputs, kIfInputsCount), + builder->CreateVector(kIfOutputs, kOutputsCount), + BuiltinOptions_IfOptions, + CreateIfOptions(*builder, 1, 2).Union()), + CreateOperator( + *builder, kCustomOpCodeIndex, + 
builder->CreateVector(kOpAfterIfInputs, kOpAfterIfInputsCount), + builder->CreateVector(kOpAfterIfOutputs, kOutputsCount)), + }; + + constexpr size_t kSubgraph1InputsCount = 2; + const int32_t kSubgraph1Inputs[kSubgraph1InputsCount] = {0, 1}; + constexpr size_t kSubgraph1OutputsCount = 1; + const int32_t kSubgraph1Outputs[kSubgraph1OutputsCount] = {2}; + constexpr size_t kSubgraph1OperatorsCount = 1; + const Offset kSubgraph1Operators[kSubgraph1OperatorsCount] = { + CreateOperator( + *builder, kCustomOpCodeIndex, + builder->CreateVector(kSubgraph1Inputs, kSubgraph1InputsCount), + builder->CreateVector(kSubgraph1Outputs, kSubgraph1OutputsCount), + BuiltinOptions_NONE), + }; + + constexpr size_t kSubgraph2InputsCount = 2; + const int32_t kSubgraph2Inputs[kSubgraph2InputsCount] = {0, 1}; + constexpr size_t kSubgraph2OutputsCount = 1; + const int32_t kSubgraph2Outputs[kSubgraph2OutputsCount] = {2}; + constexpr size_t kSubgraph2OperatorsCount = 1; + const Offset kSubgraph2Operators[kSubgraph2OperatorsCount] = { + CreateOperator( + *builder, kCustomOpCodeIndex, + builder->CreateVector(kSubgraph2Inputs, kSubgraph2InputsCount), + builder->CreateVector(kSubgraph2Outputs, kSubgraph2OutputsCount), + BuiltinOptions_NONE), + }; + + constexpr size_t kSubgraphsCount = 3; + const Offset kSubgraphs[kSubgraphsCount] = { + CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph0Tensors, kSubgraph0TensorsCount), + builder->CreateVector(kIfInputs, kIfInputsCount), + builder->CreateVector(kOpAfterIfOutputs, kOutputsCount), + builder->CreateVector(kSubgraph0Operators, kOperatorsCount), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph1Tensors, kSubgraph1TensorsCount), + builder->CreateVector(kSubgraph1Inputs, kSubgraph1InputsCount), + builder->CreateVector(kSubgraph1Outputs, kSubgraph1OutputsCount), + builder->CreateVector(kSubgraph1Operators, kSubgraph1OperatorsCount), + builder->CreateString("then_subgraph")), + 
CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph2Tensors, kSubgraph2TensorsCount), + builder->CreateVector(kSubgraph2Inputs, kSubgraph2InputsCount), + builder->CreateVector(kSubgraph2Outputs, kSubgraph2OutputsCount), + builder->CreateVector(kSubgraph2Operators, kSubgraph2OperatorsCount), + builder->CreateString("else_subgraph")), + }; + + constexpr size_t kOperatorCodesCount = 2; + const Offset kOperatorCodes[kOperatorCodesCount] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, "if", + /*version=*/0, BuiltinOperator_IF), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "custom_packer_op", + /*version=*/0, BuiltinOperator_CUSTOM), + }; + const Offset kModelOffset = CreateModel( + *builder, 0, builder->CreateVector(kOperatorCodes, kOperatorCodesCount), + builder->CreateVector(kSubgraphs, kSubgraphsCount), + builder->CreateString("test_model"), + builder->CreateVector(buffers, kBuffersCount)); + FinishModelBuffer(*builder, kModelOffset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +// Mock model with one main subgraph containing a single CALL_ONCE op (with null +// inputs and outputs) which invokes a second subgraph which has null inputs and +// outputs. 
+const Model* BuildSimpleMockModelWithNullInputsOutputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {0}; + constexpr size_t tensors_size = 1; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor1"), 0, false), + }; + constexpr size_t subgraph0_inputs_size = 1; + const int32_t subgraph0_inputs[subgraph0_inputs_size] = {0}; + constexpr size_t subgraph0_outputs_size = 1; + const int32_t subgraph0_outputs[subgraph0_outputs_size] = {0}; + constexpr size_t operators_size = 1; + const Offset subgraph0_operators[operators_size] = { + CreateOperator(*builder, 0, {}, {}, BuiltinOptions_CallOnceOptions, + CreateCallOnceOptions(*builder, 1).Union()), + }; + const Offset subgraph1_operators[operators_size] = { + CreateOperator(*builder, 1, {}, {}, BuiltinOptions_NONE)}; + constexpr size_t subgraphs_size = 2; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph( + *builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(subgraph0_inputs, subgraph0_inputs_size), + builder->CreateVector(subgraph0_outputs, subgraph0_outputs_size), + builder->CreateVector(subgraph0_operators, operators_size), + builder->CreateString("main_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), {}, + {}, + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("secondary subgraph")), + }; + constexpr size_t operator_codes_size = 2; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "call_once_op", + /*version=*/0, BuiltinOperator_CALL_ONCE), + 
CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, "no_op", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + } // namespace -const TfLiteRegistration* SimpleStatefulOp::getRegistration() { +const TFLMRegistration* SimpleStatefulOp::getRegistration() { return GetMutableRegistration(); } -TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() { - static TfLiteRegistration r; +TFLMRegistration* SimpleStatefulOp::GetMutableRegistration() { + static TFLMRegistration r; r.init = Init; r.prepare = Prepare; r.invoke = Invoke; @@ -654,10 +1562,6 @@ TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() { void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocateBufferForEval == nullptr); - TFLITE_DCHECK(context->GetScratchBuffer == nullptr); - TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr); - void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData)); OpData* data = reinterpret_cast(raw); *data = {}; @@ -669,9 +1573,12 @@ TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context, OpData* data = reinterpret_cast(node->user_data); // Make sure that the input is in uint8_t with at least 1 data entry. 
- const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - if (input->type != kTfLiteUInt8) return kTfLiteError; + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + + if (input->type != kTfLiteInt8) return kTfLiteError; if (NumElements(input->dims) == 0) return kTfLiteError; // Allocate a temporary buffer with the same size of input for sorting. @@ -683,6 +1590,7 @@ TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context, context->AllocatePersistentBuffer(context, sizeof(int))); *data->invoke_count = 0; + micro_context->DeallocateTempTfLiteTensor(input); return kTfLiteOk; } @@ -691,9 +1599,10 @@ TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context, OpData* data = reinterpret_cast(node->user_data); *data->invoke_count += 1; - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - const uint8_t* input_data = GetTensorData(input); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const uint8_t* input_data = input->data.uint8; int size = NumElements(input->dims); uint8_t* sorting_buffer = reinterpret_cast( @@ -711,26 +1620,26 @@ TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context, } } - TfLiteTensor* median; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kMedianTensor, &median)); - uint8_t* median_data = GetTensorData(median); - TfLiteTensor* invoke_count; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kInvokeCount, &invoke_count)); - int32_t* invoke_count_data = GetTensorData(invoke_count); + TfLiteEvalTensor* median = + tflite::micro::GetEvalOutput(context, node, kMedianTensor); + TF_LITE_ENSURE(context, median != nullptr); + uint8_t* median_data = median->data.uint8; + TfLiteEvalTensor* 
invoke_count = + tflite::micro::GetEvalOutput(context, node, kInvokeCount); + TF_LITE_ENSURE(context, invoke_count != nullptr); + int32_t* invoke_count_data = invoke_count->data.i32; median_data[0] = sorting_buffer[size / 2]; invoke_count_data[0] = *data->invoke_count; return kTfLiteOk; } -const TfLiteRegistration* MockCustom::getRegistration() { +const TFLMRegistration* MockCustom::getRegistration() { return GetMutableRegistration(); } -TfLiteRegistration* MockCustom::GetMutableRegistration() { - static TfLiteRegistration r; +TFLMRegistration* MockCustom::GetMutableRegistration() { + static TFLMRegistration r; r.init = Init; r.prepare = Prepare; r.invoke = Invoke; @@ -755,14 +1664,15 @@ TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) { } TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); const int32_t* input_data = input->data.i32; - const TfLiteTensor* weight; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight)); + const TfLiteEvalTensor* weight = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, weight != nullptr); const uint8_t* weight_data = weight->data.uint8; - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); int32_t* output_data = output->data.i32; output_data[0] = 0; // Catch output tensor sharing memory with an input tensor @@ -772,12 +1682,12 @@ TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { bool MockCustom::freed_ = false; -const TfLiteRegistration* MultipleInputs::getRegistration() { +const TFLMRegistration* MultipleInputs::getRegistration() { 
return GetMutableRegistration(); } -TfLiteRegistration* MultipleInputs::GetMutableRegistration() { - static TfLiteRegistration r; +TFLMRegistration* MultipleInputs::GetMutableRegistration() { + static TFLMRegistration r; r.init = Init; r.prepare = Prepare; r.invoke = Invoke; @@ -804,18 +1714,20 @@ TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) { } TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); const int32_t* input_data = input->data.i32; - const TfLiteTensor* input1; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1)); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, input1 != nullptr); const int32_t* input_data1 = input1->data.i32; - const TfLiteTensor* input2; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2)); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, 2); + TF_LITE_ENSURE(context, input2 != nullptr); const int32_t* input_data2 = input2->data.i32; - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); int32_t* output_data = output->data.i32; output_data[0] = 0; // Catch output tensor sharing memory with an input tensor @@ -825,14 +1737,77 @@ TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) { bool MultipleInputs::freed_ = false; -AllOpsResolver GetOpResolver() { - AllOpsResolver op_resolver; - op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration()); - op_resolver.AddCustom("simple_stateful_op", - 
SimpleStatefulOp::GetMutableRegistration()); - op_resolver.AddCustom("multiple_inputs_op", - MultipleInputs::GetMutableRegistration()); - return op_resolver; +const TFLMRegistration* NoOp::getRegistration() { + return GetMutableRegistration(); +} + +TFLMRegistration* NoOp::GetMutableRegistration() { + static TFLMRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* NoOp::Init(TfLiteContext* context, const char* buffer, size_t length) { + // We don't support delegate in TFL micro. This is a weak check to test if + // context struct being zero-initialized. + TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); + freed_ = false; + // Do nothing. + return nullptr; +} + +void NoOp::Free(TfLiteContext* context, void* buffer) { freed_ = true; } + +TfLiteStatus NoOp::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus NoOp::Invoke(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +bool NoOp::freed_ = false; + +TfLiteStatus GetTestingOpResolver( + tflite::testing::TestingOpResolver& op_resolver) { + TF_LITE_ENSURE_STATUS(op_resolver.AddCustom( + "mock_custom", MockCustom::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS(op_resolver.AddCustom( + "simple_stateful_op", SimpleStatefulOp::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS(op_resolver.AddCustom( + "multiple_inputs_op", MultipleInputs::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS( + op_resolver.AddCustom("no_op", NoOp::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS(op_resolver.AddCustom( + "custom_packer_op", PackerOp::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS(op_resolver.AddCustom( + "broadcast_add_op", BroadcastAddOp::GetMutableRegistration())); + TF_LITE_ENSURE_STATUS(op_resolver.AddIf()); + return kTfLiteOk; +} + +const Model* GetModelWithUnusedInputs() { + static Model* model = nullptr; + if (!model) { + model = 
const_cast(BuildModelWithUnusedInputs()); + } + return model; +} + +const Model* GetModelWithUnusedOperatorOutputs() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildModelWithUnusedOperatorOutputs()); + } + return model; +} + +const Model* GetModelWith256x256Tensor() { + static const Model* model = BuildModelWith256x256Tensor(); + return model; } const Model* GetSimpleMockModel() { @@ -843,6 +1818,18 @@ const Model* GetSimpleMockModel() { return model; } +#ifdef USE_TFLM_COMPRESSION + +const Model* GetSimpleMockModelCompressed() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMockModelCompressed()); + } + return model; +} + +#endif // USE_TFLM_COMPRESSION + const Model* GetSimpleMultipleInputsModel() { static Model* model = nullptr; if (!model) { @@ -851,6 +1838,46 @@ const Model* GetSimpleMultipleInputsModel() { return model; } +const Model* GetSimpleModelWithSubgraphsAndIf() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithSubgraphsAndIf()); + } + return model; +} + +const Model* GetSimpleModelWithIfAndEmptySubgraph() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithIfAndEmptySubgraph()); + } + return model; +} + +const Model* GetSimpleModelWithSubgraphsAndWhile() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithSubgraphsAndWhile()); + } + return model; +} + +const Model* GetModelWithIfAndSubgraphInputTensorOverlap() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildModelWithIfAndSubgraphInputTensorOverlap()); + } + return model; +} + +const Model* GetSimpleModelWithNullInputsAndOutputs() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMockModelWithNullInputsOutputs()); + } + return model; +} + const Model* GetComplexMockModel() { static Model* model = nullptr; if (!model) { @@ -903,13 +1930,21 @@ const Tensor* 
Create1dFlatbufferTensor(int size, bool is_variable) { const Tensor* CreateQuantizedFlatbufferTensor(int size) { using flatbuffers::Offset; flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + constexpr size_t quant_params_size = 1; + const float min_array[quant_params_size] = {0.1f}; + const float max_array[quant_params_size] = {0.2f}; + const float scale_array[quant_params_size] = {0.3f}; + const int64_t zero_point_array[quant_params_size] = {100ll}; + const Offset quant_params = CreateQuantizationParameters( *builder, - /*min=*/builder->CreateVector({0.1f}), - /*max=*/builder->CreateVector({0.2f}), - /*scale=*/builder->CreateVector({0.3f}), - /*zero_point=*/builder->CreateVector({100ll})); + /*min=*/builder->CreateVector(min_array, quant_params_size), + /*max=*/builder->CreateVector(max_array, quant_params_size), + /*scale=*/ + builder->CreateVector(scale_array, quant_params_size), + /*zero_point=*/ + builder->CreateVector(zero_point_array, quant_params_size)); constexpr size_t tensor_shape_size = 1; const int32_t tensor_shape[tensor_shape_size] = {size}; @@ -971,22 +2006,10 @@ int TestStrcmp(const char* a, const char* b) { *reinterpret_cast(b); } -// Wrapper to forward kernel errors to the interpreter's error reporter. -void ReportOpError(struct TfLiteContext* context, const char* format, ...) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - ErrorReporter* error_reporter = static_cast(context->impl_); - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(error_reporter, format, args); - va_end(args); -#endif -} - // Create a TfLiteIntArray from an array of ints. The first element in the // supplied array must be the size of the array expressed as an int. TfLiteIntArray* IntArrayFromInts(const int* int_array) { - return const_cast( - reinterpret_cast(int_array)); + return reinterpret_cast(const_cast(int_array)); } // Create a TfLiteFloatArray from an array of floats. 
The first element in the @@ -999,60 +2022,17 @@ TfLiteFloatArray* FloatArrayFromFloats(const float* floats) { return reinterpret_cast(const_cast(floats)); } -TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, - TfLiteIntArray* dims, float input_scale, - float weights_scale, bool is_variable) { - float bias_scale = input_scale * weights_scale; - tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); - - // Quantized int32_t tensors always have a zero point of 0, since the range of - // int32_t values is large, and because zero point costs extra cycles during - // processing. - TfLiteTensor result = - CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); - return result; -} - -// Quantizes int32_t bias tensor with per-channel weights determined by input -// scale multiplied by weight scale for each channel. -TfLiteTensor CreatePerChannelQuantizedBiasTensor( - const float* input, int32_t* quantized, TfLiteIntArray* dims, - float input_scale, float* weight_scales, float* scales, int* zero_points, - TfLiteAffineQuantization* affine_quant, int quantized_dimension, - bool is_variable) { - int input_size = ElementCount(*dims); - int num_channels = dims->data[quantized_dimension]; - // First element is reserved for array length - zero_points[0] = num_channels; - scales[0] = static_cast(num_channels); - float* scales_array = &scales[1]; - for (int i = 0; i < num_channels; i++) { - scales_array[i] = input_scale * weight_scales[i]; - zero_points[i + 1] = 0; - } - - SymmetricPerChannelQuantize(input, quantized, input_size, - num_channels, scales_array); - - affine_quant->scale = FloatArrayFromFloats(scales); - affine_quant->zero_point = IntArrayFromInts(zero_points); - affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); - result.quantization = {kTfLiteAffineQuantization, affine_quant}; - return result; -} - TfLiteTensor 
CreateSymmetricPerChannelQuantizedTensor( const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, int* zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable) { + int quantized_dimension, bool is_variable, TfLiteType tensor_weight_type) { int channel_count = dims->data[quantized_dimension]; + scales[0] = static_cast(channel_count); zero_points[0] = channel_count; SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized, - &scales[1]); + &scales[1], tensor_weight_type); for (int i = 0; i < channel_count; i++) { zero_points[i + 1] = 0; @@ -1061,8 +2041,8 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( affine_quant->scale = FloatArrayFromFloats(scales); affine_quant->zero_point = IntArrayFromInts(zero_points); affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); + TfLiteTensor result = + CreateTensor(quantized, dims, is_variable, tensor_weight_type); result.quantization = {kTfLiteAffineQuantization, affine_quant}; return result; } @@ -1075,5 +2055,18 @@ size_t GetModelTensorCount(const Model* model) { return 0; } +void PackInt4ValuesDenselyInPlace(uint8_t* src_buffer, int buffer_size) { + for (int i = 0; i < buffer_size; ++i) { + if (i % 2 == 0) { + src_buffer[i / 2] = src_buffer[i] & 0x0F; + } else { + src_buffer[i / 2] |= src_buffer[i] << 4; + } + } + // the rest of the buffer should be empty since half of it is packed with the + // values + memset(src_buffer + (buffer_size + 1) / 2, 0, buffer_size / 2); +} + } // namespace testing } // namespace tflite diff --git a/src/tensorflow/lite/micro/test_helpers.h b/src/tensorflow/lite/micro/test_helpers.h index 4c8b7c2..1ef9c74 100644 --- a/src/tensorflow/lite/micro/test_helpers.h +++ b/src/tensorflow/lite/micro/test_helpers.h @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2024 The TensorFlow Authors. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,24 +16,33 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ #define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ -// Useful functions for writing tests. - +#include +#include #include #include +#include -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite//kernels/internal/tensor_ctypes.h" +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/all_ops_resolver.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" #include "tensorflow/lite/micro/micro_utils.h" #include "tensorflow/lite/portable_type_to_tflitetype.h" #include "tensorflow/lite/schema/schema_generated.h" +#ifdef USE_TFLM_COMPRESSION + +#include "tensorflow/lite/micro/compression.h" +#include "tensorflow/lite/micro/micro_log.h" + +#endif // TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ + namespace tflite { namespace testing { constexpr int kOfflinePlannerHeaderSize = 3; +using TestingOpResolver = tflite::MicroMutableOpResolver<10>; struct NodeConnection_ { std::initializer_list input; @@ -57,8 +66,8 @@ class SimpleStatefulOp { }; public: - static const TfLiteRegistration* getRegistration(); - static TfLiteRegistration* GetMutableRegistration(); + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); static void* Init(TfLiteContext* context, const char* buffer, size_t length); static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); @@ -66,8 +75,8 @@ class SimpleStatefulOp { class MockCustom { public: - static const TfLiteRegistration* getRegistration(); - 
static TfLiteRegistration* GetMutableRegistration(); + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); static void* Init(TfLiteContext* context, const char* buffer, size_t length); static void Free(TfLiteContext* context, void* buffer); static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); @@ -80,8 +89,21 @@ class MockCustom { // the sum of the inputs. class MultipleInputs { public: - static const TfLiteRegistration* getRegistration(); - static TfLiteRegistration* GetMutableRegistration(); + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static void Free(TfLiteContext* context, void* buffer); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + + static bool freed_; +}; + +// A simple no-op operator. +class NoOp { + public: + static const TFLMRegistration* getRegistration(); + static TFLMRegistration* GetMutableRegistration(); static void* Init(TfLiteContext* context, const char* buffer, size_t length); static void Free(TfLiteContext* context, void* buffer); static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); @@ -91,16 +113,31 @@ class MultipleInputs { }; // Returns an Op Resolver that can be used in the testing code. -AllOpsResolver GetOpResolver(); +TfLiteStatus GetTestingOpResolver(TestingOpResolver& op_resolver); // Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input, // 1 layer of weights, 1 output Tensor, and 1 operator. const Model* GetSimpleMockModel(); +#ifdef USE_TFLM_COMPRESSION + +// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input, +// 1 layer of weights, 1 output Tensor, and 1 operator (BroadcastAddOp). The +// weights tensor is compressed. 
+const Model* GetSimpleMockModelCompressed(); + +#endif // USE_TFLM_COMPRESSION + // Returns a flatbuffer TensorFlow Lite model with more inputs, variable // tensors, and operators. const Model* GetComplexMockModel(); +// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input, +// 1 layer of weights, 1 output Tensor, and 1 operator. +// The size of all three tensors is 256 x 256, which is larger than what other +// models provide from this test helper. +const Model* GetModelWith256x256Tensor(); + // Returns a simple flatbuffer model with two branches. const Model* GetSimpleModelWithBranch(); @@ -126,9 +163,33 @@ const Model* GetModelWithOfflinePlanning(int num_tensors, int num_conns, int num_subgraph_inputs = 0); +// Returns a flatbuffer with a single operator, two inputs (one unused) and one +// output. +const Model* GetModelWithUnusedInputs(); + +// Returns a flatbuffer with a single operator, zero inputs and two outputs +// (one unused). +const Model* GetModelWithUnusedOperatorOutputs(); + // Returns a flatbuffer model with `simple_stateful_op` const Model* GetSimpleStatefulModel(); +// Returns a flatbuffer model with "if" and two subgraphs. +const Model* GetSimpleModelWithSubgraphsAndIf(); + +// Returns a flatbuffer model with "if" and two subgraphs one of which is empty. +const Model* GetSimpleModelWithIfAndEmptySubgraph(); + +// Returns a flatbuffer model with "while" and three subgraphs. +const Model* GetSimpleModelWithSubgraphsAndWhile(); + +// Returns a flatbuffer model with "if" and two subgraphs and the input tensor 1 +// of "if" subgraph overlaps with the input tensor 2 of subgraph 1. +const Model* GetModelWithIfAndSubgraphInputTensorOverlap(); + +// Returns a flatbuffer model with null subgraph/operator inputs and outputs. +const Model* GetSimpleModelWithNullInputsAndOutputs(); + // Builds a one-dimensional flatbuffer tensor of the given size. 
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false); @@ -146,9 +207,6 @@ CreateFlatbufferBuffers(); // Performs a simple string comparison without requiring standard C library. int TestStrcmp(const char* a, const char* b); -// Wrapper to forward kernel errors to the interpreter's error reporter. -void ReportOpError(struct TfLiteContext* context, const char* format, ...); - void PopulateContext(TfLiteTensor* tensors, int tensors_size, TfLiteContext* context); @@ -160,31 +218,52 @@ TfLiteIntArray* IntArrayFromInts(const int* int_array); // supplied array must be the size of the array expressed as a float. TfLiteFloatArray* FloatArrayFromFloats(const float* floats); +// Assumes that `src_tensor` is a buffer where each element is a 4-bit value +// stored in 8-bit. +// Returns a new buffer that is packed densely with 2 4-bit values in a byte. +// The packing format is low-bits-first, i.e. the lower nibble of a byte is +// filled first, followed by the upper nibble. +void PackInt4ValuesDenselyInPlace(uint8_t* src_buffer, int buffer_size); + template TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims, - const bool is_variable = false) { + const bool is_variable = false, + TfLiteType type = kTfLiteNoType) { TfLiteTensor result; result.dims = dims; result.params = {}; result.quantization = {kTfLiteNoQuantization, nullptr}; result.is_variable = is_variable; result.allocation_type = kTfLiteMemNone; - result.type = typeToTfLiteType(); - // Const cast is used to allow passing in const and non-const arrays within a - // single CreateTensor method. A Const array should be used for immutable - // input tensors and non-const array should be used for mutable and output - // tensors. 
result.data.data = const_cast(data); - result.quantization = {kTfLiteAffineQuantization, nullptr}; - result.bytes = ElementCount(*dims) * sizeof(T); + + if (type == kTfLiteInt4) { + result.type = kTfLiteInt4; + PackInt4ValuesDenselyInPlace(tflite::GetTensorData(&result), + ElementCount(*dims)); + result.bytes = ((ElementCount(*dims) + 1) / 2); + } else { + // Const cast is used to allow passing in const and non-const arrays within + // a single CreateTensor method. A Const array should be used for immutable + // input tensors and non-const array should be used for mutable and output + // tensors. + if (type == kTfLiteNoType) { + result.type = typeToTfLiteType(); + } else { + result.type = type; + } + + result.bytes = ElementCount(*dims) * TfLiteTypeGetSize(result.type); + } return result; } template TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, const float scale, const int zero_point = 0, - const bool is_variable = false) { - TfLiteTensor result = CreateTensor(data, dims, is_variable); + const bool is_variable = false, + TfLiteType type = kTfLiteNoType) { + TfLiteTensor result = CreateTensor(data, dims, is_variable, type); result.params = {scale, zero_point}; result.quantization = {kTfLiteAffineQuantization, nullptr}; return result; @@ -193,34 +272,114 @@ TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, template TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized, TfLiteIntArray* dims, float scale, - int zero_point, bool is_variable = false) { + int zero_point, bool is_variable = false, + TfLiteType type = kTfLiteNoType) { int input_size = ElementCount(*dims); tflite::Quantize(input, quantized, input_size, scale, zero_point); - return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable); + return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable, + type); } -TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, +template +TfLiteTensor 
CreateQuantizedBiasTensor(const float* data, T* quantized, TfLiteIntArray* dims, float input_scale, float weights_scale, - bool is_variable = false); + bool is_variable = false) { + float bias_scale = input_scale * weights_scale; + tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); + + // Quantized bias tensors always have a zero point of 0, since the range of + // values is large, and because zero point costs extra cycles during + // processing. + TfLiteTensor result = + CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); + return result; +} + +// Creates bias tensor with input data, and per-channel weights determined by +// input scale multiplied by weight scale for each channel. Input data will not +// be quantized. +template +TfLiteTensor CreatePerChannelQuantizedBiasTensor( + const T* input_data, TfLiteIntArray* dims, float input_scale, + const TfLiteFloatArray* weight_scales, TfLiteFloatArray* scales, + TfLiteIntArray* zero_points, TfLiteAffineQuantization* affine_quant, + int quantized_dimension, bool is_variable = false, + TfLiteType type = kTfLiteNoType) { + int num_channels = dims->data[quantized_dimension]; + zero_points->size = num_channels; + scales->size = num_channels; + for (int i = 0; i < num_channels; i++) { + scales->data[i] = input_scale * weight_scales->data[i]; + zero_points->data[i] = 0; + } + + affine_quant->scale = scales; + affine_quant->zero_point = zero_points; + affine_quant->quantized_dimension = quantized_dimension; + + TfLiteTensor result = CreateTensor(input_data, dims, is_variable, type); + result.quantization = {kTfLiteAffineQuantization, affine_quant}; + return result; +} -// Quantizes int32_t bias tensor with per-channel weights determined by input +// Quantizes bias tensor with per-channel weights determined by input // scale multiplied by weight scale for each channel. 
+template TfLiteTensor CreatePerChannelQuantizedBiasTensor( - const float* input, int32_t* quantized, TfLiteIntArray* dims, - float input_scale, float* weight_scales, float* scales, int* zero_points, + const float* input, T* quantized, TfLiteIntArray* dims, float input_scale, + const float* weight_scales, float* scales, int* zero_points, TfLiteAffineQuantization* affine_quant, int quantized_dimension, - bool is_variable = false); + bool is_variable = false) { + int input_size = ElementCount(*dims); + int num_channels = dims->data[quantized_dimension]; + // First element is reserved for array length + zero_points[0] = num_channels; + scales[0] = static_cast(num_channels); + float* scales_array = &scales[1]; + for (int i = 0; i < num_channels; i++) { + scales_array[i] = input_scale * weight_scales[i]; + zero_points[i + 1] = 0; + } + + SymmetricPerChannelQuantize(input, quantized, input_size, num_channels, + scales_array); + + affine_quant->scale = FloatArrayFromFloats(scales); + affine_quant->zero_point = IntArrayFromInts(zero_points); + affine_quant->quantized_dimension = quantized_dimension; + + TfLiteTensor result = CreateTensor(quantized, dims, is_variable); + result.quantization = {kTfLiteAffineQuantization, affine_quant}; + + return result; +} + +template +TfLiteTensor CreatePerChannelQuantizedTensor( + const T* quantized, TfLiteIntArray* dims, TfLiteFloatArray* scales, + TfLiteIntArray* zero_points, TfLiteAffineQuantization* affine_quant, + int quantized_dimension, bool is_variable = false, + TfLiteType type = kTfLiteNoType) { + affine_quant->scale = scales; + affine_quant->zero_point = zero_points; + affine_quant->quantized_dimension = quantized_dimension; + + TfLiteTensor result = CreateTensor(quantized, dims, is_variable, type); + result.quantization = {kTfLiteAffineQuantization, affine_quant}; + return result; +} TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, int* 
zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable = false); + int quantized_dimension, bool is_variable = false, + TfLiteType tensor_weight_type = kTfLiteNoType); // Returns the number of tensors in the default subgraph for a tflite::Model. size_t GetModelTensorCount(const Model* model); -// Derives the quantization scaling factor from a min and max range. +// Derives the asymmetric quantization scaling factor from a min and max range. template inline float ScaleFromMinMax(const float min, const float max) { return (max - min) / @@ -228,13 +387,148 @@ inline float ScaleFromMinMax(const float min, const float max) { std::numeric_limits::min()); } +// Derives the symmetric quantization scaling factor from a min and max range. +template +inline float SymmetricScaleFromMinMax(const float min, const float max) { + const int32_t kScale = + std::numeric_limits::type>::max(); + const float range = std::max(std::abs(min), std::abs(max)); + if (range == 0) { + return 1.0f; + } else { + return range / kScale; + } +} + // Derives the quantization zero point from a min and max range. 
template inline int ZeroPointFromMinMax(const float min, const float max) { return static_cast(std::numeric_limits::min()) + - static_cast(-min / ScaleFromMinMax(min, max) + 0.5f); + static_cast(roundf(-min / ScaleFromMinMax(min, max))); } +#ifdef USE_TFLM_COMPRESSION + +template +struct TestCompressionInfo { + T* value_table; + size_t value_table_stride; + int bit_width; + CompressionScheme scheme; +}; + +template +struct TestCompressionQuantizedInfo : TestCompressionInfo { + const uint8_t* compressed; + const float* data; + const int* dims_data; // TfLiteIntArray + const float* scales; // TfLiteFloatArray (may be computed) + const int* zero_points; // TfLiteIntArray (may be computed) +}; + +template +class TestCompressedList { + public: + template + TfLiteStatus AddInput(const TestCompressionInfo& tci, + const TfLiteTensor& tensor, const size_t tensor_index) { + if (next_input_index_ >= NINPUTS) { + MicroPrintf("TestCompressedList: too many inputs, max %u", NINPUTS); + return kTfLiteError; + } + inputs_comp_data_[next_input_index_].data.lut_data = + &inputs_ltd_[next_input_index_]; + inputs_comp_data_[next_input_index_].scheme = tci.scheme; + inputs_comp_data_[next_input_index_].data.lut_data->compressed_bit_width = + tci.bit_width; + inputs_comp_data_[next_input_index_].data.lut_data->value_table = + tci.value_table; + inputs_comp_data_[next_input_index_] + .data.lut_data->value_table_channel_stride = tci.value_table_stride; + inputs_comp_data_[next_input_index_] + .data.lut_data->is_per_channel_quantized = + IsPerChannelQuantized(tensor); + inputs_comp_data_[next_input_index_].data.lut_data->use_alternate_axis = + UsesAltAxis(tensor); + return SetCompressionData(tensor_index, + inputs_comp_data_[next_input_index_++]); + } + + const CompressedTensorList* GetCompressedTensorList() { + if (next_input_index_ == 0) { + return nullptr; + } + + return &ctl_; + } + + private: + size_t next_input_index_ = 0; + LookupTableData inputs_ltd_[NINPUTS] = {}; + 
CompressionTensorData inputs_comp_data_[NINPUTS] = {}; + const CompressionTensorData* ctdp_[NTENSORS] = {}; + const CompressedTensorList ctl_ = {ctdp_}; + + TfLiteStatus SetCompressionData(const size_t tensor_index, + const CompressionTensorData& cd) { + if (tensor_index >= NTENSORS) { + MicroPrintf("TestCompressedList: bad tensor index %u", tensor_index); + return kTfLiteError; + } + if (cd.data.lut_data->value_table == nullptr) { + MicroPrintf("TestCompressedList: null value_table pointer"); + return kTfLiteError; + } + if (cd.data.lut_data->value_table_channel_stride == 0) { + MicroPrintf("TestCompressedList: value_table_channel_stride not set"); + return kTfLiteError; + } + if (cd.scheme != CompressionScheme::kBinQuant) { + MicroPrintf("TestCompressedList: unsupported compression scheme"); + return kTfLiteError; + } + if (ctdp_[tensor_index] != nullptr) { + MicroPrintf("TestCompressedList: tensor index %d already in use", + tensor_index); + return kTfLiteError; + } + + ctdp_[tensor_index] = &cd; + return kTfLiteOk; + } + + bool IsPerChannelQuantized(const TfLiteTensor& tensor) { + if (tensor.quantization.type == kTfLiteAffineQuantization && + tensor.quantization.params != nullptr) { + const TfLiteAffineQuantization* qp = + static_cast( + tensor.quantization.params); + if (qp->scale->size > 1) { + return true; + } + } + + return false; + } + + bool UsesAltAxis(const TfLiteTensor& tensor) { + if (tensor.quantization.type == kTfLiteAffineQuantization && + tensor.quantization.params != nullptr) { + const TfLiteAffineQuantization* qp = + static_cast( + tensor.quantization.params); + if (qp->quantized_dimension != 0) { + TFLITE_DCHECK_EQ(qp->quantized_dimension, tensor.dims->size - 1); + return true; + } + } + + return false; + } +}; + +#endif // USE_TFLM_COMPRESSION + } // namespace testing } // namespace tflite diff --git a/src/tensorflow/lite/micro/testing/micro_test.h b/src/tensorflow/lite/micro/testing/micro_test.h index d74d8f4..e8cb2e0 100644 --- 
a/src/tensorflow/lite/micro/testing/micro_test.h +++ b/src/tensorflow/lite/micro/testing/micro_test.h @@ -14,8 +14,7 @@ limitations under the License. ==============================================================================*/ // An ultra-lightweight testing framework designed for use with microcontroller -// applications. Its only dependency is on TensorFlow Lite's ErrorReporter -// interface, where log messages are output. This is designed to be usable even +// applications. This is designed to be usable even // when no standard C or C++ libraries are available, and without any dynamic // memory allocation or reliance on global constructors. // @@ -35,8 +34,7 @@ limitations under the License. // ---------------------------------------------------------------------------- // If you compile this for your platform, you'll get a normal binary that you // should be able to run. Executing it will output logging information like this -// to stderr (or whatever equivalent is available and written to by -// ErrorReporter): +// to stderr: // ---------------------------------------------------------------------------- // Testing SomeTest // 1/1 tests passed @@ -53,186 +51,216 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_ #define TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_ +#include +#include #include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/micro_log.h" +#include "tensorflow/lite/micro/system_setup.h" namespace micro_test { extern int tests_passed; extern int tests_failed; extern bool is_test_complete; extern bool did_test_fail; -extern tflite::ErrorReporter* reporter; } // namespace micro_test -#define TF_LITE_MICRO_TESTS_BEGIN \ - namespace micro_test { \ - int tests_passed; \ - int tests_failed; \ - bool is_test_complete; \ - bool did_test_fail; \ - tflite::ErrorReporter* reporter; \ - } \ - \ - int main(int argc, char** argv) { \ - micro_test::tests_passed = 0; \ - micro_test::tests_failed = 0; \ - tflite::MicroErrorReporter error_reporter; \ - micro_test::reporter = &error_reporter; - -#define TF_LITE_MICRO_TESTS_END \ - micro_test::reporter->Report( \ - "%d/%d tests passed", micro_test::tests_passed, \ - (micro_test::tests_failed + micro_test::tests_passed)); \ - if (micro_test::tests_failed == 0) { \ - micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \ - return kTfLiteOk; \ - } else { \ - micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \ - return kTfLiteError; \ - } \ +namespace tflite { + +// This additional helper function is used (instead of directly calling +// tflite::InitializeTarget from the TF_LITE_MICRO_TESTS_BEGIN macro) to avoid +// adding a dependency from every bazel test target to micro:system_setp (which +// is the target that implements InitializeTarget(). +// +// The underlying issue here is that the use of the macros results in +// dependencies that can be containted within the micro/testing:micro_test +// target bleeding on to all the tests. 
+inline void InitializeTest() { InitializeTarget(); } +} // namespace tflite + +#define TF_LITE_MICRO_TESTS_BEGIN \ + namespace micro_test { \ + int tests_passed; \ + int tests_failed; \ + bool is_test_complete; \ + bool did_test_fail; \ + } \ + \ + int main(int argc, char** argv) { \ + tflite::InitializeTest(); \ + while (true) { \ + micro_test::tests_passed = 0; \ + micro_test::tests_failed = 0; + +#define TF_LITE_MICRO_TESTS_END \ + MicroPrintf("%d/%d tests passed", micro_test::tests_passed, \ + (micro_test::tests_failed + micro_test::tests_passed)); \ + if (micro_test::tests_failed == 0) { \ + MicroPrintf("~~~ALL TESTS PASSED~~~\n"); \ + } else { \ + MicroPrintf("~~~SOME TESTS FAILED~~~\n"); \ + } \ + } \ } // TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop. #define TF_LITE_MICRO_TEST(name) \ - micro_test::reporter->Report("Testing " #name); \ + MicroPrintf("Testing " #name); \ for (micro_test::is_test_complete = false, \ micro_test::did_test_fail = false; \ !micro_test::is_test_complete; micro_test::is_test_complete = true, \ micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1, \ micro_test::tests_failed += (micro_test::did_test_fail) ? 1 : 0) -#define TF_LITE_MICRO_EXPECT(x) \ - do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT(x) \ + do { \ + if (!(x)) { \ + MicroPrintf(#x " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -// TODO(b/139142772): this macro is used with types other than ints even though -// the printf specifier is %d. 
-#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ - do { \ - auto vx = x; \ - auto vy = y; \ - if ((vx) != (vy)) { \ - micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \ - __FILE__, __LINE__, static_cast(vx), \ - static_cast(vy)); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ + do { \ + auto vx = x; \ + auto vy = y; \ + bool isFloatingX = (std::is_floating_point::value); \ + bool isFloatingY = (std::is_floating_point::value); \ + if (isFloatingX && isFloatingY) { \ + auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \ + if (delta > std::numeric_limits::epsilon()) { \ + MicroPrintf(#x " == " #y " failed at %s:%d (%f vs %f)", __FILE__, \ + __LINE__, static_cast(vx), \ + static_cast(vy)); \ + micro_test::did_test_fail = true; \ + } \ + } else if ((vx) != (vy)) { \ + MicroPrintf(#x " == " #y " failed at %s:%d (%d vs %d)", __FILE__, \ + __LINE__, static_cast(vx), static_cast(vy)); \ + if (isFloatingX || isFloatingY) { \ + MicroPrintf("-----------WARNING-----------"); \ + MicroPrintf("Only one of the values is floating point value."); \ + } \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NE(x, y) \ - do { \ - if ((x) == (y)) { \ - micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT_NE(x, y) \ + do { \ + auto vx = x; \ + auto vy = y; \ + bool isFloatingX = (std::is_floating_point::value); \ + bool isFloatingY = (std::is_floating_point::value); \ + if (isFloatingX && isFloatingY) { \ + auto delta = ((vx) > (vy)) ? 
((vx) - (vy)) : ((vy) - (vx)); \ + if (delta <= std::numeric_limits::epsilon()) { \ + MicroPrintf(#x " != " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } else if ((vx) == (vy)) { \ + MicroPrintf(#x " != " #y " failed at %s:%d", __FILE__, __LINE__); \ + if (isFloatingX || isFloatingY) { \ + MicroPrintf("-----------WARNING-----------"); \ + MicroPrintf("Only one of the values is floating point value."); \ + } \ + micro_test::did_test_fail = true; \ + } \ } while (false) // TODO(wangtz): Making it more generic once needed. -#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ - epsilon) \ - do { \ - auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ - ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \ - : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ - static_cast(idx1), static_cast((arr1)[(idx1)]), \ - static_cast(idx2), static_cast((arr2)[(idx2)]), \ - __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ - do { \ - auto vx = (x); \ - auto vy = (y); \ - auto delta = ((vx) > (vy)) ? 
((vx) - (vy)) : ((vy) - (vx)); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #x " (%f) near " #y " (%f) failed at %s:%d", \ - static_cast(vx), static_cast(vy), __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GT(x, y) \ - do { \ - if ((x) <= (y)) { \ - micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_LT(x, y) \ - do { \ - if ((x) >= (y)) { \ - micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GE(x, y) \ +#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ + epsilon) \ do { \ - if ((x) < (y)) { \ - micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ + ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \ + : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ + if (delta > epsilon) { \ + MicroPrintf(#arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ + static_cast(idx1), static_cast((arr1)[(idx1)]), \ + static_cast(idx2), static_cast((arr2)[(idx2)]), \ + __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_LE(x, y) \ +// The check vx != vy is needed to properly handle the case where both +// x and y evaluate to infinity. See #46960 for more details. +#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ do { \ - if ((x) > (y)) { \ - micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto vx = (x); \ + auto vy = (y); \ + auto delta = ((vx) > (vy)) ? 
((vx) - (vy)) : ((vy) - (vx)); \ + if (vx != vy && delta > epsilon) { \ + MicroPrintf(#x " (%f) near " #y " (%f) failed at %s:%d", \ + static_cast(vx), static_cast(vy), __FILE__, \ + __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_TRUE(x) \ +#define TF_LITE_MICRO_EXPECT_GT(x, y) \ + do { \ + if ((x) <= (y)) { \ + MicroPrintf(#x " > " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_LT(x, y) \ do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " was not true failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) >= (y)) { \ + MicroPrintf(#x " < " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_FALSE(x) \ +#define TF_LITE_MICRO_EXPECT_GE(x, y) \ do { \ - if (x) { \ - micro_test::reporter->Report(#x " was not false failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) < (y)) { \ + MicroPrintf(#x " >= " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_FAIL(msg) \ - do { \ - micro_test::reporter->Report("FAIL: %s", msg, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ +#define TF_LITE_MICRO_EXPECT_LE(x, y) \ + do { \ + if ((x) > (y)) { \ + MicroPrintf(#x " <= " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ +#define TF_LITE_MICRO_EXPECT_TRUE(x) \ do { \ - for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ - if (string1[i] != string2[i]) { \ - micro_test::reporter->Report("FAIL: %s did not match %s", string1, \ - string2, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ + if (!(x)) { \ + MicroPrintf(#x " was not true failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ } \ } while 
(false) +#define TF_LITE_MICRO_EXPECT_FALSE(x) \ + do { \ + if (x) { \ + MicroPrintf(#x " was not false failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_FAIL(msg) \ + do { \ + MicroPrintf("FAIL: %s", msg, __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ + do { \ + for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ + if (string1[i] != string2[i]) { \ + MicroPrintf("FAIL: %s did not match %s", string1, string2, __FILE__, \ + __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } \ + } while (false) + #endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_ diff --git a/src/tensorflow/lite/micro/testing/util_test.cpp b/src/tensorflow/lite/micro/testing/util_test.cpp deleted file mode 100644 index 1720c81..0000000 --- a/src/tensorflow/lite/micro/testing/util_test.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/testing/micro_test.h" - -TF_LITE_MICRO_TESTS_BEGIN - -TF_LITE_MICRO_TEST(ArgumentsExecutedOnlyOnce) { - float count = 0.; - // Make sure either argument is executed once after macro expansion. 
- TF_LITE_MICRO_EXPECT_NEAR(0, count++, 0.1f); - TF_LITE_MICRO_EXPECT_NEAR(1, count++, 0.1f); - TF_LITE_MICRO_EXPECT_NEAR(count++, 2, 0.1f); - TF_LITE_MICRO_EXPECT_NEAR(count++, 3, 0.1f); -} - -TF_LITE_MICRO_TESTS_END diff --git a/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.cpp b/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.cpp new file mode 100644 index 0000000..20e4ae4 --- /dev/null +++ b/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.cpp @@ -0,0 +1,34 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h" + +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/lite/core/api/flatbuffer_conversions.h" +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type) { + return ConvertTensorType(tensor_type, type, tflite::GetMicroErrorReporter()); +} + +TfLiteStatus CallBuiltinParseFunction(TfLiteBridgeBuiltinParseFunction parser, + const Operator* op, + BuiltinDataAllocator* allocator, + void** builtin_data) { + return parser(op, tflite::GetMicroErrorReporter(), allocator, builtin_data); +} +} // namespace tflite diff --git a/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h b/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h new file mode 100644 index 0000000..2c0f369 --- /dev/null +++ b/src/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h @@ -0,0 +1,45 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ +#define TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ + +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/core/api/flatbuffer_conversions.h" +#include "tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// Forward declaration of the ErrorReporter class to hide it from the TFLM code. +class ErrorReporter; + +using TfLiteBridgeBuiltinDataAllocator = BuiltinDataAllocator; + +using TfLiteBridgeBuiltinParseFunction = + TfLiteStatus (*)(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +// Converts the tensor data type used in the flatbuffer to the representation +// used by the runtime. +TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type); + +// CallBuiltinParseFunction is a wrapper function to wrap the parser function +// calls to Call parser(op, allocator, builtin_data) +TfLiteStatus CallBuiltinParseFunction(TfLiteBridgeBuiltinParseFunction parser, + const Operator* op, + BuiltinDataAllocator* allocator, + void** builtin_data); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ diff --git a/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.cpp b/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.cpp new file mode 100644 index 0000000..d5d77c3 --- /dev/null +++ b/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.cpp @@ -0,0 +1,43 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h" + +#include +#include +#include + +#include "tensorflow/lite/micro/micro_log.h" + +namespace { +uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)]; +tflite::MicroErrorReporter* error_reporter_ = nullptr; + +} // namespace + +namespace tflite { +ErrorReporter* GetMicroErrorReporter() { + if (error_reporter_ == nullptr) { + error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); + } + return error_reporter_; +} + +int MicroErrorReporter::Report(const char* format, va_list args) { + VMicroPrintf(format, args); + return 0; +} + +} // namespace tflite diff --git a/src/tensorflow/lite/micro/micro_error_reporter.h b/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h similarity index 77% rename from src/tensorflow/lite/micro/micro_error_reporter.h rename to src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h index e2c073a..d3702f4 100644 --- a/src/tensorflow/lite/micro/micro_error_reporter.h +++ b/src/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ +#ifndef TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ +#define TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ #include @@ -21,7 +21,8 @@ limitations under the License. #include "tensorflow/lite/micro/compatibility.h" namespace tflite { - +// Get a pointer to a singleton global error reporter. +ErrorReporter* GetMicroErrorReporter(); class MicroErrorReporter : public ErrorReporter { public: ~MicroErrorReporter() override {} @@ -33,4 +34,4 @@ class MicroErrorReporter : public ErrorReporter { } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ +#endif // TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ diff --git a/src/tensorflow/lite/portable_type_to_tflitetype.h b/src/tensorflow/lite/portable_type_to_tflitetype.h index fd502b8..03c5b87 100644 --- a/src/tensorflow/lite/portable_type_to_tflitetype.h +++ b/src/tensorflow/lite/portable_type_to_tflitetype.h @@ -16,8 +16,8 @@ limitations under the License. #define TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ // Most of the definitions have been moved to this subheader so that Micro -// can include it without relying on , which isn't available on all -// platforms. +// can include it without relying on and , which isn't +// available on all platforms. // Arduino build defines abs as a macro here. That is invalid C++, and breaks // libc++'s header, undefine it. @@ -25,9 +25,9 @@ limitations under the License. 
#undef abs #endif -#include +#include -#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/core/c/common.h" namespace tflite { @@ -52,6 +52,10 @@ struct TfLiteTypeToType {}; // Specializations below return TFLITE_TYPE_ENUM; \ } \ template <> \ + constexpr TfLiteType typeToTfLiteType() { \ + return TFLITE_TYPE_ENUM; \ + } \ + template <> \ struct TfLiteTypeToType { \ using Type = CPP_TYPE; \ } @@ -59,15 +63,16 @@ struct TfLiteTypeToType {}; // Specializations below // No string mapping is included here, since the TF Lite packed representation // doesn't correspond to a C++ type well. MATCH_TYPE_AND_TFLITE_TYPE(int32_t, kTfLiteInt32); +MATCH_TYPE_AND_TFLITE_TYPE(uint32_t, kTfLiteUInt32); MATCH_TYPE_AND_TFLITE_TYPE(int16_t, kTfLiteInt16); +MATCH_TYPE_AND_TFLITE_TYPE(uint16_t, kTfLiteUInt16); MATCH_TYPE_AND_TFLITE_TYPE(int64_t, kTfLiteInt64); MATCH_TYPE_AND_TFLITE_TYPE(float, kTfLiteFloat32); MATCH_TYPE_AND_TFLITE_TYPE(unsigned char, kTfLiteUInt8); MATCH_TYPE_AND_TFLITE_TYPE(int8_t, kTfLiteInt8); MATCH_TYPE_AND_TFLITE_TYPE(bool, kTfLiteBool); -MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex64); -MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex128); MATCH_TYPE_AND_TFLITE_TYPE(TfLiteFloat16, kTfLiteFloat16); +MATCH_TYPE_AND_TFLITE_TYPE(TfLiteBFloat16, kTfLiteBFloat16); MATCH_TYPE_AND_TFLITE_TYPE(double, kTfLiteFloat64); MATCH_TYPE_AND_TFLITE_TYPE(uint64_t, kTfLiteUInt64); diff --git a/src/tensorflow/lite/schema/schema_generated.h b/src/tensorflow/lite/schema/schema_generated.h old mode 100755 new mode 100644 index 16c4ee1..79f0d99 --- a/src/tensorflow/lite/schema/schema_generated.h +++ b/src/tensorflow/lite/schema/schema_generated.h @@ -1,391 +1,689 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ // automatically generated by the FlatBuffers compiler, do not modify #ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ #define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ -#include "flatbuffers/flatbuffers.h" +#include "third_party/flatbuffers/include/flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. +static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && + FLATBUFFERS_VERSION_MINOR == 5 && + FLATBUFFERS_VERSION_REVISION == 26, + "Non-compatible flatbuffers version included"); namespace tflite { struct CustomQuantization; +struct CustomQuantizationBuilder; struct CustomQuantizationT; +struct BlockwiseQuantization; +struct BlockwiseQuantizationBuilder; +struct BlockwiseQuantizationT; + struct QuantizationParameters; +struct QuantizationParametersBuilder; struct QuantizationParametersT; struct Int32Vector; +struct Int32VectorBuilder; struct Int32VectorT; struct Uint16Vector; +struct Uint16VectorBuilder; struct Uint16VectorT; struct Uint8Vector; +struct Uint8VectorBuilder; struct Uint8VectorT; struct DimensionMetadata; +struct DimensionMetadataBuilder; struct DimensionMetadataT; struct SparsityParameters; +struct SparsityParametersBuilder; struct SparsityParametersT; +struct VariantSubType; +struct VariantSubTypeBuilder; +struct VariantSubTypeT; + struct Tensor; +struct TensorBuilder; struct TensorT; +struct StablehloGatherOptions; +struct StablehloGatherOptionsBuilder; +struct 
StablehloGatherOptionsT; + +struct StablehloTransposeOptions; +struct StablehloTransposeOptionsBuilder; +struct StablehloTransposeOptionsT; + +struct StablehloDotGeneralOptions; +struct StablehloDotGeneralOptionsBuilder; +struct StablehloDotGeneralOptionsT; + +struct StablehloReduceWindowOptions; +struct StablehloReduceWindowOptionsBuilder; +struct StablehloReduceWindowOptionsT; + +struct StablehloWhileOptions; +struct StablehloWhileOptionsBuilder; +struct StablehloWhileOptionsT; + +struct StablehloSortOptions; +struct StablehloSortOptionsBuilder; +struct StablehloSortOptionsT; + +struct StablehloConcatenateOptions; +struct StablehloConcatenateOptionsBuilder; +struct StablehloConcatenateOptionsT; + +struct StablehloBroadcastInDimOptions; +struct StablehloBroadcastInDimOptionsBuilder; +struct StablehloBroadcastInDimOptionsT; + +struct StablehloCompareOptions; +struct StablehloCompareOptionsBuilder; +struct StablehloCompareOptionsT; + +struct StablehloDynamicSliceOptions; +struct StablehloDynamicSliceOptionsBuilder; +struct StablehloDynamicSliceOptionsT; + +struct StablehloPadOptions; +struct StablehloPadOptionsBuilder; +struct StablehloPadOptionsT; + +struct StablehloIotaOptions; +struct StablehloIotaOptionsBuilder; +struct StablehloIotaOptionsT; + +struct StablehloCustomCallOptions; +struct StablehloCustomCallOptionsBuilder; +struct StablehloCustomCallOptionsT; + +struct StablehloReduceOptions; +struct StablehloReduceOptionsBuilder; +struct StablehloReduceOptionsT; + +struct StablehloSliceOptions; +struct StablehloSliceOptionsBuilder; +struct StablehloSliceOptionsT; + +struct StablehloConvolutionOptions; +struct StablehloConvolutionOptionsBuilder; +struct StablehloConvolutionOptionsT; + +struct StablehloScatterOptions; +struct StablehloScatterOptionsBuilder; +struct StablehloScatterOptionsT; + +struct StablehloCaseOptions; +struct StablehloCaseOptionsBuilder; +struct StablehloCaseOptionsT; + +struct StablehloRngBitGeneratorOptions; +struct 
StablehloRngBitGeneratorOptionsBuilder; +struct StablehloRngBitGeneratorOptionsT; + struct Conv2DOptions; +struct Conv2DOptionsBuilder; struct Conv2DOptionsT; +struct Conv3DOptions; +struct Conv3DOptionsBuilder; +struct Conv3DOptionsT; + struct Pool2DOptions; +struct Pool2DOptionsBuilder; struct Pool2DOptionsT; struct DepthwiseConv2DOptions; +struct DepthwiseConv2DOptionsBuilder; struct DepthwiseConv2DOptionsT; struct ConcatEmbeddingsOptions; +struct ConcatEmbeddingsOptionsBuilder; struct ConcatEmbeddingsOptionsT; struct LSHProjectionOptions; +struct LSHProjectionOptionsBuilder; struct LSHProjectionOptionsT; struct SVDFOptions; +struct SVDFOptionsBuilder; struct SVDFOptionsT; struct RNNOptions; +struct RNNOptionsBuilder; struct RNNOptionsT; struct SequenceRNNOptions; +struct SequenceRNNOptionsBuilder; struct SequenceRNNOptionsT; struct BidirectionalSequenceRNNOptions; +struct BidirectionalSequenceRNNOptionsBuilder; struct BidirectionalSequenceRNNOptionsT; struct FullyConnectedOptions; +struct FullyConnectedOptionsBuilder; struct FullyConnectedOptionsT; struct SoftmaxOptions; +struct SoftmaxOptionsBuilder; struct SoftmaxOptionsT; struct ConcatenationOptions; +struct ConcatenationOptionsBuilder; struct ConcatenationOptionsT; struct AddOptions; +struct AddOptionsBuilder; struct AddOptionsT; struct MulOptions; +struct MulOptionsBuilder; struct MulOptionsT; struct L2NormOptions; +struct L2NormOptionsBuilder; struct L2NormOptionsT; struct LocalResponseNormalizationOptions; +struct LocalResponseNormalizationOptionsBuilder; struct LocalResponseNormalizationOptionsT; struct LSTMOptions; +struct LSTMOptionsBuilder; struct LSTMOptionsT; struct UnidirectionalSequenceLSTMOptions; +struct UnidirectionalSequenceLSTMOptionsBuilder; struct UnidirectionalSequenceLSTMOptionsT; struct BidirectionalSequenceLSTMOptions; +struct BidirectionalSequenceLSTMOptionsBuilder; struct BidirectionalSequenceLSTMOptionsT; struct ResizeBilinearOptions; +struct ResizeBilinearOptionsBuilder; struct 
ResizeBilinearOptionsT; struct ResizeNearestNeighborOptions; +struct ResizeNearestNeighborOptionsBuilder; struct ResizeNearestNeighborOptionsT; struct CallOptions; +struct CallOptionsBuilder; struct CallOptionsT; struct PadOptions; +struct PadOptionsBuilder; struct PadOptionsT; struct PadV2Options; +struct PadV2OptionsBuilder; struct PadV2OptionsT; struct ReshapeOptions; +struct ReshapeOptionsBuilder; struct ReshapeOptionsT; struct SpaceToBatchNDOptions; +struct SpaceToBatchNDOptionsBuilder; struct SpaceToBatchNDOptionsT; struct BatchToSpaceNDOptions; +struct BatchToSpaceNDOptionsBuilder; struct BatchToSpaceNDOptionsT; struct SkipGramOptions; +struct SkipGramOptionsBuilder; struct SkipGramOptionsT; struct SpaceToDepthOptions; +struct SpaceToDepthOptionsBuilder; struct SpaceToDepthOptionsT; struct DepthToSpaceOptions; +struct DepthToSpaceOptionsBuilder; struct DepthToSpaceOptionsT; struct SubOptions; +struct SubOptionsBuilder; struct SubOptionsT; struct DivOptions; +struct DivOptionsBuilder; struct DivOptionsT; struct TopKV2Options; +struct TopKV2OptionsBuilder; struct TopKV2OptionsT; struct EmbeddingLookupSparseOptions; +struct EmbeddingLookupSparseOptionsBuilder; struct EmbeddingLookupSparseOptionsT; struct GatherOptions; +struct GatherOptionsBuilder; struct GatherOptionsT; struct TransposeOptions; +struct TransposeOptionsBuilder; struct TransposeOptionsT; struct ExpOptions; +struct ExpOptionsBuilder; struct ExpOptionsT; struct CosOptions; +struct CosOptionsBuilder; struct CosOptionsT; struct ReducerOptions; +struct ReducerOptionsBuilder; struct ReducerOptionsT; struct SqueezeOptions; +struct SqueezeOptionsBuilder; struct SqueezeOptionsT; struct SplitOptions; +struct SplitOptionsBuilder; struct SplitOptionsT; struct SplitVOptions; +struct SplitVOptionsBuilder; struct SplitVOptionsT; struct StridedSliceOptions; +struct StridedSliceOptionsBuilder; struct StridedSliceOptionsT; struct LogSoftmaxOptions; +struct LogSoftmaxOptionsBuilder; struct LogSoftmaxOptionsT; 
struct CastOptions; +struct CastOptionsBuilder; struct CastOptionsT; struct DequantizeOptions; +struct DequantizeOptionsBuilder; struct DequantizeOptionsT; struct MaximumMinimumOptions; +struct MaximumMinimumOptionsBuilder; struct MaximumMinimumOptionsT; struct TileOptions; +struct TileOptionsBuilder; struct TileOptionsT; struct ArgMaxOptions; +struct ArgMaxOptionsBuilder; struct ArgMaxOptionsT; struct ArgMinOptions; +struct ArgMinOptionsBuilder; struct ArgMinOptionsT; struct GreaterOptions; +struct GreaterOptionsBuilder; struct GreaterOptionsT; struct GreaterEqualOptions; +struct GreaterEqualOptionsBuilder; struct GreaterEqualOptionsT; struct LessOptions; +struct LessOptionsBuilder; struct LessOptionsT; struct LessEqualOptions; +struct LessEqualOptionsBuilder; struct LessEqualOptionsT; struct NegOptions; +struct NegOptionsBuilder; struct NegOptionsT; struct SelectOptions; +struct SelectOptionsBuilder; struct SelectOptionsT; struct SliceOptions; +struct SliceOptionsBuilder; struct SliceOptionsT; struct TransposeConvOptions; +struct TransposeConvOptionsBuilder; struct TransposeConvOptionsT; struct ExpandDimsOptions; +struct ExpandDimsOptionsBuilder; struct ExpandDimsOptionsT; struct SparseToDenseOptions; +struct SparseToDenseOptionsBuilder; struct SparseToDenseOptionsT; struct EqualOptions; +struct EqualOptionsBuilder; struct EqualOptionsT; struct NotEqualOptions; +struct NotEqualOptionsBuilder; struct NotEqualOptionsT; struct ShapeOptions; +struct ShapeOptionsBuilder; struct ShapeOptionsT; struct RankOptions; +struct RankOptionsBuilder; struct RankOptionsT; struct PowOptions; +struct PowOptionsBuilder; struct PowOptionsT; struct FakeQuantOptions; +struct FakeQuantOptionsBuilder; struct FakeQuantOptionsT; struct PackOptions; +struct PackOptionsBuilder; struct PackOptionsT; struct LogicalOrOptions; +struct LogicalOrOptionsBuilder; struct LogicalOrOptionsT; struct OneHotOptions; +struct OneHotOptionsBuilder; struct OneHotOptionsT; struct AbsOptions; +struct 
AbsOptionsBuilder; struct AbsOptionsT; struct HardSwishOptions; +struct HardSwishOptionsBuilder; struct HardSwishOptionsT; struct LogicalAndOptions; +struct LogicalAndOptionsBuilder; struct LogicalAndOptionsT; struct LogicalNotOptions; +struct LogicalNotOptionsBuilder; struct LogicalNotOptionsT; struct UnpackOptions; +struct UnpackOptionsBuilder; struct UnpackOptionsT; struct FloorDivOptions; +struct FloorDivOptionsBuilder; struct FloorDivOptionsT; struct SquareOptions; +struct SquareOptionsBuilder; struct SquareOptionsT; struct ZerosLikeOptions; +struct ZerosLikeOptionsBuilder; struct ZerosLikeOptionsT; struct FillOptions; +struct FillOptionsBuilder; struct FillOptionsT; struct FloorModOptions; +struct FloorModOptionsBuilder; struct FloorModOptionsT; struct RangeOptions; +struct RangeOptionsBuilder; struct RangeOptionsT; struct LeakyReluOptions; +struct LeakyReluOptionsBuilder; struct LeakyReluOptionsT; struct SquaredDifferenceOptions; +struct SquaredDifferenceOptionsBuilder; struct SquaredDifferenceOptionsT; struct MirrorPadOptions; +struct MirrorPadOptionsBuilder; struct MirrorPadOptionsT; struct UniqueOptions; +struct UniqueOptionsBuilder; struct UniqueOptionsT; struct ReverseV2Options; +struct ReverseV2OptionsBuilder; struct ReverseV2OptionsT; struct AddNOptions; +struct AddNOptionsBuilder; struct AddNOptionsT; struct GatherNdOptions; +struct GatherNdOptionsBuilder; struct GatherNdOptionsT; struct WhereOptions; +struct WhereOptionsBuilder; struct WhereOptionsT; struct ReverseSequenceOptions; +struct ReverseSequenceOptionsBuilder; struct ReverseSequenceOptionsT; struct MatrixDiagOptions; +struct MatrixDiagOptionsBuilder; struct MatrixDiagOptionsT; struct QuantizeOptions; +struct QuantizeOptionsBuilder; struct QuantizeOptionsT; struct MatrixSetDiagOptions; +struct MatrixSetDiagOptionsBuilder; struct MatrixSetDiagOptionsT; struct IfOptions; +struct IfOptionsBuilder; struct IfOptionsT; struct CallOnceOptions; +struct CallOnceOptionsBuilder; struct 
CallOnceOptionsT; struct WhileOptions; +struct WhileOptionsBuilder; struct WhileOptionsT; struct NonMaxSuppressionV4Options; +struct NonMaxSuppressionV4OptionsBuilder; struct NonMaxSuppressionV4OptionsT; struct NonMaxSuppressionV5Options; +struct NonMaxSuppressionV5OptionsBuilder; struct NonMaxSuppressionV5OptionsT; struct ScatterNdOptions; +struct ScatterNdOptionsBuilder; struct ScatterNdOptionsT; struct SelectV2Options; +struct SelectV2OptionsBuilder; struct SelectV2OptionsT; struct DensifyOptions; +struct DensifyOptionsBuilder; struct DensifyOptionsT; struct SegmentSumOptions; +struct SegmentSumOptionsBuilder; struct SegmentSumOptionsT; struct BatchMatMulOptions; +struct BatchMatMulOptionsBuilder; struct BatchMatMulOptionsT; struct CumsumOptions; +struct CumsumOptionsBuilder; struct CumsumOptionsT; struct BroadcastToOptions; +struct BroadcastToOptionsBuilder; struct BroadcastToOptionsT; struct Rfft2dOptions; +struct Rfft2dOptionsBuilder; struct Rfft2dOptionsT; +struct HashtableOptions; +struct HashtableOptionsBuilder; +struct HashtableOptionsT; + +struct HashtableFindOptions; +struct HashtableFindOptionsBuilder; +struct HashtableFindOptionsT; + +struct HashtableImportOptions; +struct HashtableImportOptionsBuilder; +struct HashtableImportOptionsT; + +struct HashtableSizeOptions; +struct HashtableSizeOptionsBuilder; +struct HashtableSizeOptionsT; + +struct VarHandleOptions; +struct VarHandleOptionsBuilder; +struct VarHandleOptionsT; + +struct ReadVariableOptions; +struct ReadVariableOptionsBuilder; +struct ReadVariableOptionsT; + +struct AssignVariableOptions; +struct AssignVariableOptionsBuilder; +struct AssignVariableOptionsT; + +struct RandomOptions; +struct RandomOptionsBuilder; +struct RandomOptionsT; + +struct BucketizeOptions; +struct BucketizeOptionsBuilder; +struct BucketizeOptionsT; + +struct GeluOptions; +struct GeluOptionsBuilder; +struct GeluOptionsT; + +struct DynamicUpdateSliceOptions; +struct DynamicUpdateSliceOptionsBuilder; +struct 
DynamicUpdateSliceOptionsT; + +struct UnsortedSegmentProdOptions; +struct UnsortedSegmentProdOptionsBuilder; +struct UnsortedSegmentProdOptionsT; + +struct UnsortedSegmentMaxOptions; +struct UnsortedSegmentMaxOptionsBuilder; +struct UnsortedSegmentMaxOptionsT; + +struct UnsortedSegmentSumOptions; +struct UnsortedSegmentSumOptionsBuilder; +struct UnsortedSegmentSumOptionsT; + +struct ATan2Options; +struct ATan2OptionsBuilder; +struct ATan2OptionsT; + +struct UnsortedSegmentMinOptions; +struct UnsortedSegmentMinOptionsBuilder; +struct UnsortedSegmentMinOptionsT; + +struct SignOptions; +struct SignOptionsBuilder; +struct SignOptionsT; + +struct BitcastOptions; +struct BitcastOptionsBuilder; +struct BitcastOptionsT; + +struct BitwiseXorOptions; +struct BitwiseXorOptionsBuilder; +struct BitwiseXorOptionsT; + +struct RightShiftOptions; +struct RightShiftOptionsBuilder; +struct RightShiftOptionsT; + +struct DilateOptions; +struct DilateOptionsBuilder; +struct DilateOptionsT; + +struct ReduceWindowOptions; +struct ReduceWindowOptionsBuilder; +struct ReduceWindowOptionsT; + struct OperatorCode; +struct OperatorCodeBuilder; struct OperatorCodeT; +struct StableHLOCompositeOptions; +struct StableHLOCompositeOptionsBuilder; +struct StableHLOCompositeOptionsT; + +struct StablehloShiftLeftOptions; +struct StablehloShiftLeftOptionsBuilder; +struct StablehloShiftLeftOptionsT; + struct Operator; +struct OperatorBuilder; struct OperatorT; struct SubGraph; +struct SubGraphBuilder; struct SubGraphT; struct Buffer; +struct BufferBuilder; struct BufferT; struct Metadata; +struct MetadataBuilder; struct MetadataT; struct TensorMap; +struct TensorMapBuilder; struct TensorMapT; struct SignatureDef; +struct SignatureDefBuilder; struct SignatureDefT; struct Model; +struct ModelBuilder; struct ModelT; -enum TensorType { +enum TensorType : int8_t { TensorType_FLOAT32 = 0, TensorType_FLOAT16 = 1, TensorType_INT32 = 2, @@ -399,11 +697,17 @@ enum TensorType { TensorType_FLOAT64 = 10, 
TensorType_COMPLEX128 = 11, TensorType_UINT64 = 12, + TensorType_RESOURCE = 13, + TensorType_VARIANT = 14, + TensorType_UINT32 = 15, + TensorType_UINT16 = 16, + TensorType_INT4 = 17, + TensorType_BFLOAT16 = 18, TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_UINT64 + TensorType_MAX = TensorType_BFLOAT16 }; -inline const TensorType (&EnumValuesTensorType())[13] { +inline const TensorType (&EnumValuesTensorType())[19] { static const TensorType values[] = { TensorType_FLOAT32, TensorType_FLOAT16, @@ -417,13 +721,19 @@ inline const TensorType (&EnumValuesTensorType())[13] { TensorType_INT8, TensorType_FLOAT64, TensorType_COMPLEX128, - TensorType_UINT64 + TensorType_UINT64, + TensorType_RESOURCE, + TensorType_VARIANT, + TensorType_UINT32, + TensorType_UINT16, + TensorType_INT4, + TensorType_BFLOAT16 }; return values; } inline const char * const *EnumNamesTensorType() { - static const char * const names[14] = { + static const char * const names[20] = { "FLOAT32", "FLOAT16", "INT32", @@ -437,43 +747,52 @@ inline const char * const *EnumNamesTensorType() { "FLOAT64", "COMPLEX128", "UINT64", + "RESOURCE", + "VARIANT", + "UINT32", + "UINT16", + "INT4", + "BFLOAT16", nullptr }; return names; } inline const char *EnumNameTensorType(TensorType e) { - if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT64)) return ""; + if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_BFLOAT16)) return ""; const size_t index = static_cast(e); return EnumNamesTensorType()[index]; } -enum QuantizationDetails { +enum QuantizationDetails : uint8_t { QuantizationDetails_NONE = 0, QuantizationDetails_CustomQuantization = 1, + QuantizationDetails_BlockwiseQuantization = 2, QuantizationDetails_MIN = QuantizationDetails_NONE, - QuantizationDetails_MAX = QuantizationDetails_CustomQuantization + QuantizationDetails_MAX = QuantizationDetails_BlockwiseQuantization }; -inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] { +inline const 
QuantizationDetails (&EnumValuesQuantizationDetails())[3] { static const QuantizationDetails values[] = { QuantizationDetails_NONE, - QuantizationDetails_CustomQuantization + QuantizationDetails_CustomQuantization, + QuantizationDetails_BlockwiseQuantization }; return values; } inline const char * const *EnumNamesQuantizationDetails() { - static const char * const names[3] = { + static const char * const names[4] = { "NONE", "CustomQuantization", + "BlockwiseQuantization", nullptr }; return names; } inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { - if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return ""; + if (::flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_BlockwiseQuantization)) return ""; const size_t index = static_cast(e); return EnumNamesQuantizationDetails()[index]; } @@ -486,6 +805,22 @@ template<> struct QuantizationDetailsTraits { static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; }; +template<> struct QuantizationDetailsTraits { + static const QuantizationDetails enum_value = QuantizationDetails_BlockwiseQuantization; +}; + +template struct QuantizationDetailsUnionTraits { + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template<> struct QuantizationDetailsUnionTraits { + static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; +}; + +template<> struct QuantizationDetailsUnionTraits { + static const QuantizationDetails enum_value = QuantizationDetails_BlockwiseQuantization; +}; + struct QuantizationDetailsUnion { QuantizationDetails type; void *value; @@ -494,8 +829,8 @@ struct QuantizationDetailsUnion { QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : type(QuantizationDetails_NONE), value(nullptr) { std::swap(type, u.type); std::swap(value, u.value); } - QuantizationDetailsUnion(const QuantizationDetailsUnion &) 
FLATBUFFERS_NOEXCEPT; - QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT + QuantizationDetailsUnion(const QuantizationDetailsUnion &); + QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) { QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT { std::swap(type, u.type); std::swap(value, u.value); return *this; } @@ -503,20 +838,18 @@ struct QuantizationDetailsUnion { void Reset(); -#ifndef FLATBUFFERS_CPP98_STL template void Set(T&& val) { - using RT = typename std::remove_reference::type; + typedef typename std::remove_reference::type RT; Reset(); - type = QuantizationDetailsTraits::enum_value; + type = QuantizationDetailsUnionTraits::enum_value; if (type != QuantizationDetails_NONE) { value = new RT(std::forward(val)); } } -#endif // FLATBUFFERS_CPP98_STL - static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + static void *UnPack(const void *obj, QuantizationDetails type, const ::flatbuffers::resolver_function_t *resolver); + ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const; tflite::CustomQuantizationT *AsCustomQuantization() { return type == QuantizationDetails_CustomQuantization ? @@ -526,12 +859,20 @@ struct QuantizationDetailsUnion { return type == QuantizationDetails_CustomQuantization ? reinterpret_cast(value) : nullptr; } + tflite::BlockwiseQuantizationT *AsBlockwiseQuantization() { + return type == QuantizationDetails_BlockwiseQuantization ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::BlockwiseQuantizationT *AsBlockwiseQuantization() const { + return type == QuantizationDetails_BlockwiseQuantization ? + reinterpret_cast(value) : nullptr; + } }; -bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); -bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); +bool VerifyQuantizationDetails(::flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); +bool VerifyQuantizationDetailsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); -enum DimensionType { +enum DimensionType : int8_t { DimensionType_DENSE = 0, DimensionType_SPARSE_CSR = 1, DimensionType_MIN = DimensionType_DENSE, @@ -556,12 +897,12 @@ inline const char * const *EnumNamesDimensionType() { } inline const char *EnumNameDimensionType(DimensionType e) { - if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; + if (::flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; const size_t index = static_cast(e); return EnumNamesDimensionType()[index]; } -enum SparseIndexVector { +enum SparseIndexVector : uint8_t { SparseIndexVector_NONE = 0, SparseIndexVector_Int32Vector = 1, SparseIndexVector_Uint16Vector = 2, @@ -592,7 +933,7 @@ inline const char * const *EnumNamesSparseIndexVector() { } inline const char *EnumNameSparseIndexVector(SparseIndexVector e) { - if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; + if (::flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; const size_t index = static_cast(e); return EnumNamesSparseIndexVector()[index]; } @@ -613,6 +954,22 @@ template<> struct SparseIndexVectorTraits { static const SparseIndexVector 
enum_value = SparseIndexVector_Uint8Vector; }; +template struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_NONE; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; +}; + struct SparseIndexVectorUnion { SparseIndexVector type; void *value; @@ -621,8 +978,8 @@ struct SparseIndexVectorUnion { SparseIndexVectorUnion(SparseIndexVectorUnion&& u) FLATBUFFERS_NOEXCEPT : type(SparseIndexVector_NONE), value(nullptr) { std::swap(type, u.type); std::swap(value, u.value); } - SparseIndexVectorUnion(const SparseIndexVectorUnion &) FLATBUFFERS_NOEXCEPT; - SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT + SparseIndexVectorUnion(const SparseIndexVectorUnion &); + SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u) { SparseIndexVectorUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } SparseIndexVectorUnion &operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT { std::swap(type, u.type); std::swap(value, u.value); return *this; } @@ -630,20 +987,18 @@ struct SparseIndexVectorUnion { void Reset(); -#ifndef FLATBUFFERS_CPP98_STL template void Set(T&& val) { - using RT = typename std::remove_reference::type; + typedef typename std::remove_reference::type RT; Reset(); - type = SparseIndexVectorTraits::enum_value; + type = SparseIndexVectorUnionTraits::enum_value; if (type != SparseIndexVector_NONE) { value = new RT(std::forward(val)); } } -#endif // FLATBUFFERS_CPP98_STL - static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + static void *UnPack(const void *obj, SparseIndexVector type, const ::flatbuffers::resolver_function_t *resolver); + ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const; tflite::Int32VectorT *AsInt32Vector() { return type == SparseIndexVector_Int32Vector ? @@ -671,10 +1026,10 @@ struct SparseIndexVectorUnion { } }; -bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); -bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); +bool VerifySparseIndexVector(::flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); +bool VerifySparseIndexVectorVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); -enum BuiltinOperator { +enum BuiltinOperator : int32_t { BuiltinOperator_ADD = 0, BuiltinOperator_AVERAGE_POOL_2D = 1, BuiltinOperator_CONCATENATION = 2, @@ -807,292 +1162,528 @@ enum BuiltinOperator { BuiltinOperator_CALL_ONCE = 129, BuiltinOperator_BROADCAST_TO = 130, BuiltinOperator_RFFT2D = 131, + BuiltinOperator_CONV_3D = 132, + BuiltinOperator_IMAG = 133, + BuiltinOperator_REAL = 134, + BuiltinOperator_COMPLEX_ABS = 135, + BuiltinOperator_HASHTABLE = 136, + BuiltinOperator_HASHTABLE_FIND = 137, + BuiltinOperator_HASHTABLE_IMPORT = 138, + BuiltinOperator_HASHTABLE_SIZE = 139, + BuiltinOperator_REDUCE_ALL = 140, + BuiltinOperator_CONV_3D_TRANSPOSE = 141, + BuiltinOperator_VAR_HANDLE = 142, + BuiltinOperator_READ_VARIABLE = 143, + BuiltinOperator_ASSIGN_VARIABLE = 144, + BuiltinOperator_BROADCAST_ARGS = 145, + BuiltinOperator_RANDOM_STANDARD_NORMAL = 146, + BuiltinOperator_BUCKETIZE = 147, + BuiltinOperator_RANDOM_UNIFORM = 148, + 
BuiltinOperator_MULTINOMIAL = 149, + BuiltinOperator_GELU = 150, + BuiltinOperator_DYNAMIC_UPDATE_SLICE = 151, + BuiltinOperator_RELU_0_TO_1 = 152, + BuiltinOperator_UNSORTED_SEGMENT_PROD = 153, + BuiltinOperator_UNSORTED_SEGMENT_MAX = 154, + BuiltinOperator_UNSORTED_SEGMENT_SUM = 155, + BuiltinOperator_ATAN2 = 156, + BuiltinOperator_UNSORTED_SEGMENT_MIN = 157, + BuiltinOperator_SIGN = 158, + BuiltinOperator_BITCAST = 159, + BuiltinOperator_BITWISE_XOR = 160, + BuiltinOperator_RIGHT_SHIFT = 161, + BuiltinOperator_STABLEHLO_LOGISTIC = 162, + BuiltinOperator_STABLEHLO_ADD = 163, + BuiltinOperator_STABLEHLO_DIVIDE = 164, + BuiltinOperator_STABLEHLO_MULTIPLY = 165, + BuiltinOperator_STABLEHLO_MAXIMUM = 166, + BuiltinOperator_STABLEHLO_RESHAPE = 167, + BuiltinOperator_STABLEHLO_CLAMP = 168, + BuiltinOperator_STABLEHLO_CONCATENATE = 169, + BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM = 170, + BuiltinOperator_STABLEHLO_CONVOLUTION = 171, + BuiltinOperator_STABLEHLO_SLICE = 172, + BuiltinOperator_STABLEHLO_CUSTOM_CALL = 173, + BuiltinOperator_STABLEHLO_REDUCE = 174, + BuiltinOperator_STABLEHLO_ABS = 175, + BuiltinOperator_STABLEHLO_AND = 176, + BuiltinOperator_STABLEHLO_COSINE = 177, + BuiltinOperator_STABLEHLO_EXPONENTIAL = 178, + BuiltinOperator_STABLEHLO_FLOOR = 179, + BuiltinOperator_STABLEHLO_LOG = 180, + BuiltinOperator_STABLEHLO_MINIMUM = 181, + BuiltinOperator_STABLEHLO_NEGATE = 182, + BuiltinOperator_STABLEHLO_OR = 183, + BuiltinOperator_STABLEHLO_POWER = 184, + BuiltinOperator_STABLEHLO_REMAINDER = 185, + BuiltinOperator_STABLEHLO_RSQRT = 186, + BuiltinOperator_STABLEHLO_SELECT = 187, + BuiltinOperator_STABLEHLO_SUBTRACT = 188, + BuiltinOperator_STABLEHLO_TANH = 189, + BuiltinOperator_STABLEHLO_SCATTER = 190, + BuiltinOperator_STABLEHLO_COMPARE = 191, + BuiltinOperator_STABLEHLO_CONVERT = 192, + BuiltinOperator_STABLEHLO_DYNAMIC_SLICE = 193, + BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE = 194, + BuiltinOperator_STABLEHLO_PAD = 195, + 
BuiltinOperator_STABLEHLO_IOTA = 196, + BuiltinOperator_STABLEHLO_DOT_GENERAL = 197, + BuiltinOperator_STABLEHLO_REDUCE_WINDOW = 198, + BuiltinOperator_STABLEHLO_SORT = 199, + BuiltinOperator_STABLEHLO_WHILE = 200, + BuiltinOperator_STABLEHLO_GATHER = 201, + BuiltinOperator_STABLEHLO_TRANSPOSE = 202, + BuiltinOperator_DILATE = 203, + BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR = 204, + BuiltinOperator_REDUCE_WINDOW = 205, + BuiltinOperator_STABLEHLO_COMPOSITE = 206, + BuiltinOperator_STABLEHLO_SHIFT_LEFT = 207, + BuiltinOperator_STABLEHLO_CBRT = 208, + BuiltinOperator_STABLEHLO_CASE = 209, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_RFFT2D + BuiltinOperator_MAX = BuiltinOperator_STABLEHLO_CASE }; -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[132] { +inline const BuiltinOperator (&EnumValuesBuiltinOperator())[210] { static const BuiltinOperator values[] = { - BuiltinOperator_ADD, - BuiltinOperator_AVERAGE_POOL_2D, - BuiltinOperator_CONCATENATION, - BuiltinOperator_CONV_2D, - BuiltinOperator_DEPTHWISE_CONV_2D, - BuiltinOperator_DEPTH_TO_SPACE, - BuiltinOperator_DEQUANTIZE, - BuiltinOperator_EMBEDDING_LOOKUP, - BuiltinOperator_FLOOR, - BuiltinOperator_FULLY_CONNECTED, - BuiltinOperator_HASHTABLE_LOOKUP, - BuiltinOperator_L2_NORMALIZATION, - BuiltinOperator_L2_POOL_2D, - BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, - BuiltinOperator_LOGISTIC, - BuiltinOperator_LSH_PROJECTION, - BuiltinOperator_LSTM, - BuiltinOperator_MAX_POOL_2D, - BuiltinOperator_MUL, - BuiltinOperator_RELU, - BuiltinOperator_RELU_N1_TO_1, - BuiltinOperator_RELU6, - BuiltinOperator_RESHAPE, - BuiltinOperator_RESIZE_BILINEAR, - BuiltinOperator_RNN, - BuiltinOperator_SOFTMAX, - BuiltinOperator_SPACE_TO_DEPTH, - BuiltinOperator_SVDF, - BuiltinOperator_TANH, - BuiltinOperator_CONCAT_EMBEDDINGS, - BuiltinOperator_SKIP_GRAM, - BuiltinOperator_CALL, - BuiltinOperator_CUSTOM, - BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, - BuiltinOperator_PAD, - 
BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, - BuiltinOperator_GATHER, - BuiltinOperator_BATCH_TO_SPACE_ND, - BuiltinOperator_SPACE_TO_BATCH_ND, - BuiltinOperator_TRANSPOSE, - BuiltinOperator_MEAN, - BuiltinOperator_SUB, - BuiltinOperator_DIV, - BuiltinOperator_SQUEEZE, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, - BuiltinOperator_STRIDED_SLICE, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, - BuiltinOperator_EXP, - BuiltinOperator_TOPK_V2, - BuiltinOperator_SPLIT, - BuiltinOperator_LOG_SOFTMAX, - BuiltinOperator_DELEGATE, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, - BuiltinOperator_CAST, - BuiltinOperator_PRELU, - BuiltinOperator_MAXIMUM, - BuiltinOperator_ARG_MAX, - BuiltinOperator_MINIMUM, - BuiltinOperator_LESS, - BuiltinOperator_NEG, - BuiltinOperator_PADV2, - BuiltinOperator_GREATER, - BuiltinOperator_GREATER_EQUAL, - BuiltinOperator_LESS_EQUAL, - BuiltinOperator_SELECT, - BuiltinOperator_SLICE, - BuiltinOperator_SIN, - BuiltinOperator_TRANSPOSE_CONV, - BuiltinOperator_SPARSE_TO_DENSE, - BuiltinOperator_TILE, - BuiltinOperator_EXPAND_DIMS, - BuiltinOperator_EQUAL, - BuiltinOperator_NOT_EQUAL, - BuiltinOperator_LOG, - BuiltinOperator_SUM, - BuiltinOperator_SQRT, - BuiltinOperator_RSQRT, - BuiltinOperator_SHAPE, - BuiltinOperator_POW, - BuiltinOperator_ARG_MIN, - BuiltinOperator_FAKE_QUANT, - BuiltinOperator_REDUCE_PROD, - BuiltinOperator_REDUCE_MAX, - BuiltinOperator_PACK, - BuiltinOperator_LOGICAL_OR, - BuiltinOperator_ONE_HOT, - BuiltinOperator_LOGICAL_AND, - BuiltinOperator_LOGICAL_NOT, - BuiltinOperator_UNPACK, - BuiltinOperator_REDUCE_MIN, - BuiltinOperator_FLOOR_DIV, - BuiltinOperator_REDUCE_ANY, - BuiltinOperator_SQUARE, - BuiltinOperator_ZEROS_LIKE, - BuiltinOperator_FILL, - BuiltinOperator_FLOOR_MOD, - BuiltinOperator_RANGE, - BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, - BuiltinOperator_LEAKY_RELU, - BuiltinOperator_SQUARED_DIFFERENCE, - BuiltinOperator_MIRROR_PAD, - BuiltinOperator_ABS, - BuiltinOperator_SPLIT_V, - BuiltinOperator_UNIQUE, - 
BuiltinOperator_CEIL, - BuiltinOperator_REVERSE_V2, - BuiltinOperator_ADD_N, - BuiltinOperator_GATHER_ND, - BuiltinOperator_COS, - BuiltinOperator_WHERE, - BuiltinOperator_RANK, - BuiltinOperator_ELU, - BuiltinOperator_REVERSE_SEQUENCE, - BuiltinOperator_MATRIX_DIAG, - BuiltinOperator_QUANTIZE, - BuiltinOperator_MATRIX_SET_DIAG, - BuiltinOperator_ROUND, - BuiltinOperator_HARD_SWISH, - BuiltinOperator_IF, - BuiltinOperator_WHILE, - BuiltinOperator_NON_MAX_SUPPRESSION_V4, - BuiltinOperator_NON_MAX_SUPPRESSION_V5, - BuiltinOperator_SCATTER_ND, - BuiltinOperator_SELECT_V2, - BuiltinOperator_DENSIFY, - BuiltinOperator_SEGMENT_SUM, - BuiltinOperator_BATCH_MATMUL, - BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES, - BuiltinOperator_CUMSUM, - BuiltinOperator_CALL_ONCE, - BuiltinOperator_BROADCAST_TO, - BuiltinOperator_RFFT2D}; + BuiltinOperator_ADD, + BuiltinOperator_AVERAGE_POOL_2D, + BuiltinOperator_CONCATENATION, + BuiltinOperator_CONV_2D, + BuiltinOperator_DEPTHWISE_CONV_2D, + BuiltinOperator_DEPTH_TO_SPACE, + BuiltinOperator_DEQUANTIZE, + BuiltinOperator_EMBEDDING_LOOKUP, + BuiltinOperator_FLOOR, + BuiltinOperator_FULLY_CONNECTED, + BuiltinOperator_HASHTABLE_LOOKUP, + BuiltinOperator_L2_NORMALIZATION, + BuiltinOperator_L2_POOL_2D, + BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, + BuiltinOperator_LOGISTIC, + BuiltinOperator_LSH_PROJECTION, + BuiltinOperator_LSTM, + BuiltinOperator_MAX_POOL_2D, + BuiltinOperator_MUL, + BuiltinOperator_RELU, + BuiltinOperator_RELU_N1_TO_1, + BuiltinOperator_RELU6, + BuiltinOperator_RESHAPE, + BuiltinOperator_RESIZE_BILINEAR, + BuiltinOperator_RNN, + BuiltinOperator_SOFTMAX, + BuiltinOperator_SPACE_TO_DEPTH, + BuiltinOperator_SVDF, + BuiltinOperator_TANH, + BuiltinOperator_CONCAT_EMBEDDINGS, + BuiltinOperator_SKIP_GRAM, + BuiltinOperator_CALL, + BuiltinOperator_CUSTOM, + BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, + BuiltinOperator_PAD, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, + BuiltinOperator_GATHER, + 
BuiltinOperator_BATCH_TO_SPACE_ND, + BuiltinOperator_SPACE_TO_BATCH_ND, + BuiltinOperator_TRANSPOSE, + BuiltinOperator_MEAN, + BuiltinOperator_SUB, + BuiltinOperator_DIV, + BuiltinOperator_SQUEEZE, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, + BuiltinOperator_STRIDED_SLICE, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, + BuiltinOperator_EXP, + BuiltinOperator_TOPK_V2, + BuiltinOperator_SPLIT, + BuiltinOperator_LOG_SOFTMAX, + BuiltinOperator_DELEGATE, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, + BuiltinOperator_CAST, + BuiltinOperator_PRELU, + BuiltinOperator_MAXIMUM, + BuiltinOperator_ARG_MAX, + BuiltinOperator_MINIMUM, + BuiltinOperator_LESS, + BuiltinOperator_NEG, + BuiltinOperator_PADV2, + BuiltinOperator_GREATER, + BuiltinOperator_GREATER_EQUAL, + BuiltinOperator_LESS_EQUAL, + BuiltinOperator_SELECT, + BuiltinOperator_SLICE, + BuiltinOperator_SIN, + BuiltinOperator_TRANSPOSE_CONV, + BuiltinOperator_SPARSE_TO_DENSE, + BuiltinOperator_TILE, + BuiltinOperator_EXPAND_DIMS, + BuiltinOperator_EQUAL, + BuiltinOperator_NOT_EQUAL, + BuiltinOperator_LOG, + BuiltinOperator_SUM, + BuiltinOperator_SQRT, + BuiltinOperator_RSQRT, + BuiltinOperator_SHAPE, + BuiltinOperator_POW, + BuiltinOperator_ARG_MIN, + BuiltinOperator_FAKE_QUANT, + BuiltinOperator_REDUCE_PROD, + BuiltinOperator_REDUCE_MAX, + BuiltinOperator_PACK, + BuiltinOperator_LOGICAL_OR, + BuiltinOperator_ONE_HOT, + BuiltinOperator_LOGICAL_AND, + BuiltinOperator_LOGICAL_NOT, + BuiltinOperator_UNPACK, + BuiltinOperator_REDUCE_MIN, + BuiltinOperator_FLOOR_DIV, + BuiltinOperator_REDUCE_ANY, + BuiltinOperator_SQUARE, + BuiltinOperator_ZEROS_LIKE, + BuiltinOperator_FILL, + BuiltinOperator_FLOOR_MOD, + BuiltinOperator_RANGE, + BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, + BuiltinOperator_LEAKY_RELU, + BuiltinOperator_SQUARED_DIFFERENCE, + BuiltinOperator_MIRROR_PAD, + BuiltinOperator_ABS, + BuiltinOperator_SPLIT_V, + BuiltinOperator_UNIQUE, + BuiltinOperator_CEIL, + BuiltinOperator_REVERSE_V2, + BuiltinOperator_ADD_N, 
+ BuiltinOperator_GATHER_ND, + BuiltinOperator_COS, + BuiltinOperator_WHERE, + BuiltinOperator_RANK, + BuiltinOperator_ELU, + BuiltinOperator_REVERSE_SEQUENCE, + BuiltinOperator_MATRIX_DIAG, + BuiltinOperator_QUANTIZE, + BuiltinOperator_MATRIX_SET_DIAG, + BuiltinOperator_ROUND, + BuiltinOperator_HARD_SWISH, + BuiltinOperator_IF, + BuiltinOperator_WHILE, + BuiltinOperator_NON_MAX_SUPPRESSION_V4, + BuiltinOperator_NON_MAX_SUPPRESSION_V5, + BuiltinOperator_SCATTER_ND, + BuiltinOperator_SELECT_V2, + BuiltinOperator_DENSIFY, + BuiltinOperator_SEGMENT_SUM, + BuiltinOperator_BATCH_MATMUL, + BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES, + BuiltinOperator_CUMSUM, + BuiltinOperator_CALL_ONCE, + BuiltinOperator_BROADCAST_TO, + BuiltinOperator_RFFT2D, + BuiltinOperator_CONV_3D, + BuiltinOperator_IMAG, + BuiltinOperator_REAL, + BuiltinOperator_COMPLEX_ABS, + BuiltinOperator_HASHTABLE, + BuiltinOperator_HASHTABLE_FIND, + BuiltinOperator_HASHTABLE_IMPORT, + BuiltinOperator_HASHTABLE_SIZE, + BuiltinOperator_REDUCE_ALL, + BuiltinOperator_CONV_3D_TRANSPOSE, + BuiltinOperator_VAR_HANDLE, + BuiltinOperator_READ_VARIABLE, + BuiltinOperator_ASSIGN_VARIABLE, + BuiltinOperator_BROADCAST_ARGS, + BuiltinOperator_RANDOM_STANDARD_NORMAL, + BuiltinOperator_BUCKETIZE, + BuiltinOperator_RANDOM_UNIFORM, + BuiltinOperator_MULTINOMIAL, + BuiltinOperator_GELU, + BuiltinOperator_DYNAMIC_UPDATE_SLICE, + BuiltinOperator_RELU_0_TO_1, + BuiltinOperator_UNSORTED_SEGMENT_PROD, + BuiltinOperator_UNSORTED_SEGMENT_MAX, + BuiltinOperator_UNSORTED_SEGMENT_SUM, + BuiltinOperator_ATAN2, + BuiltinOperator_UNSORTED_SEGMENT_MIN, + BuiltinOperator_SIGN, + BuiltinOperator_BITCAST, + BuiltinOperator_BITWISE_XOR, + BuiltinOperator_RIGHT_SHIFT, + BuiltinOperator_STABLEHLO_LOGISTIC, + BuiltinOperator_STABLEHLO_ADD, + BuiltinOperator_STABLEHLO_DIVIDE, + BuiltinOperator_STABLEHLO_MULTIPLY, + BuiltinOperator_STABLEHLO_MAXIMUM, + BuiltinOperator_STABLEHLO_RESHAPE, + BuiltinOperator_STABLEHLO_CLAMP, + 
BuiltinOperator_STABLEHLO_CONCATENATE, + BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM, + BuiltinOperator_STABLEHLO_CONVOLUTION, + BuiltinOperator_STABLEHLO_SLICE, + BuiltinOperator_STABLEHLO_CUSTOM_CALL, + BuiltinOperator_STABLEHLO_REDUCE, + BuiltinOperator_STABLEHLO_ABS, + BuiltinOperator_STABLEHLO_AND, + BuiltinOperator_STABLEHLO_COSINE, + BuiltinOperator_STABLEHLO_EXPONENTIAL, + BuiltinOperator_STABLEHLO_FLOOR, + BuiltinOperator_STABLEHLO_LOG, + BuiltinOperator_STABLEHLO_MINIMUM, + BuiltinOperator_STABLEHLO_NEGATE, + BuiltinOperator_STABLEHLO_OR, + BuiltinOperator_STABLEHLO_POWER, + BuiltinOperator_STABLEHLO_REMAINDER, + BuiltinOperator_STABLEHLO_RSQRT, + BuiltinOperator_STABLEHLO_SELECT, + BuiltinOperator_STABLEHLO_SUBTRACT, + BuiltinOperator_STABLEHLO_TANH, + BuiltinOperator_STABLEHLO_SCATTER, + BuiltinOperator_STABLEHLO_COMPARE, + BuiltinOperator_STABLEHLO_CONVERT, + BuiltinOperator_STABLEHLO_DYNAMIC_SLICE, + BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE, + BuiltinOperator_STABLEHLO_PAD, + BuiltinOperator_STABLEHLO_IOTA, + BuiltinOperator_STABLEHLO_DOT_GENERAL, + BuiltinOperator_STABLEHLO_REDUCE_WINDOW, + BuiltinOperator_STABLEHLO_SORT, + BuiltinOperator_STABLEHLO_WHILE, + BuiltinOperator_STABLEHLO_GATHER, + BuiltinOperator_STABLEHLO_TRANSPOSE, + BuiltinOperator_DILATE, + BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR, + BuiltinOperator_REDUCE_WINDOW, + BuiltinOperator_STABLEHLO_COMPOSITE, + BuiltinOperator_STABLEHLO_SHIFT_LEFT, + BuiltinOperator_STABLEHLO_CBRT, + BuiltinOperator_STABLEHLO_CASE + }; return values; } inline const char * const *EnumNamesBuiltinOperator() { - static const char *const names[133] = {"ADD", - "AVERAGE_POOL_2D", - "CONCATENATION", - "CONV_2D", - "DEPTHWISE_CONV_2D", - "DEPTH_TO_SPACE", - "DEQUANTIZE", - "EMBEDDING_LOOKUP", - "FLOOR", - "FULLY_CONNECTED", - "HASHTABLE_LOOKUP", - "L2_NORMALIZATION", - "L2_POOL_2D", - "LOCAL_RESPONSE_NORMALIZATION", - "LOGISTIC", - "LSH_PROJECTION", - "LSTM", - "MAX_POOL_2D", - "MUL", - "RELU", - 
"RELU_N1_TO_1", - "RELU6", - "RESHAPE", - "RESIZE_BILINEAR", - "RNN", - "SOFTMAX", - "SPACE_TO_DEPTH", - "SVDF", - "TANH", - "CONCAT_EMBEDDINGS", - "SKIP_GRAM", - "CALL", - "CUSTOM", - "EMBEDDING_LOOKUP_SPARSE", - "PAD", - "UNIDIRECTIONAL_SEQUENCE_RNN", - "GATHER", - "BATCH_TO_SPACE_ND", - "SPACE_TO_BATCH_ND", - "TRANSPOSE", - "MEAN", - "SUB", - "DIV", - "SQUEEZE", - "UNIDIRECTIONAL_SEQUENCE_LSTM", - "STRIDED_SLICE", - "BIDIRECTIONAL_SEQUENCE_RNN", - "EXP", - "TOPK_V2", - "SPLIT", - "LOG_SOFTMAX", - "DELEGATE", - "BIDIRECTIONAL_SEQUENCE_LSTM", - "CAST", - "PRELU", - "MAXIMUM", - "ARG_MAX", - "MINIMUM", - "LESS", - "NEG", - "PADV2", - "GREATER", - "GREATER_EQUAL", - "LESS_EQUAL", - "SELECT", - "SLICE", - "SIN", - "TRANSPOSE_CONV", - "SPARSE_TO_DENSE", - "TILE", - "EXPAND_DIMS", - "EQUAL", - "NOT_EQUAL", - "LOG", - "SUM", - "SQRT", - "RSQRT", - "SHAPE", - "POW", - "ARG_MIN", - "FAKE_QUANT", - "REDUCE_PROD", - "REDUCE_MAX", - "PACK", - "LOGICAL_OR", - "ONE_HOT", - "LOGICAL_AND", - "LOGICAL_NOT", - "UNPACK", - "REDUCE_MIN", - "FLOOR_DIV", - "REDUCE_ANY", - "SQUARE", - "ZEROS_LIKE", - "FILL", - "FLOOR_MOD", - "RANGE", - "RESIZE_NEAREST_NEIGHBOR", - "LEAKY_RELU", - "SQUARED_DIFFERENCE", - "MIRROR_PAD", - "ABS", - "SPLIT_V", - "UNIQUE", - "CEIL", - "REVERSE_V2", - "ADD_N", - "GATHER_ND", - "COS", - "WHERE", - "RANK", - "ELU", - "REVERSE_SEQUENCE", - "MATRIX_DIAG", - "QUANTIZE", - "MATRIX_SET_DIAG", - "ROUND", - "HARD_SWISH", - "IF", - "WHILE", - "NON_MAX_SUPPRESSION_V4", - "NON_MAX_SUPPRESSION_V5", - "SCATTER_ND", - "SELECT_V2", - "DENSIFY", - "SEGMENT_SUM", - "BATCH_MATMUL", - "PLACEHOLDER_FOR_GREATER_OP_CODES", - "CUMSUM", - "CALL_ONCE", - "BROADCAST_TO", - "RFFT2D", - nullptr}; + static const char * const names[211] = { + "ADD", + "AVERAGE_POOL_2D", + "CONCATENATION", + "CONV_2D", + "DEPTHWISE_CONV_2D", + "DEPTH_TO_SPACE", + "DEQUANTIZE", + "EMBEDDING_LOOKUP", + "FLOOR", + "FULLY_CONNECTED", + "HASHTABLE_LOOKUP", + "L2_NORMALIZATION", + "L2_POOL_2D", + 
"LOCAL_RESPONSE_NORMALIZATION", + "LOGISTIC", + "LSH_PROJECTION", + "LSTM", + "MAX_POOL_2D", + "MUL", + "RELU", + "RELU_N1_TO_1", + "RELU6", + "RESHAPE", + "RESIZE_BILINEAR", + "RNN", + "SOFTMAX", + "SPACE_TO_DEPTH", + "SVDF", + "TANH", + "CONCAT_EMBEDDINGS", + "SKIP_GRAM", + "CALL", + "CUSTOM", + "EMBEDDING_LOOKUP_SPARSE", + "PAD", + "UNIDIRECTIONAL_SEQUENCE_RNN", + "GATHER", + "BATCH_TO_SPACE_ND", + "SPACE_TO_BATCH_ND", + "TRANSPOSE", + "MEAN", + "SUB", + "DIV", + "SQUEEZE", + "UNIDIRECTIONAL_SEQUENCE_LSTM", + "STRIDED_SLICE", + "BIDIRECTIONAL_SEQUENCE_RNN", + "EXP", + "TOPK_V2", + "SPLIT", + "LOG_SOFTMAX", + "DELEGATE", + "BIDIRECTIONAL_SEQUENCE_LSTM", + "CAST", + "PRELU", + "MAXIMUM", + "ARG_MAX", + "MINIMUM", + "LESS", + "NEG", + "PADV2", + "GREATER", + "GREATER_EQUAL", + "LESS_EQUAL", + "SELECT", + "SLICE", + "SIN", + "TRANSPOSE_CONV", + "SPARSE_TO_DENSE", + "TILE", + "EXPAND_DIMS", + "EQUAL", + "NOT_EQUAL", + "LOG", + "SUM", + "SQRT", + "RSQRT", + "SHAPE", + "POW", + "ARG_MIN", + "FAKE_QUANT", + "REDUCE_PROD", + "REDUCE_MAX", + "PACK", + "LOGICAL_OR", + "ONE_HOT", + "LOGICAL_AND", + "LOGICAL_NOT", + "UNPACK", + "REDUCE_MIN", + "FLOOR_DIV", + "REDUCE_ANY", + "SQUARE", + "ZEROS_LIKE", + "FILL", + "FLOOR_MOD", + "RANGE", + "RESIZE_NEAREST_NEIGHBOR", + "LEAKY_RELU", + "SQUARED_DIFFERENCE", + "MIRROR_PAD", + "ABS", + "SPLIT_V", + "UNIQUE", + "CEIL", + "REVERSE_V2", + "ADD_N", + "GATHER_ND", + "COS", + "WHERE", + "RANK", + "ELU", + "REVERSE_SEQUENCE", + "MATRIX_DIAG", + "QUANTIZE", + "MATRIX_SET_DIAG", + "ROUND", + "HARD_SWISH", + "IF", + "WHILE", + "NON_MAX_SUPPRESSION_V4", + "NON_MAX_SUPPRESSION_V5", + "SCATTER_ND", + "SELECT_V2", + "DENSIFY", + "SEGMENT_SUM", + "BATCH_MATMUL", + "PLACEHOLDER_FOR_GREATER_OP_CODES", + "CUMSUM", + "CALL_ONCE", + "BROADCAST_TO", + "RFFT2D", + "CONV_3D", + "IMAG", + "REAL", + "COMPLEX_ABS", + "HASHTABLE", + "HASHTABLE_FIND", + "HASHTABLE_IMPORT", + "HASHTABLE_SIZE", + "REDUCE_ALL", + "CONV_3D_TRANSPOSE", + "VAR_HANDLE", + 
"READ_VARIABLE", + "ASSIGN_VARIABLE", + "BROADCAST_ARGS", + "RANDOM_STANDARD_NORMAL", + "BUCKETIZE", + "RANDOM_UNIFORM", + "MULTINOMIAL", + "GELU", + "DYNAMIC_UPDATE_SLICE", + "RELU_0_TO_1", + "UNSORTED_SEGMENT_PROD", + "UNSORTED_SEGMENT_MAX", + "UNSORTED_SEGMENT_SUM", + "ATAN2", + "UNSORTED_SEGMENT_MIN", + "SIGN", + "BITCAST", + "BITWISE_XOR", + "RIGHT_SHIFT", + "STABLEHLO_LOGISTIC", + "STABLEHLO_ADD", + "STABLEHLO_DIVIDE", + "STABLEHLO_MULTIPLY", + "STABLEHLO_MAXIMUM", + "STABLEHLO_RESHAPE", + "STABLEHLO_CLAMP", + "STABLEHLO_CONCATENATE", + "STABLEHLO_BROADCAST_IN_DIM", + "STABLEHLO_CONVOLUTION", + "STABLEHLO_SLICE", + "STABLEHLO_CUSTOM_CALL", + "STABLEHLO_REDUCE", + "STABLEHLO_ABS", + "STABLEHLO_AND", + "STABLEHLO_COSINE", + "STABLEHLO_EXPONENTIAL", + "STABLEHLO_FLOOR", + "STABLEHLO_LOG", + "STABLEHLO_MINIMUM", + "STABLEHLO_NEGATE", + "STABLEHLO_OR", + "STABLEHLO_POWER", + "STABLEHLO_REMAINDER", + "STABLEHLO_RSQRT", + "STABLEHLO_SELECT", + "STABLEHLO_SUBTRACT", + "STABLEHLO_TANH", + "STABLEHLO_SCATTER", + "STABLEHLO_COMPARE", + "STABLEHLO_CONVERT", + "STABLEHLO_DYNAMIC_SLICE", + "STABLEHLO_DYNAMIC_UPDATE_SLICE", + "STABLEHLO_PAD", + "STABLEHLO_IOTA", + "STABLEHLO_DOT_GENERAL", + "STABLEHLO_REDUCE_WINDOW", + "STABLEHLO_SORT", + "STABLEHLO_WHILE", + "STABLEHLO_GATHER", + "STABLEHLO_TRANSPOSE", + "DILATE", + "STABLEHLO_RNG_BIT_GENERATOR", + "REDUCE_WINDOW", + "STABLEHLO_COMPOSITE", + "STABLEHLO_SHIFT_LEFT", + "STABLEHLO_CBRT", + "STABLEHLO_CASE", + nullptr + }; return names; } inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_RFFT2D)) - return ""; + if (::flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_STABLEHLO_CASE)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOperator()[index]; } -enum BuiltinOptions { +enum BuiltinOptions : uint8_t { BuiltinOptions_NONE = 0, BuiltinOptions_Conv2DOptions = 1, BuiltinOptions_DepthwiseConv2DOptions = 2, @@ 
-1199,236 +1790,300 @@ enum BuiltinOptions { BuiltinOptions_CallOnceOptions = 103, BuiltinOptions_BroadcastToOptions = 104, BuiltinOptions_Rfft2dOptions = 105, + BuiltinOptions_Conv3DOptions = 106, + BuiltinOptions_HashtableOptions = 107, + BuiltinOptions_HashtableFindOptions = 108, + BuiltinOptions_HashtableImportOptions = 109, + BuiltinOptions_HashtableSizeOptions = 110, + BuiltinOptions_VarHandleOptions = 111, + BuiltinOptions_ReadVariableOptions = 112, + BuiltinOptions_AssignVariableOptions = 113, + BuiltinOptions_RandomOptions = 114, + BuiltinOptions_BucketizeOptions = 115, + BuiltinOptions_GeluOptions = 116, + BuiltinOptions_DynamicUpdateSliceOptions = 117, + BuiltinOptions_UnsortedSegmentProdOptions = 118, + BuiltinOptions_UnsortedSegmentMaxOptions = 119, + BuiltinOptions_UnsortedSegmentMinOptions = 120, + BuiltinOptions_UnsortedSegmentSumOptions = 121, + BuiltinOptions_ATan2Options = 122, + BuiltinOptions_SignOptions = 123, + BuiltinOptions_BitcastOptions = 124, + BuiltinOptions_BitwiseXorOptions = 125, + BuiltinOptions_RightShiftOptions = 126, BuiltinOptions_MIN = BuiltinOptions_NONE, - BuiltinOptions_MAX = BuiltinOptions_Rfft2dOptions + BuiltinOptions_MAX = BuiltinOptions_RightShiftOptions }; -inline const BuiltinOptions (&EnumValuesBuiltinOptions())[106] { +inline const BuiltinOptions (&EnumValuesBuiltinOptions())[127] { static const BuiltinOptions values[] = { - BuiltinOptions_NONE, - BuiltinOptions_Conv2DOptions, - BuiltinOptions_DepthwiseConv2DOptions, - BuiltinOptions_ConcatEmbeddingsOptions, - BuiltinOptions_LSHProjectionOptions, - BuiltinOptions_Pool2DOptions, - BuiltinOptions_SVDFOptions, - BuiltinOptions_RNNOptions, - BuiltinOptions_FullyConnectedOptions, - BuiltinOptions_SoftmaxOptions, - BuiltinOptions_ConcatenationOptions, - BuiltinOptions_AddOptions, - BuiltinOptions_L2NormOptions, - BuiltinOptions_LocalResponseNormalizationOptions, - BuiltinOptions_LSTMOptions, - BuiltinOptions_ResizeBilinearOptions, - BuiltinOptions_CallOptions, - 
BuiltinOptions_ReshapeOptions, - BuiltinOptions_SkipGramOptions, - BuiltinOptions_SpaceToDepthOptions, - BuiltinOptions_EmbeddingLookupSparseOptions, - BuiltinOptions_MulOptions, - BuiltinOptions_PadOptions, - BuiltinOptions_GatherOptions, - BuiltinOptions_BatchToSpaceNDOptions, - BuiltinOptions_SpaceToBatchNDOptions, - BuiltinOptions_TransposeOptions, - BuiltinOptions_ReducerOptions, - BuiltinOptions_SubOptions, - BuiltinOptions_DivOptions, - BuiltinOptions_SqueezeOptions, - BuiltinOptions_SequenceRNNOptions, - BuiltinOptions_StridedSliceOptions, - BuiltinOptions_ExpOptions, - BuiltinOptions_TopKV2Options, - BuiltinOptions_SplitOptions, - BuiltinOptions_LogSoftmaxOptions, - BuiltinOptions_CastOptions, - BuiltinOptions_DequantizeOptions, - BuiltinOptions_MaximumMinimumOptions, - BuiltinOptions_ArgMaxOptions, - BuiltinOptions_LessOptions, - BuiltinOptions_NegOptions, - BuiltinOptions_PadV2Options, - BuiltinOptions_GreaterOptions, - BuiltinOptions_GreaterEqualOptions, - BuiltinOptions_LessEqualOptions, - BuiltinOptions_SelectOptions, - BuiltinOptions_SliceOptions, - BuiltinOptions_TransposeConvOptions, - BuiltinOptions_SparseToDenseOptions, - BuiltinOptions_TileOptions, - BuiltinOptions_ExpandDimsOptions, - BuiltinOptions_EqualOptions, - BuiltinOptions_NotEqualOptions, - BuiltinOptions_ShapeOptions, - BuiltinOptions_PowOptions, - BuiltinOptions_ArgMinOptions, - BuiltinOptions_FakeQuantOptions, - BuiltinOptions_PackOptions, - BuiltinOptions_LogicalOrOptions, - BuiltinOptions_OneHotOptions, - BuiltinOptions_LogicalAndOptions, - BuiltinOptions_LogicalNotOptions, - BuiltinOptions_UnpackOptions, - BuiltinOptions_FloorDivOptions, - BuiltinOptions_SquareOptions, - BuiltinOptions_ZerosLikeOptions, - BuiltinOptions_FillOptions, - BuiltinOptions_BidirectionalSequenceLSTMOptions, - BuiltinOptions_BidirectionalSequenceRNNOptions, - BuiltinOptions_UnidirectionalSequenceLSTMOptions, - BuiltinOptions_FloorModOptions, - BuiltinOptions_RangeOptions, - 
BuiltinOptions_ResizeNearestNeighborOptions, - BuiltinOptions_LeakyReluOptions, - BuiltinOptions_SquaredDifferenceOptions, - BuiltinOptions_MirrorPadOptions, - BuiltinOptions_AbsOptions, - BuiltinOptions_SplitVOptions, - BuiltinOptions_UniqueOptions, - BuiltinOptions_ReverseV2Options, - BuiltinOptions_AddNOptions, - BuiltinOptions_GatherNdOptions, - BuiltinOptions_CosOptions, - BuiltinOptions_WhereOptions, - BuiltinOptions_RankOptions, - BuiltinOptions_ReverseSequenceOptions, - BuiltinOptions_MatrixDiagOptions, - BuiltinOptions_QuantizeOptions, - BuiltinOptions_MatrixSetDiagOptions, - BuiltinOptions_HardSwishOptions, - BuiltinOptions_IfOptions, - BuiltinOptions_WhileOptions, - BuiltinOptions_DepthToSpaceOptions, - BuiltinOptions_NonMaxSuppressionV4Options, - BuiltinOptions_NonMaxSuppressionV5Options, - BuiltinOptions_ScatterNdOptions, - BuiltinOptions_SelectV2Options, - BuiltinOptions_DensifyOptions, - BuiltinOptions_SegmentSumOptions, - BuiltinOptions_BatchMatMulOptions, - BuiltinOptions_CumsumOptions, - BuiltinOptions_CallOnceOptions, - BuiltinOptions_BroadcastToOptions, - BuiltinOptions_Rfft2dOptions}; + BuiltinOptions_NONE, + BuiltinOptions_Conv2DOptions, + BuiltinOptions_DepthwiseConv2DOptions, + BuiltinOptions_ConcatEmbeddingsOptions, + BuiltinOptions_LSHProjectionOptions, + BuiltinOptions_Pool2DOptions, + BuiltinOptions_SVDFOptions, + BuiltinOptions_RNNOptions, + BuiltinOptions_FullyConnectedOptions, + BuiltinOptions_SoftmaxOptions, + BuiltinOptions_ConcatenationOptions, + BuiltinOptions_AddOptions, + BuiltinOptions_L2NormOptions, + BuiltinOptions_LocalResponseNormalizationOptions, + BuiltinOptions_LSTMOptions, + BuiltinOptions_ResizeBilinearOptions, + BuiltinOptions_CallOptions, + BuiltinOptions_ReshapeOptions, + BuiltinOptions_SkipGramOptions, + BuiltinOptions_SpaceToDepthOptions, + BuiltinOptions_EmbeddingLookupSparseOptions, + BuiltinOptions_MulOptions, + BuiltinOptions_PadOptions, + BuiltinOptions_GatherOptions, + BuiltinOptions_BatchToSpaceNDOptions, + 
BuiltinOptions_SpaceToBatchNDOptions, + BuiltinOptions_TransposeOptions, + BuiltinOptions_ReducerOptions, + BuiltinOptions_SubOptions, + BuiltinOptions_DivOptions, + BuiltinOptions_SqueezeOptions, + BuiltinOptions_SequenceRNNOptions, + BuiltinOptions_StridedSliceOptions, + BuiltinOptions_ExpOptions, + BuiltinOptions_TopKV2Options, + BuiltinOptions_SplitOptions, + BuiltinOptions_LogSoftmaxOptions, + BuiltinOptions_CastOptions, + BuiltinOptions_DequantizeOptions, + BuiltinOptions_MaximumMinimumOptions, + BuiltinOptions_ArgMaxOptions, + BuiltinOptions_LessOptions, + BuiltinOptions_NegOptions, + BuiltinOptions_PadV2Options, + BuiltinOptions_GreaterOptions, + BuiltinOptions_GreaterEqualOptions, + BuiltinOptions_LessEqualOptions, + BuiltinOptions_SelectOptions, + BuiltinOptions_SliceOptions, + BuiltinOptions_TransposeConvOptions, + BuiltinOptions_SparseToDenseOptions, + BuiltinOptions_TileOptions, + BuiltinOptions_ExpandDimsOptions, + BuiltinOptions_EqualOptions, + BuiltinOptions_NotEqualOptions, + BuiltinOptions_ShapeOptions, + BuiltinOptions_PowOptions, + BuiltinOptions_ArgMinOptions, + BuiltinOptions_FakeQuantOptions, + BuiltinOptions_PackOptions, + BuiltinOptions_LogicalOrOptions, + BuiltinOptions_OneHotOptions, + BuiltinOptions_LogicalAndOptions, + BuiltinOptions_LogicalNotOptions, + BuiltinOptions_UnpackOptions, + BuiltinOptions_FloorDivOptions, + BuiltinOptions_SquareOptions, + BuiltinOptions_ZerosLikeOptions, + BuiltinOptions_FillOptions, + BuiltinOptions_BidirectionalSequenceLSTMOptions, + BuiltinOptions_BidirectionalSequenceRNNOptions, + BuiltinOptions_UnidirectionalSequenceLSTMOptions, + BuiltinOptions_FloorModOptions, + BuiltinOptions_RangeOptions, + BuiltinOptions_ResizeNearestNeighborOptions, + BuiltinOptions_LeakyReluOptions, + BuiltinOptions_SquaredDifferenceOptions, + BuiltinOptions_MirrorPadOptions, + BuiltinOptions_AbsOptions, + BuiltinOptions_SplitVOptions, + BuiltinOptions_UniqueOptions, + BuiltinOptions_ReverseV2Options, + 
BuiltinOptions_AddNOptions, + BuiltinOptions_GatherNdOptions, + BuiltinOptions_CosOptions, + BuiltinOptions_WhereOptions, + BuiltinOptions_RankOptions, + BuiltinOptions_ReverseSequenceOptions, + BuiltinOptions_MatrixDiagOptions, + BuiltinOptions_QuantizeOptions, + BuiltinOptions_MatrixSetDiagOptions, + BuiltinOptions_HardSwishOptions, + BuiltinOptions_IfOptions, + BuiltinOptions_WhileOptions, + BuiltinOptions_DepthToSpaceOptions, + BuiltinOptions_NonMaxSuppressionV4Options, + BuiltinOptions_NonMaxSuppressionV5Options, + BuiltinOptions_ScatterNdOptions, + BuiltinOptions_SelectV2Options, + BuiltinOptions_DensifyOptions, + BuiltinOptions_SegmentSumOptions, + BuiltinOptions_BatchMatMulOptions, + BuiltinOptions_CumsumOptions, + BuiltinOptions_CallOnceOptions, + BuiltinOptions_BroadcastToOptions, + BuiltinOptions_Rfft2dOptions, + BuiltinOptions_Conv3DOptions, + BuiltinOptions_HashtableOptions, + BuiltinOptions_HashtableFindOptions, + BuiltinOptions_HashtableImportOptions, + BuiltinOptions_HashtableSizeOptions, + BuiltinOptions_VarHandleOptions, + BuiltinOptions_ReadVariableOptions, + BuiltinOptions_AssignVariableOptions, + BuiltinOptions_RandomOptions, + BuiltinOptions_BucketizeOptions, + BuiltinOptions_GeluOptions, + BuiltinOptions_DynamicUpdateSliceOptions, + BuiltinOptions_UnsortedSegmentProdOptions, + BuiltinOptions_UnsortedSegmentMaxOptions, + BuiltinOptions_UnsortedSegmentMinOptions, + BuiltinOptions_UnsortedSegmentSumOptions, + BuiltinOptions_ATan2Options, + BuiltinOptions_SignOptions, + BuiltinOptions_BitcastOptions, + BuiltinOptions_BitwiseXorOptions, + BuiltinOptions_RightShiftOptions + }; return values; } inline const char * const *EnumNamesBuiltinOptions() { - static const char *const names[107] = {"NONE", - "Conv2DOptions", - "DepthwiseConv2DOptions", - "ConcatEmbeddingsOptions", - "LSHProjectionOptions", - "Pool2DOptions", - "SVDFOptions", - "RNNOptions", - "FullyConnectedOptions", - "SoftmaxOptions", - "ConcatenationOptions", - "AddOptions", - 
"L2NormOptions", - "LocalResponseNormalizationOptions", - "LSTMOptions", - "ResizeBilinearOptions", - "CallOptions", - "ReshapeOptions", - "SkipGramOptions", - "SpaceToDepthOptions", - "EmbeddingLookupSparseOptions", - "MulOptions", - "PadOptions", - "GatherOptions", - "BatchToSpaceNDOptions", - "SpaceToBatchNDOptions", - "TransposeOptions", - "ReducerOptions", - "SubOptions", - "DivOptions", - "SqueezeOptions", - "SequenceRNNOptions", - "StridedSliceOptions", - "ExpOptions", - "TopKV2Options", - "SplitOptions", - "LogSoftmaxOptions", - "CastOptions", - "DequantizeOptions", - "MaximumMinimumOptions", - "ArgMaxOptions", - "LessOptions", - "NegOptions", - "PadV2Options", - "GreaterOptions", - "GreaterEqualOptions", - "LessEqualOptions", - "SelectOptions", - "SliceOptions", - "TransposeConvOptions", - "SparseToDenseOptions", - "TileOptions", - "ExpandDimsOptions", - "EqualOptions", - "NotEqualOptions", - "ShapeOptions", - "PowOptions", - "ArgMinOptions", - "FakeQuantOptions", - "PackOptions", - "LogicalOrOptions", - "OneHotOptions", - "LogicalAndOptions", - "LogicalNotOptions", - "UnpackOptions", - "FloorDivOptions", - "SquareOptions", - "ZerosLikeOptions", - "FillOptions", - "BidirectionalSequenceLSTMOptions", - "BidirectionalSequenceRNNOptions", - "UnidirectionalSequenceLSTMOptions", - "FloorModOptions", - "RangeOptions", - "ResizeNearestNeighborOptions", - "LeakyReluOptions", - "SquaredDifferenceOptions", - "MirrorPadOptions", - "AbsOptions", - "SplitVOptions", - "UniqueOptions", - "ReverseV2Options", - "AddNOptions", - "GatherNdOptions", - "CosOptions", - "WhereOptions", - "RankOptions", - "ReverseSequenceOptions", - "MatrixDiagOptions", - "QuantizeOptions", - "MatrixSetDiagOptions", - "HardSwishOptions", - "IfOptions", - "WhileOptions", - "DepthToSpaceOptions", - "NonMaxSuppressionV4Options", - "NonMaxSuppressionV5Options", - "ScatterNdOptions", - "SelectV2Options", - "DensifyOptions", - "SegmentSumOptions", - "BatchMatMulOptions", - "CumsumOptions", - 
"CallOnceOptions", - "BroadcastToOptions", - "Rfft2dOptions", - nullptr}; + static const char * const names[128] = { + "NONE", + "Conv2DOptions", + "DepthwiseConv2DOptions", + "ConcatEmbeddingsOptions", + "LSHProjectionOptions", + "Pool2DOptions", + "SVDFOptions", + "RNNOptions", + "FullyConnectedOptions", + "SoftmaxOptions", + "ConcatenationOptions", + "AddOptions", + "L2NormOptions", + "LocalResponseNormalizationOptions", + "LSTMOptions", + "ResizeBilinearOptions", + "CallOptions", + "ReshapeOptions", + "SkipGramOptions", + "SpaceToDepthOptions", + "EmbeddingLookupSparseOptions", + "MulOptions", + "PadOptions", + "GatherOptions", + "BatchToSpaceNDOptions", + "SpaceToBatchNDOptions", + "TransposeOptions", + "ReducerOptions", + "SubOptions", + "DivOptions", + "SqueezeOptions", + "SequenceRNNOptions", + "StridedSliceOptions", + "ExpOptions", + "TopKV2Options", + "SplitOptions", + "LogSoftmaxOptions", + "CastOptions", + "DequantizeOptions", + "MaximumMinimumOptions", + "ArgMaxOptions", + "LessOptions", + "NegOptions", + "PadV2Options", + "GreaterOptions", + "GreaterEqualOptions", + "LessEqualOptions", + "SelectOptions", + "SliceOptions", + "TransposeConvOptions", + "SparseToDenseOptions", + "TileOptions", + "ExpandDimsOptions", + "EqualOptions", + "NotEqualOptions", + "ShapeOptions", + "PowOptions", + "ArgMinOptions", + "FakeQuantOptions", + "PackOptions", + "LogicalOrOptions", + "OneHotOptions", + "LogicalAndOptions", + "LogicalNotOptions", + "UnpackOptions", + "FloorDivOptions", + "SquareOptions", + "ZerosLikeOptions", + "FillOptions", + "BidirectionalSequenceLSTMOptions", + "BidirectionalSequenceRNNOptions", + "UnidirectionalSequenceLSTMOptions", + "FloorModOptions", + "RangeOptions", + "ResizeNearestNeighborOptions", + "LeakyReluOptions", + "SquaredDifferenceOptions", + "MirrorPadOptions", + "AbsOptions", + "SplitVOptions", + "UniqueOptions", + "ReverseV2Options", + "AddNOptions", + "GatherNdOptions", + "CosOptions", + "WhereOptions", + "RankOptions", + 
"ReverseSequenceOptions", + "MatrixDiagOptions", + "QuantizeOptions", + "MatrixSetDiagOptions", + "HardSwishOptions", + "IfOptions", + "WhileOptions", + "DepthToSpaceOptions", + "NonMaxSuppressionV4Options", + "NonMaxSuppressionV5Options", + "ScatterNdOptions", + "SelectV2Options", + "DensifyOptions", + "SegmentSumOptions", + "BatchMatMulOptions", + "CumsumOptions", + "CallOnceOptions", + "BroadcastToOptions", + "Rfft2dOptions", + "Conv3DOptions", + "HashtableOptions", + "HashtableFindOptions", + "HashtableImportOptions", + "HashtableSizeOptions", + "VarHandleOptions", + "ReadVariableOptions", + "AssignVariableOptions", + "RandomOptions", + "BucketizeOptions", + "GeluOptions", + "DynamicUpdateSliceOptions", + "UnsortedSegmentProdOptions", + "UnsortedSegmentMaxOptions", + "UnsortedSegmentMinOptions", + "UnsortedSegmentSumOptions", + "ATan2Options", + "SignOptions", + "BitcastOptions", + "BitwiseXorOptions", + "RightShiftOptions", + nullptr + }; return names; } inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { - if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, - BuiltinOptions_Rfft2dOptions)) - return ""; + if (::flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_RightShiftOptions)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOptions()[index]; } @@ -1853,12523 +2508,18872 @@ template<> struct BuiltinOptionsTraits { static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions; }; -template <> -struct BuiltinOptionsTraits { +template<> struct BuiltinOptionsTraits { static const BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions; }; -struct BuiltinOptionsUnion { - BuiltinOptions type; - void *value; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv3DOptions; +}; - BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {} - BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(BuiltinOptions_NONE), value(nullptr) 
- { std::swap(type, u.type); std::swap(value, u.value); } - BuiltinOptionsUnion(const BuiltinOptionsUnion &) FLATBUFFERS_NOEXCEPT; - BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT - { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~BuiltinOptionsUnion() { Reset(); } +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableOptions; +}; - void Reset(); +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableFindOptions; +}; -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = BuiltinOptionsTraits::enum_value; - if (type != BuiltinOptions_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableImportOptions; +}; - static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions; +}; - tflite::Conv2DOptionsT *AsConv2DOptions() { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Conv2DOptionsT *AsConv2DOptions() const { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { - return type == BuiltinOptions_DepthwiseConv2DOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Pool2DOptionsT *AsPool2DOptions() { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Pool2DOptionsT *AsPool2DOptions() const { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SVDFOptionsT *AsSVDFOptions() { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SVDFOptionsT *AsSVDFOptions() const { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RNNOptionsT *AsRNNOptions() { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RNNOptionsT *AsRNNOptions() const { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { - return type == BuiltinOptions_FullyConnectedOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::SoftmaxOptionsT *AsSoftmaxOptions() { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatenationOptionsT *AsConcatenationOptions() { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AddOptionsT *AsAddOptions() { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddOptionsT *AsAddOptions() const { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::L2NormOptionsT *AsL2NormOptions() { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::L2NormOptionsT *AsL2NormOptions() const { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSTMOptionsT *AsLSTMOptions() { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSTMOptionsT *AsLSTMOptions() const { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { - return type == BuiltinOptions_ResizeBilinearOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOptionsT *AsCallOptions() { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOptionsT *AsCallOptions() const { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReshapeOptionsT *AsReshapeOptions() { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReshapeOptionsT *AsReshapeOptions() const { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SkipGramOptionsT *AsSkipGramOptions() { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SkipGramOptionsT *AsSkipGramOptions() const { - return type == BuiltinOptions_SkipGramOptions ? 
- reinterpret_cast(value) : nullptr; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_VarHandleOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReadVariableOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AssignVariableOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RandomOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BucketizeOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GeluOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DynamicUpdateSliceOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentProdOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMaxOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMinOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentSumOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ATan2Options; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SignOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BitcastOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BitwiseXorOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const 
BuiltinOptions enum_value = BuiltinOptions_RightShiftOptions; +}; + +template struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NONE; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AddOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions; +}; + 
+template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CallOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MulOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PadOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SubOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DivOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const 
BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CastOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LessOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NegOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions; +}; + +template<> struct 
BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TileOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PowOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PackOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = 
BuiltinOptions_OneHotOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = 
BuiltinOptions_SquaredDifferenceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; +}; + +template<> struct 
BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CallOnceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv3DOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions 
enum_value = BuiltinOptions_HashtableOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableFindOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableImportOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_VarHandleOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReadVariableOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AssignVariableOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RandomOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BucketizeOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GeluOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DynamicUpdateSliceOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentProdOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMaxOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMinOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentSumOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions 
enum_value = BuiltinOptions_ATan2Options; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SignOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BitcastOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BitwiseXorOptions; +}; + +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RightShiftOptions; +}; + +struct BuiltinOptionsUnion { + BuiltinOptions type; + void *value; + + BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {} + BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT : + type(BuiltinOptions_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + BuiltinOptionsUnion(const BuiltinOptionsUnion &); + BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) + { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~BuiltinOptionsUnion() { Reset(); } + + void Reset(); + + template + void Set(T&& val) { + typedef typename std::remove_reference::type RT; + Reset(); + type = BuiltinOptionsUnionTraits::enum_value; + if (type != BuiltinOptions_NONE) { + value = new RT(std::forward(val)); + } + } + + static void *UnPack(const void *obj, BuiltinOptions type, const ::flatbuffers::resolver_function_t *resolver); + ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + tflite::Conv2DOptionsT *AsConv2DOptions() { + return type == BuiltinOptions_Conv2DOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::Conv2DOptionsT *AsConv2DOptions() const { + return type == BuiltinOptions_Conv2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { + return type == BuiltinOptions_DepthwiseConv2DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { + return type == BuiltinOptions_DepthwiseConv2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { + return type == BuiltinOptions_ConcatEmbeddingsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { + return type == BuiltinOptions_ConcatEmbeddingsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { + return type == BuiltinOptions_LSHProjectionOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { + return type == BuiltinOptions_LSHProjectionOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Pool2DOptionsT *AsPool2DOptions() { + return type == BuiltinOptions_Pool2DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Pool2DOptionsT *AsPool2DOptions() const { + return type == BuiltinOptions_Pool2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SVDFOptionsT *AsSVDFOptions() { + return type == BuiltinOptions_SVDFOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SVDFOptionsT *AsSVDFOptions() const { + return type == BuiltinOptions_SVDFOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RNNOptionsT *AsRNNOptions() { + return type == BuiltinOptions_RNNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RNNOptionsT *AsRNNOptions() const { + return type == BuiltinOptions_RNNOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { + return type == BuiltinOptions_FullyConnectedOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { + return type == BuiltinOptions_FullyConnectedOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SoftmaxOptionsT *AsSoftmaxOptions() { + return type == BuiltinOptions_SoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { + return type == BuiltinOptions_SoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ConcatenationOptionsT *AsConcatenationOptions() { + return type == BuiltinOptions_ConcatenationOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { + return type == BuiltinOptions_ConcatenationOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AddOptionsT *AsAddOptions() { + return type == BuiltinOptions_AddOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AddOptionsT *AsAddOptions() const { + return type == BuiltinOptions_AddOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::L2NormOptionsT *AsL2NormOptions() { + return type == BuiltinOptions_L2NormOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::L2NormOptionsT *AsL2NormOptions() const { + return type == BuiltinOptions_L2NormOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { + return type == BuiltinOptions_LocalResponseNormalizationOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { + return type == BuiltinOptions_LocalResponseNormalizationOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::LSTMOptionsT *AsLSTMOptions() { + return type == BuiltinOptions_LSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LSTMOptionsT *AsLSTMOptions() const { + return type == BuiltinOptions_LSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { + return type == BuiltinOptions_ResizeBilinearOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { + return type == BuiltinOptions_ResizeBilinearOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CallOptionsT *AsCallOptions() { + return type == BuiltinOptions_CallOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CallOptionsT *AsCallOptions() const { + return type == BuiltinOptions_CallOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReshapeOptionsT *AsReshapeOptions() { + return type == BuiltinOptions_ReshapeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReshapeOptionsT *AsReshapeOptions() const { + return type == BuiltinOptions_ReshapeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SkipGramOptionsT *AsSkipGramOptions() { + return type == BuiltinOptions_SkipGramOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SkipGramOptionsT *AsSkipGramOptions() const { + return type == BuiltinOptions_SkipGramOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { + return type == BuiltinOptions_SpaceToDepthOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { + return type == BuiltinOptions_SpaceToDepthOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { + return type == BuiltinOptions_EmbeddingLookupSparseOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { + return type == BuiltinOptions_EmbeddingLookupSparseOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MulOptionsT *AsMulOptions() { + return type == BuiltinOptions_MulOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MulOptionsT *AsMulOptions() const { + return type == BuiltinOptions_MulOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PadOptionsT *AsPadOptions() { + return type == BuiltinOptions_PadOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PadOptionsT *AsPadOptions() const { + return type == BuiltinOptions_PadOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GatherOptionsT *AsGatherOptions() { + return type == BuiltinOptions_GatherOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GatherOptionsT *AsGatherOptions() const { + return type == BuiltinOptions_GatherOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() { + return type == BuiltinOptions_BatchToSpaceNDOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const { + return type == BuiltinOptions_BatchToSpaceNDOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() { + return type == BuiltinOptions_SpaceToBatchNDOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const { + return type == BuiltinOptions_SpaceToBatchNDOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TransposeOptionsT *AsTransposeOptions() { + return type == BuiltinOptions_TransposeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TransposeOptionsT *AsTransposeOptions() const { + return type == BuiltinOptions_TransposeOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::ReducerOptionsT *AsReducerOptions() { + return type == BuiltinOptions_ReducerOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReducerOptionsT *AsReducerOptions() const { + return type == BuiltinOptions_ReducerOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SubOptionsT *AsSubOptions() { + return type == BuiltinOptions_SubOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SubOptionsT *AsSubOptions() const { + return type == BuiltinOptions_SubOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DivOptionsT *AsDivOptions() { + return type == BuiltinOptions_DivOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DivOptionsT *AsDivOptions() const { + return type == BuiltinOptions_DivOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SqueezeOptionsT *AsSqueezeOptions() { + return type == BuiltinOptions_SqueezeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SqueezeOptionsT *AsSqueezeOptions() const { + return type == BuiltinOptions_SqueezeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() { + return type == BuiltinOptions_SequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const { + return type == BuiltinOptions_SequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StridedSliceOptionsT *AsStridedSliceOptions() { + return type == BuiltinOptions_StridedSliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const { + return type == BuiltinOptions_StridedSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ExpOptionsT *AsExpOptions() { + return type == BuiltinOptions_ExpOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ExpOptionsT *AsExpOptions() const { + return type == BuiltinOptions_ExpOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::TopKV2OptionsT *AsTopKV2Options() { + return type == BuiltinOptions_TopKV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::TopKV2OptionsT *AsTopKV2Options() const { + return type == BuiltinOptions_TopKV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::SplitOptionsT *AsSplitOptions() { + return type == BuiltinOptions_SplitOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SplitOptionsT *AsSplitOptions() const { + return type == BuiltinOptions_SplitOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() { + return type == BuiltinOptions_LogSoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const { + return type == BuiltinOptions_LogSoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CastOptionsT *AsCastOptions() { + return type == BuiltinOptions_CastOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CastOptionsT *AsCastOptions() const { + return type == BuiltinOptions_CastOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DequantizeOptionsT *AsDequantizeOptions() { + return type == BuiltinOptions_DequantizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DequantizeOptionsT *AsDequantizeOptions() const { + return type == BuiltinOptions_DequantizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() { + return type == BuiltinOptions_MaximumMinimumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const { + return type == BuiltinOptions_MaximumMinimumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ArgMaxOptionsT *AsArgMaxOptions() { + return type == BuiltinOptions_ArgMaxOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::ArgMaxOptionsT *AsArgMaxOptions() const { + return type == BuiltinOptions_ArgMaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LessOptionsT *AsLessOptions() { + return type == BuiltinOptions_LessOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LessOptionsT *AsLessOptions() const { + return type == BuiltinOptions_LessOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::NegOptionsT *AsNegOptions() { + return type == BuiltinOptions_NegOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::NegOptionsT *AsNegOptions() const { + return type == BuiltinOptions_NegOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PadV2OptionsT *AsPadV2Options() { + return type == BuiltinOptions_PadV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::PadV2OptionsT *AsPadV2Options() const { + return type == BuiltinOptions_PadV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::GreaterOptionsT *AsGreaterOptions() { + return type == BuiltinOptions_GreaterOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GreaterOptionsT *AsGreaterOptions() const { + return type == BuiltinOptions_GreaterOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() { + return type == BuiltinOptions_GreaterEqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const { + return type == BuiltinOptions_GreaterEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LessEqualOptionsT *AsLessEqualOptions() { + return type == BuiltinOptions_LessEqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LessEqualOptionsT *AsLessEqualOptions() const { + return type == BuiltinOptions_LessEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SelectOptionsT *AsSelectOptions() { + return type == BuiltinOptions_SelectOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::SelectOptionsT *AsSelectOptions() const { + return type == BuiltinOptions_SelectOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SliceOptionsT *AsSliceOptions() { + return type == BuiltinOptions_SliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SliceOptionsT *AsSliceOptions() const { + return type == BuiltinOptions_SliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TransposeConvOptionsT *AsTransposeConvOptions() { + return type == BuiltinOptions_TransposeConvOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const { + return type == BuiltinOptions_TransposeConvOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() { + return type == BuiltinOptions_SparseToDenseOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const { + return type == BuiltinOptions_SparseToDenseOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TileOptionsT *AsTileOptions() { + return type == BuiltinOptions_TileOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TileOptionsT *AsTileOptions() const { + return type == BuiltinOptions_TileOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ExpandDimsOptionsT *AsExpandDimsOptions() { + return type == BuiltinOptions_ExpandDimsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const { + return type == BuiltinOptions_ExpandDimsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::EqualOptionsT *AsEqualOptions() { + return type == BuiltinOptions_EqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::EqualOptionsT *AsEqualOptions() const { + return type == BuiltinOptions_EqualOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::NotEqualOptionsT *AsNotEqualOptions() { + return type == BuiltinOptions_NotEqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::NotEqualOptionsT *AsNotEqualOptions() const { + return type == BuiltinOptions_NotEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ShapeOptionsT *AsShapeOptions() { + return type == BuiltinOptions_ShapeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ShapeOptionsT *AsShapeOptions() const { + return type == BuiltinOptions_ShapeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PowOptionsT *AsPowOptions() { + return type == BuiltinOptions_PowOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PowOptionsT *AsPowOptions() const { + return type == BuiltinOptions_PowOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ArgMinOptionsT *AsArgMinOptions() { + return type == BuiltinOptions_ArgMinOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ArgMinOptionsT *AsArgMinOptions() const { + return type == BuiltinOptions_ArgMinOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FakeQuantOptionsT *AsFakeQuantOptions() { + return type == BuiltinOptions_FakeQuantOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const { + return type == BuiltinOptions_FakeQuantOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PackOptionsT *AsPackOptions() { + return type == BuiltinOptions_PackOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PackOptionsT *AsPackOptions() const { + return type == BuiltinOptions_PackOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalOrOptionsT *AsLogicalOrOptions() { + return type == BuiltinOptions_LogicalOrOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const { + return type == BuiltinOptions_LogicalOrOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::OneHotOptionsT *AsOneHotOptions() { + return type == BuiltinOptions_OneHotOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::OneHotOptionsT *AsOneHotOptions() const { + return type == BuiltinOptions_OneHotOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalAndOptionsT *AsLogicalAndOptions() { + return type == BuiltinOptions_LogicalAndOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const { + return type == BuiltinOptions_LogicalAndOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalNotOptionsT *AsLogicalNotOptions() { + return type == BuiltinOptions_LogicalNotOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const { + return type == BuiltinOptions_LogicalNotOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnpackOptionsT *AsUnpackOptions() { + return type == BuiltinOptions_UnpackOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnpackOptionsT *AsUnpackOptions() const { + return type == BuiltinOptions_UnpackOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FloorDivOptionsT *AsFloorDivOptions() { + return type == BuiltinOptions_FloorDivOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FloorDivOptionsT *AsFloorDivOptions() const { + return type == BuiltinOptions_FloorDivOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SquareOptionsT *AsSquareOptions() { + return type == BuiltinOptions_SquareOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SquareOptionsT *AsSquareOptions() const { + return type == BuiltinOptions_SquareOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ZerosLikeOptionsT *AsZerosLikeOptions() { + return type == BuiltinOptions_ZerosLikeOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const { + return type == BuiltinOptions_ZerosLikeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FillOptionsT *AsFillOptions() { + return type == BuiltinOptions_FillOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FillOptionsT *AsFillOptions() const { + return type == BuiltinOptions_FillOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FloorModOptionsT *AsFloorModOptions() { + return type == BuiltinOptions_FloorModOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FloorModOptionsT *AsFloorModOptions() const { + return type == BuiltinOptions_FloorModOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::RangeOptionsT *AsRangeOptions() { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RangeOptionsT *AsRangeOptions() const { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LeakyReluOptionsT *AsLeakyReluOptions() { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { + return type == BuiltinOptions_SquaredDifferenceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { + return type == BuiltinOptions_SquaredDifferenceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MirrorPadOptionsT *AsMirrorPadOptions() { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AbsOptionsT *AsAbsOptions() { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AbsOptionsT *AsAbsOptions() const { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SplitVOptionsT *AsSplitVOptions() { + return type == BuiltinOptions_SplitVOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::SplitVOptionsT *AsSplitVOptions() const { + return type == BuiltinOptions_SplitVOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UniqueOptionsT *AsUniqueOptions() { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UniqueOptionsT *AsUniqueOptions() const { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReverseV2OptionsT *AsReverseV2Options() { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReverseV2OptionsT *AsReverseV2Options() const { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::AddNOptionsT *AsAddNOptions() { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AddNOptionsT *AsAddNOptions() const { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GatherNdOptionsT *AsGatherNdOptions() { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GatherNdOptionsT *AsGatherNdOptions() const { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CosOptionsT *AsCosOptions() { + return type == BuiltinOptions_CosOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CosOptionsT *AsCosOptions() const { + return type == BuiltinOptions_CosOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::WhereOptionsT *AsWhereOptions() { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::WhereOptionsT *AsWhereOptions() const { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RankOptionsT *AsRankOptions() { + return type == BuiltinOptions_RankOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::RankOptionsT *AsRankOptions() const { + return type == BuiltinOptions_RankOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() { + return type == BuiltinOptions_MatrixDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const { + return type == BuiltinOptions_MatrixDiagOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::QuantizeOptionsT *AsQuantizeOptions() { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::QuantizeOptionsT *AsQuantizeOptions() const { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HardSwishOptionsT *AsHardSwishOptions() { + return type == BuiltinOptions_HardSwishOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HardSwishOptionsT *AsHardSwishOptions() const { + return type == BuiltinOptions_HardSwishOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::IfOptionsT *AsIfOptions() { + return type == BuiltinOptions_IfOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::IfOptionsT *AsIfOptions() const { + return type == BuiltinOptions_IfOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::WhileOptionsT *AsWhileOptions() { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::WhileOptionsT *AsWhileOptions() const { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + tflite::ScatterNdOptionsT *AsScatterNdOptions() { + return type == BuiltinOptions_ScatterNdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ScatterNdOptionsT *AsScatterNdOptions() const { + return type == BuiltinOptions_ScatterNdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SelectV2OptionsT *AsSelectV2Options() { + return type == BuiltinOptions_SelectV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::SelectV2OptionsT *AsSelectV2Options() const { + return type == BuiltinOptions_SelectV2Options ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::DensifyOptionsT *AsDensifyOptions() { + return type == BuiltinOptions_DensifyOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DensifyOptionsT *AsDensifyOptions() const { + return type == BuiltinOptions_DensifyOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SegmentSumOptionsT *AsSegmentSumOptions() { + return type == BuiltinOptions_SegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const { + return type == BuiltinOptions_SegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() { + return type == BuiltinOptions_BatchMatMulOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const { + return type == BuiltinOptions_BatchMatMulOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CumsumOptionsT *AsCumsumOptions() { + return type == BuiltinOptions_CumsumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CumsumOptionsT *AsCumsumOptions() const { + return type == BuiltinOptions_CumsumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CallOnceOptionsT *AsCallOnceOptions() { + return type == BuiltinOptions_CallOnceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CallOnceOptionsT *AsCallOnceOptions() const { + return type == BuiltinOptions_CallOnceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BroadcastToOptionsT *AsBroadcastToOptions() { + return type == BuiltinOptions_BroadcastToOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const { + return type == BuiltinOptions_BroadcastToOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Rfft2dOptionsT *AsRfft2dOptions() { + return type == BuiltinOptions_Rfft2dOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { + return type == BuiltinOptions_Rfft2dOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Conv3DOptionsT *AsConv3DOptions() { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Conv3DOptionsT *AsConv3DOptions() const { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableOptionsT *AsHashtableOptions() { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableOptionsT *AsHashtableOptions() const { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableFindOptionsT *AsHashtableFindOptions() { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableFindOptionsT *AsHashtableFindOptions() const { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableImportOptionsT *AsHashtableImportOptions() { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableImportOptionsT *AsHashtableImportOptions() const { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() { + return type == BuiltinOptions_HashtableSizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() const { + return type == BuiltinOptions_HashtableSizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::VarHandleOptionsT *AsVarHandleOptions() { + return type == BuiltinOptions_VarHandleOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::VarHandleOptionsT *AsVarHandleOptions() const { + return type == BuiltinOptions_VarHandleOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReadVariableOptionsT *AsReadVariableOptions() { + return type == BuiltinOptions_ReadVariableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReadVariableOptionsT *AsReadVariableOptions() const { + return type == BuiltinOptions_ReadVariableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AssignVariableOptionsT *AsAssignVariableOptions() { + return type == BuiltinOptions_AssignVariableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AssignVariableOptionsT *AsAssignVariableOptions() const { + return type == BuiltinOptions_AssignVariableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RandomOptionsT *AsRandomOptions() { + return type == BuiltinOptions_RandomOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RandomOptionsT *AsRandomOptions() const { + return type == BuiltinOptions_RandomOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BucketizeOptionsT *AsBucketizeOptions() { + return type == BuiltinOptions_BucketizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BucketizeOptionsT *AsBucketizeOptions() const { + return type == BuiltinOptions_BucketizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GeluOptionsT *AsGeluOptions() { + return type == BuiltinOptions_GeluOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GeluOptionsT *AsGeluOptions() const { + return type == BuiltinOptions_GeluOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() { + return type == BuiltinOptions_DynamicUpdateSliceOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() const { + return type == BuiltinOptions_DynamicUpdateSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() { + return type == BuiltinOptions_UnsortedSegmentProdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() const { + return type == BuiltinOptions_UnsortedSegmentProdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() { + return type == BuiltinOptions_UnsortedSegmentMaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() const { + return type == BuiltinOptions_UnsortedSegmentMaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() { + return type == BuiltinOptions_UnsortedSegmentMinOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() const { + return type == BuiltinOptions_UnsortedSegmentMinOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() { + return type == BuiltinOptions_UnsortedSegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() const { + return type == BuiltinOptions_UnsortedSegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ATan2OptionsT *AsATan2Options() { + return type == BuiltinOptions_ATan2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::ATan2OptionsT *AsATan2Options() const { + return type == BuiltinOptions_ATan2Options ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::SignOptionsT *AsSignOptions() { + return type == BuiltinOptions_SignOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SignOptionsT *AsSignOptions() const { + return type == BuiltinOptions_SignOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BitcastOptionsT *AsBitcastOptions() { + return type == BuiltinOptions_BitcastOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BitcastOptionsT *AsBitcastOptions() const { + return type == BuiltinOptions_BitcastOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BitwiseXorOptionsT *AsBitwiseXorOptions() { + return type == BuiltinOptions_BitwiseXorOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BitwiseXorOptionsT *AsBitwiseXorOptions() const { + return type == BuiltinOptions_BitwiseXorOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RightShiftOptionsT *AsRightShiftOptions() { + return type == BuiltinOptions_RightShiftOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RightShiftOptionsT *AsRightShiftOptions() const { + return type == BuiltinOptions_RightShiftOptions ? 
+ reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyBuiltinOptions(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); +bool VerifyBuiltinOptionsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); + +enum BuiltinOptions2 : uint8_t { + BuiltinOptions2_NONE = 0, + BuiltinOptions2_StablehloConcatenateOptions = 1, + BuiltinOptions2_StablehloBroadcastInDimOptions = 2, + BuiltinOptions2_StablehloSliceOptions = 3, + BuiltinOptions2_StablehloConvolutionOptions = 4, + BuiltinOptions2_StablehloCustomCallOptions = 5, + BuiltinOptions2_StablehloReduceOptions = 6, + BuiltinOptions2_StablehloScatterOptions = 7, + BuiltinOptions2_StablehloCompareOptions = 8, + BuiltinOptions2_StablehloDynamicSliceOptions = 9, + BuiltinOptions2_StablehloPadOptions = 10, + BuiltinOptions2_StablehloIotaOptions = 11, + BuiltinOptions2_StablehloDotGeneralOptions = 12, + BuiltinOptions2_StablehloReduceWindowOptions = 13, + BuiltinOptions2_StablehloSortOptions = 14, + BuiltinOptions2_StablehloWhileOptions = 15, + BuiltinOptions2_StablehloGatherOptions = 16, + BuiltinOptions2_StablehloTransposeOptions = 17, + BuiltinOptions2_DilateOptions = 18, + BuiltinOptions2_StablehloRngBitGeneratorOptions = 19, + BuiltinOptions2_ReduceWindowOptions = 20, + BuiltinOptions2_StableHLOCompositeOptions = 21, + BuiltinOptions2_StablehloShiftLeftOptions = 22, + BuiltinOptions2_StablehloCaseOptions = 23, + BuiltinOptions2_MIN = BuiltinOptions2_NONE, + BuiltinOptions2_MAX = BuiltinOptions2_StablehloCaseOptions +}; + +inline const BuiltinOptions2 (&EnumValuesBuiltinOptions2())[24] { + static const BuiltinOptions2 values[] = { + BuiltinOptions2_NONE, + BuiltinOptions2_StablehloConcatenateOptions, + BuiltinOptions2_StablehloBroadcastInDimOptions, + BuiltinOptions2_StablehloSliceOptions, + BuiltinOptions2_StablehloConvolutionOptions, + BuiltinOptions2_StablehloCustomCallOptions, + 
BuiltinOptions2_StablehloReduceOptions, + BuiltinOptions2_StablehloScatterOptions, + BuiltinOptions2_StablehloCompareOptions, + BuiltinOptions2_StablehloDynamicSliceOptions, + BuiltinOptions2_StablehloPadOptions, + BuiltinOptions2_StablehloIotaOptions, + BuiltinOptions2_StablehloDotGeneralOptions, + BuiltinOptions2_StablehloReduceWindowOptions, + BuiltinOptions2_StablehloSortOptions, + BuiltinOptions2_StablehloWhileOptions, + BuiltinOptions2_StablehloGatherOptions, + BuiltinOptions2_StablehloTransposeOptions, + BuiltinOptions2_DilateOptions, + BuiltinOptions2_StablehloRngBitGeneratorOptions, + BuiltinOptions2_ReduceWindowOptions, + BuiltinOptions2_StableHLOCompositeOptions, + BuiltinOptions2_StablehloShiftLeftOptions, + BuiltinOptions2_StablehloCaseOptions + }; + return values; +} + +inline const char * const *EnumNamesBuiltinOptions2() { + static const char * const names[25] = { + "NONE", + "StablehloConcatenateOptions", + "StablehloBroadcastInDimOptions", + "StablehloSliceOptions", + "StablehloConvolutionOptions", + "StablehloCustomCallOptions", + "StablehloReduceOptions", + "StablehloScatterOptions", + "StablehloCompareOptions", + "StablehloDynamicSliceOptions", + "StablehloPadOptions", + "StablehloIotaOptions", + "StablehloDotGeneralOptions", + "StablehloReduceWindowOptions", + "StablehloSortOptions", + "StablehloWhileOptions", + "StablehloGatherOptions", + "StablehloTransposeOptions", + "DilateOptions", + "StablehloRngBitGeneratorOptions", + "ReduceWindowOptions", + "StableHLOCompositeOptions", + "StablehloShiftLeftOptions", + "StablehloCaseOptions", + nullptr + }; + return names; +} + +inline const char *EnumNameBuiltinOptions2(BuiltinOptions2 e) { + if (::flatbuffers::IsOutRange(e, BuiltinOptions2_NONE, BuiltinOptions2_StablehloCaseOptions)) return ""; + const size_t index = static_cast(e); + return EnumNamesBuiltinOptions2()[index]; +} + +template struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_NONE; +}; + 
+template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConcatenateOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloBroadcastInDimOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSliceOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConvolutionOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCustomCallOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloScatterOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCompareOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDynamicSliceOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloPadOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloIotaOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDotGeneralOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceWindowOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSortOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = 
BuiltinOptions2_StablehloWhileOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloGatherOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloTransposeOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_DilateOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloRngBitGeneratorOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_ReduceWindowOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StableHLOCompositeOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloShiftLeftOptions; +}; + +template<> struct BuiltinOptions2Traits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCaseOptions; +}; + +template struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_NONE; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConcatenateOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloBroadcastInDimOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSliceOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloConvolutionOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCustomCallOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static 
const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloScatterOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCompareOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDynamicSliceOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloPadOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloIotaOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloDotGeneralOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloReduceWindowOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloSortOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloWhileOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloGatherOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloTransposeOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_DilateOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloRngBitGeneratorOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = 
BuiltinOptions2_ReduceWindowOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StableHLOCompositeOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloShiftLeftOptions; +}; + +template<> struct BuiltinOptions2UnionTraits { + static const BuiltinOptions2 enum_value = BuiltinOptions2_StablehloCaseOptions; +}; + +struct BuiltinOptions2Union { + BuiltinOptions2 type; + void *value; + + BuiltinOptions2Union() : type(BuiltinOptions2_NONE), value(nullptr) {} + BuiltinOptions2Union(BuiltinOptions2Union&& u) FLATBUFFERS_NOEXCEPT : + type(BuiltinOptions2_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + BuiltinOptions2Union(const BuiltinOptions2Union &); + BuiltinOptions2Union &operator=(const BuiltinOptions2Union &u) + { BuiltinOptions2Union t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + BuiltinOptions2Union &operator=(BuiltinOptions2Union &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~BuiltinOptions2Union() { Reset(); } + + void Reset(); + + template + void Set(T&& val) { + typedef typename std::remove_reference::type RT; + Reset(); + type = BuiltinOptions2UnionTraits::enum_value; + if (type != BuiltinOptions2_NONE) { + value = new RT(std::forward(val)); + } + } + + static void *UnPack(const void *obj, BuiltinOptions2 type, const ::flatbuffers::resolver_function_t *resolver); + ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + tflite::StablehloConcatenateOptionsT *AsStablehloConcatenateOptions() { + return type == BuiltinOptions2_StablehloConcatenateOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::StablehloConcatenateOptionsT *AsStablehloConcatenateOptions() const { + return type == BuiltinOptions2_StablehloConcatenateOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloBroadcastInDimOptionsT *AsStablehloBroadcastInDimOptions() { + return type == BuiltinOptions2_StablehloBroadcastInDimOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloBroadcastInDimOptionsT *AsStablehloBroadcastInDimOptions() const { + return type == BuiltinOptions2_StablehloBroadcastInDimOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloSliceOptionsT *AsStablehloSliceOptions() { + return type == BuiltinOptions2_StablehloSliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloSliceOptionsT *AsStablehloSliceOptions() const { + return type == BuiltinOptions2_StablehloSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloConvolutionOptionsT *AsStablehloConvolutionOptions() { + return type == BuiltinOptions2_StablehloConvolutionOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloConvolutionOptionsT *AsStablehloConvolutionOptions() const { + return type == BuiltinOptions2_StablehloConvolutionOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloCustomCallOptionsT *AsStablehloCustomCallOptions() { + return type == BuiltinOptions2_StablehloCustomCallOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloCustomCallOptionsT *AsStablehloCustomCallOptions() const { + return type == BuiltinOptions2_StablehloCustomCallOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloReduceOptionsT *AsStablehloReduceOptions() { + return type == BuiltinOptions2_StablehloReduceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloReduceOptionsT *AsStablehloReduceOptions() const { + return type == BuiltinOptions2_StablehloReduceOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::StablehloScatterOptionsT *AsStablehloScatterOptions() { + return type == BuiltinOptions2_StablehloScatterOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloScatterOptionsT *AsStablehloScatterOptions() const { + return type == BuiltinOptions2_StablehloScatterOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloCompareOptionsT *AsStablehloCompareOptions() { + return type == BuiltinOptions2_StablehloCompareOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloCompareOptionsT *AsStablehloCompareOptions() const { + return type == BuiltinOptions2_StablehloCompareOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloDynamicSliceOptionsT *AsStablehloDynamicSliceOptions() { + return type == BuiltinOptions2_StablehloDynamicSliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloDynamicSliceOptionsT *AsStablehloDynamicSliceOptions() const { + return type == BuiltinOptions2_StablehloDynamicSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloPadOptionsT *AsStablehloPadOptions() { + return type == BuiltinOptions2_StablehloPadOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloPadOptionsT *AsStablehloPadOptions() const { + return type == BuiltinOptions2_StablehloPadOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloIotaOptionsT *AsStablehloIotaOptions() { + return type == BuiltinOptions2_StablehloIotaOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloIotaOptionsT *AsStablehloIotaOptions() const { + return type == BuiltinOptions2_StablehloIotaOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloDotGeneralOptionsT *AsStablehloDotGeneralOptions() { + return type == BuiltinOptions2_StablehloDotGeneralOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::StablehloDotGeneralOptionsT *AsStablehloDotGeneralOptions() const { + return type == BuiltinOptions2_StablehloDotGeneralOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloReduceWindowOptionsT *AsStablehloReduceWindowOptions() { + return type == BuiltinOptions2_StablehloReduceWindowOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloReduceWindowOptionsT *AsStablehloReduceWindowOptions() const { + return type == BuiltinOptions2_StablehloReduceWindowOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloSortOptionsT *AsStablehloSortOptions() { + return type == BuiltinOptions2_StablehloSortOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloSortOptionsT *AsStablehloSortOptions() const { + return type == BuiltinOptions2_StablehloSortOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloWhileOptionsT *AsStablehloWhileOptions() { + return type == BuiltinOptions2_StablehloWhileOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloWhileOptionsT *AsStablehloWhileOptions() const { + return type == BuiltinOptions2_StablehloWhileOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloGatherOptionsT *AsStablehloGatherOptions() { + return type == BuiltinOptions2_StablehloGatherOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloGatherOptionsT *AsStablehloGatherOptions() const { + return type == BuiltinOptions2_StablehloGatherOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloTransposeOptionsT *AsStablehloTransposeOptions() { + return type == BuiltinOptions2_StablehloTransposeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloTransposeOptionsT *AsStablehloTransposeOptions() const { + return type == BuiltinOptions2_StablehloTransposeOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::DilateOptionsT *AsDilateOptions() { + return type == BuiltinOptions2_DilateOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DilateOptionsT *AsDilateOptions() const { + return type == BuiltinOptions2_DilateOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloRngBitGeneratorOptionsT *AsStablehloRngBitGeneratorOptions() { + return type == BuiltinOptions2_StablehloRngBitGeneratorOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloRngBitGeneratorOptionsT *AsStablehloRngBitGeneratorOptions() const { + return type == BuiltinOptions2_StablehloRngBitGeneratorOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReduceWindowOptionsT *AsReduceWindowOptions() { + return type == BuiltinOptions2_ReduceWindowOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReduceWindowOptionsT *AsReduceWindowOptions() const { + return type == BuiltinOptions2_ReduceWindowOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StableHLOCompositeOptionsT *AsStableHLOCompositeOptions() { + return type == BuiltinOptions2_StableHLOCompositeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StableHLOCompositeOptionsT *AsStableHLOCompositeOptions() const { + return type == BuiltinOptions2_StableHLOCompositeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloShiftLeftOptionsT *AsStablehloShiftLeftOptions() { + return type == BuiltinOptions2_StablehloShiftLeftOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StablehloShiftLeftOptionsT *AsStablehloShiftLeftOptions() const { + return type == BuiltinOptions2_StablehloShiftLeftOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StablehloCaseOptionsT *AsStablehloCaseOptions() { + return type == BuiltinOptions2_StablehloCaseOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::StablehloCaseOptionsT *AsStablehloCaseOptions() const { + return type == BuiltinOptions2_StablehloCaseOptions ? + reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyBuiltinOptions2(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions2 type); +bool VerifyBuiltinOptions2Vector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); + +enum StablehloPrecisionConfig : uint32_t { + StablehloPrecisionConfig_DEFAULT = 0, + StablehloPrecisionConfig_HIGH = 1, + StablehloPrecisionConfig_HIGHEST = 2, + StablehloPrecisionConfig_MIN = StablehloPrecisionConfig_DEFAULT, + StablehloPrecisionConfig_MAX = StablehloPrecisionConfig_HIGHEST +}; + +inline const StablehloPrecisionConfig (&EnumValuesStablehloPrecisionConfig())[3] { + static const StablehloPrecisionConfig values[] = { + StablehloPrecisionConfig_DEFAULT, + StablehloPrecisionConfig_HIGH, + StablehloPrecisionConfig_HIGHEST + }; + return values; +} + +inline const char * const *EnumNamesStablehloPrecisionConfig() { + static const char * const names[4] = { + "DEFAULT", + "HIGH", + "HIGHEST", + nullptr + }; + return names; +} + +inline const char *EnumNameStablehloPrecisionConfig(StablehloPrecisionConfig e) { + if (::flatbuffers::IsOutRange(e, StablehloPrecisionConfig_DEFAULT, StablehloPrecisionConfig_HIGHEST)) return ""; + const size_t index = static_cast(e); + return EnumNamesStablehloPrecisionConfig()[index]; +} + +enum StablehloComparisonDirection : uint32_t { + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ = 0, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_NE = 1, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GE = 2, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GT = 3, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LE = 4, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT = 5, + 
StablehloComparisonDirection_MIN = StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ, + StablehloComparisonDirection_MAX = StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT +}; + +inline const StablehloComparisonDirection (&EnumValuesStablehloComparisonDirection())[6] { + static const StablehloComparisonDirection values[] = { + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_NE, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GE, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_GT, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LE, + StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT + }; + return values; +} + +inline const char * const *EnumNamesStablehloComparisonDirection() { + static const char * const names[7] = { + "STABLEHLO_COMPARISON_DIRECTION_EQ", + "STABLEHLO_COMPARISON_DIRECTION_NE", + "STABLEHLO_COMPARISON_DIRECTION_GE", + "STABLEHLO_COMPARISON_DIRECTION_GT", + "STABLEHLO_COMPARISON_DIRECTION_LE", + "STABLEHLO_COMPARISON_DIRECTION_LT", + nullptr + }; + return names; +} + +inline const char *EnumNameStablehloComparisonDirection(StablehloComparisonDirection e) { + if (::flatbuffers::IsOutRange(e, StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ, StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_LT)) return ""; + const size_t index = static_cast(e); + return EnumNamesStablehloComparisonDirection()[index]; +} + +enum StablehloComparisonType : uint32_t { + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE = 0, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT = 1, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER = 2, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_SIGNED = 3, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED = 4, + StablehloComparisonType_MIN = StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE, + 
StablehloComparisonType_MAX = StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED +}; + +inline const StablehloComparisonType (&EnumValuesStablehloComparisonType())[5] { + static const StablehloComparisonType values[] = { + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_SIGNED, + StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED + }; + return values; +} + +inline const char * const *EnumNamesStablehloComparisonType() { + static const char * const names[6] = { + "STABLEHLO_COMPARISON_TYPE_NOTYPE", + "STABLEHLO_COMPARISON_TYPE_FLOAT", + "STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER", + "STABLEHLO_COMPARISON_TYPE_SIGNED", + "STABLEHLO_COMPARISON_TYPE_UNSIGNED", + nullptr + }; + return names; +} + +inline const char *EnumNameStablehloComparisonType(StablehloComparisonType e) { + if (::flatbuffers::IsOutRange(e, StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE, StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_UNSIGNED)) return ""; + const size_t index = static_cast(e); + return EnumNamesStablehloComparisonType()[index]; +} + +enum RngAlgorithm : int8_t { + RngAlgorithm_DEFAULT = 0, + RngAlgorithm_PHILOX = 1, + RngAlgorithm_THREEFRY = 2, + RngAlgorithm_MIN = RngAlgorithm_DEFAULT, + RngAlgorithm_MAX = RngAlgorithm_THREEFRY +}; + +inline const RngAlgorithm (&EnumValuesRngAlgorithm())[3] { + static const RngAlgorithm values[] = { + RngAlgorithm_DEFAULT, + RngAlgorithm_PHILOX, + RngAlgorithm_THREEFRY + }; + return values; +} + +inline const char * const *EnumNamesRngAlgorithm() { + static const char * const names[4] = { + "DEFAULT", + "PHILOX", + "THREEFRY", + nullptr + }; + return names; +} + +inline const char *EnumNameRngAlgorithm(RngAlgorithm e) { + if (::flatbuffers::IsOutRange(e, RngAlgorithm_DEFAULT, RngAlgorithm_THREEFRY)) return ""; + const size_t index 
= static_cast(e); + return EnumNamesRngAlgorithm()[index]; +} + +enum Padding : int8_t { + Padding_SAME = 0, + Padding_VALID = 1, + Padding_MIN = Padding_SAME, + Padding_MAX = Padding_VALID +}; + +inline const Padding (&EnumValuesPadding())[2] { + static const Padding values[] = { + Padding_SAME, + Padding_VALID + }; + return values; +} + +inline const char * const *EnumNamesPadding() { + static const char * const names[3] = { + "SAME", + "VALID", + nullptr + }; + return names; +} + +inline const char *EnumNamePadding(Padding e) { + if (::flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return ""; + const size_t index = static_cast(e); + return EnumNamesPadding()[index]; +} + +enum ActivationFunctionType : int8_t { + ActivationFunctionType_NONE = 0, + ActivationFunctionType_RELU = 1, + ActivationFunctionType_RELU_N1_TO_1 = 2, + ActivationFunctionType_RELU6 = 3, + ActivationFunctionType_TANH = 4, + ActivationFunctionType_SIGN_BIT = 5, + ActivationFunctionType_MIN = ActivationFunctionType_NONE, + ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +}; + +inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] { + static const ActivationFunctionType values[] = { + ActivationFunctionType_NONE, + ActivationFunctionType_RELU, + ActivationFunctionType_RELU_N1_TO_1, + ActivationFunctionType_RELU6, + ActivationFunctionType_TANH, + ActivationFunctionType_SIGN_BIT + }; + return values; +} + +inline const char * const *EnumNamesActivationFunctionType() { + static const char * const names[7] = { + "NONE", + "RELU", + "RELU_N1_TO_1", + "RELU6", + "TANH", + "SIGN_BIT", + nullptr + }; + return names; +} + +inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) { + if (::flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return ""; + const size_t index = static_cast(e); + return EnumNamesActivationFunctionType()[index]; +} + +enum LSHProjectionType : int8_t { + 
LSHProjectionType_UNKNOWN = 0, + LSHProjectionType_SPARSE = 1, + LSHProjectionType_DENSE = 2, + LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, + LSHProjectionType_MAX = LSHProjectionType_DENSE +}; + +inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] { + static const LSHProjectionType values[] = { + LSHProjectionType_UNKNOWN, + LSHProjectionType_SPARSE, + LSHProjectionType_DENSE + }; + return values; +} + +inline const char * const *EnumNamesLSHProjectionType() { + static const char * const names[4] = { + "UNKNOWN", + "SPARSE", + "DENSE", + nullptr + }; + return names; +} + +inline const char *EnumNameLSHProjectionType(LSHProjectionType e) { + if (::flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return ""; + const size_t index = static_cast(e); + return EnumNamesLSHProjectionType()[index]; +} + +enum FullyConnectedOptionsWeightsFormat : int8_t { + FullyConnectedOptionsWeightsFormat_DEFAULT = 0, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, + FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +}; + +inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] { + static const FullyConnectedOptionsWeightsFormat values[] = { + FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 + }; + return values; +} + +inline const char * const *EnumNamesFullyConnectedOptionsWeightsFormat() { + static const char * const names[3] = { + "DEFAULT", + "SHUFFLED4x16INT8", + nullptr + }; + return names; +} + +inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) { + if (::flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return ""; + const size_t index = static_cast(e); + return 
EnumNamesFullyConnectedOptionsWeightsFormat()[index]; +} + +enum LSTMKernelType : int8_t { + LSTMKernelType_FULL = 0, + LSTMKernelType_BASIC = 1, + LSTMKernelType_MIN = LSTMKernelType_FULL, + LSTMKernelType_MAX = LSTMKernelType_BASIC +}; + +inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] { + static const LSTMKernelType values[] = { + LSTMKernelType_FULL, + LSTMKernelType_BASIC + }; + return values; +} + +inline const char * const *EnumNamesLSTMKernelType() { + static const char * const names[3] = { + "FULL", + "BASIC", + nullptr + }; + return names; +} + +inline const char *EnumNameLSTMKernelType(LSTMKernelType e) { + if (::flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return ""; + const size_t index = static_cast(e); + return EnumNamesLSTMKernelType()[index]; +} + +enum CombinerType : int8_t { + CombinerType_SUM = 0, + CombinerType_MEAN = 1, + CombinerType_SQRTN = 2, + CombinerType_MIN = CombinerType_SUM, + CombinerType_MAX = CombinerType_SQRTN +}; + +inline const CombinerType (&EnumValuesCombinerType())[3] { + static const CombinerType values[] = { + CombinerType_SUM, + CombinerType_MEAN, + CombinerType_SQRTN + }; + return values; +} + +inline const char * const *EnumNamesCombinerType() { + static const char * const names[4] = { + "SUM", + "MEAN", + "SQRTN", + nullptr + }; + return names; +} + +inline const char *EnumNameCombinerType(CombinerType e) { + if (::flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return ""; + const size_t index = static_cast(e); + return EnumNamesCombinerType()[index]; +} + +enum MirrorPadMode : int8_t { + MirrorPadMode_REFLECT = 0, + MirrorPadMode_SYMMETRIC = 1, + MirrorPadMode_MIN = MirrorPadMode_REFLECT, + MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +}; + +inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] { + static const MirrorPadMode values[] = { + MirrorPadMode_REFLECT, + MirrorPadMode_SYMMETRIC + }; + return values; +} + +inline const char * const 
*EnumNamesMirrorPadMode() { + static const char * const names[3] = { + "REFLECT", + "SYMMETRIC", + nullptr + }; + return names; +} + +inline const char *EnumNameMirrorPadMode(MirrorPadMode e) { + if (::flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return ""; + const size_t index = static_cast(e); + return EnumNamesMirrorPadMode()[index]; +} + +enum ReduceWindowFunction : int32_t { + ReduceWindowFunction_UNSUPPORTED = 0, + ReduceWindowFunction_ADD = 1, + ReduceWindowFunction_MUL = 2, + ReduceWindowFunction_MINIMUM = 3, + ReduceWindowFunction_MAXIMUM = 4, + ReduceWindowFunction_ALL = 5, + ReduceWindowFunction_ANY = 6, + ReduceWindowFunction_MIN = ReduceWindowFunction_UNSUPPORTED, + ReduceWindowFunction_MAX = ReduceWindowFunction_ANY +}; + +inline const ReduceWindowFunction (&EnumValuesReduceWindowFunction())[7] { + static const ReduceWindowFunction values[] = { + ReduceWindowFunction_UNSUPPORTED, + ReduceWindowFunction_ADD, + ReduceWindowFunction_MUL, + ReduceWindowFunction_MINIMUM, + ReduceWindowFunction_MAXIMUM, + ReduceWindowFunction_ALL, + ReduceWindowFunction_ANY + }; + return values; +} + +inline const char * const *EnumNamesReduceWindowFunction() { + static const char * const names[8] = { + "UNSUPPORTED", + "ADD", + "MUL", + "MINIMUM", + "MAXIMUM", + "ALL", + "ANY", + nullptr + }; + return names; +} + +inline const char *EnumNameReduceWindowFunction(ReduceWindowFunction e) { + if (::flatbuffers::IsOutRange(e, ReduceWindowFunction_UNSUPPORTED, ReduceWindowFunction_ANY)) return ""; + const size_t index = static_cast(e); + return EnumNamesReduceWindowFunction()[index]; +} + +enum CustomOptionsFormat : int8_t { + CustomOptionsFormat_FLEXBUFFERS = 0, + CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, + CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +}; + +inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { + static const CustomOptionsFormat values[] = { + CustomOptionsFormat_FLEXBUFFERS + }; + 
return values; +} + +inline const char * const *EnumNamesCustomOptionsFormat() { + static const char * const names[2] = { + "FLEXBUFFERS", + nullptr + }; + return names; +} + +inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) { + if (::flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return ""; + const size_t index = static_cast(e); + return EnumNamesCustomOptionsFormat()[index]; +} + +struct CustomQuantizationT : public ::flatbuffers::NativeTable { + typedef CustomQuantization TableType; + std::vector custom{}; +}; + +struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef CustomQuantizationT NativeTableType; + typedef CustomQuantizationBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CUSTOM = 4 + }; + const ::flatbuffers::Vector *custom() const { + return GetPointer *>(VT_CUSTOM); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_CUSTOM) && + verifier.VerifyVector(custom()) && + verifier.EndTable(); + } + CustomQuantizationT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CustomQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CustomQuantizationBuilder { + typedef CustomQuantization Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_custom(::flatbuffers::Offset<::flatbuffers::Vector> custom) { + fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); + } + explicit CustomQuantizationBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateCustomQuantization( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> custom = 0) { + CustomQuantizationBuilder builder_(_fbb); + builder_.add_custom(custom); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateCustomQuantizationDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *custom = nullptr) { + if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); } + auto custom__ = custom ? _fbb.CreateVector(*custom) : 0; + return tflite::CreateCustomQuantization( + _fbb, + custom__); +} + +::flatbuffers::Offset CreateCustomQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BlockwiseQuantizationT : public ::flatbuffers::NativeTable { + typedef BlockwiseQuantization TableType; + int32_t scales = 0; + int32_t zero_points = 0; + int32_t block_size = 0; +}; + +struct BlockwiseQuantization FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BlockwiseQuantizationT NativeTableType; + typedef BlockwiseQuantizationBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SCALES = 4, + VT_ZERO_POINTS = 6, + VT_BLOCK_SIZE = 8 + }; + int32_t scales() const { + return GetField(VT_SCALES, 0); + } + int32_t zero_points() const { + return GetField(VT_ZERO_POINTS, 0); + } + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SCALES, 4) && + VerifyField(verifier, VT_ZERO_POINTS, 4) && + VerifyField(verifier, VT_BLOCK_SIZE, 4) && + verifier.EndTable(); + } + BlockwiseQuantizationT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void 
UnPackTo(BlockwiseQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BlockwiseQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BlockwiseQuantizationBuilder { + typedef BlockwiseQuantization Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_scales(int32_t scales) { + fbb_.AddElement(BlockwiseQuantization::VT_SCALES, scales, 0); + } + void add_zero_points(int32_t zero_points) { + fbb_.AddElement(BlockwiseQuantization::VT_ZERO_POINTS, zero_points, 0); + } + void add_block_size(int32_t block_size) { + fbb_.AddElement(BlockwiseQuantization::VT_BLOCK_SIZE, block_size, 0); + } + explicit BlockwiseQuantizationBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBlockwiseQuantization( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t scales = 0, + int32_t zero_points = 0, + int32_t block_size = 0) { + BlockwiseQuantizationBuilder builder_(_fbb); + builder_.add_block_size(block_size); + builder_.add_zero_points(zero_points); + builder_.add_scales(scales); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateBlockwiseQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const BlockwiseQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizationParametersT : public ::flatbuffers::NativeTable { + typedef QuantizationParameters TableType; + std::vector min{}; + std::vector max{}; + std::vector scale{}; + std::vector zero_point{}; + tflite::QuantizationDetailsUnion details{}; + int32_t quantized_dimension = 0; +}; + +struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table 
{ + typedef QuantizationParametersT NativeTableType; + typedef QuantizationParametersBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MIN = 4, + VT_MAX = 6, + VT_SCALE = 8, + VT_ZERO_POINT = 10, + VT_DETAILS_TYPE = 12, + VT_DETAILS = 14, + VT_QUANTIZED_DIMENSION = 16 + }; + const ::flatbuffers::Vector *min() const { + return GetPointer *>(VT_MIN); + } + const ::flatbuffers::Vector *max() const { + return GetPointer *>(VT_MAX); + } + const ::flatbuffers::Vector *scale() const { + return GetPointer *>(VT_SCALE); + } + const ::flatbuffers::Vector *zero_point() const { + return GetPointer *>(VT_ZERO_POINT); + } + tflite::QuantizationDetails details_type() const { + return static_cast(GetField(VT_DETAILS_TYPE, 0)); + } + const void *details() const { + return GetPointer(VT_DETAILS); + } + template const T *details_as() const; + const tflite::CustomQuantization *details_as_CustomQuantization() const { + return details_type() == tflite::QuantizationDetails_CustomQuantization ? static_cast(details()) : nullptr; + } + const tflite::BlockwiseQuantization *details_as_BlockwiseQuantization() const { + return details_type() == tflite::QuantizationDetails_BlockwiseQuantization ? 
static_cast(details()) : nullptr; + } + int32_t quantized_dimension() const { + return GetField(VT_QUANTIZED_DIMENSION, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MIN) && + verifier.VerifyVector(min()) && + VerifyOffset(verifier, VT_MAX) && + verifier.VerifyVector(max()) && + VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && + VerifyOffset(verifier, VT_ZERO_POINT) && + verifier.VerifyVector(zero_point()) && + VerifyField(verifier, VT_DETAILS_TYPE, 1) && + VerifyOffset(verifier, VT_DETAILS) && + VerifyQuantizationDetails(verifier, details(), details_type()) && + VerifyField(verifier, VT_QUANTIZED_DIMENSION, 4) && + verifier.EndTable(); + } + QuantizationParametersT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizationParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as() const { + return details_as_CustomQuantization(); +} + +template<> inline const tflite::BlockwiseQuantization *QuantizationParameters::details_as() const { + return details_as_BlockwiseQuantization(); +} + +struct QuantizationParametersBuilder { + typedef QuantizationParameters Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_min(::flatbuffers::Offset<::flatbuffers::Vector> min) { + fbb_.AddOffset(QuantizationParameters::VT_MIN, min); + } + void add_max(::flatbuffers::Offset<::flatbuffers::Vector> max) { + fbb_.AddOffset(QuantizationParameters::VT_MAX, max); + } + void add_scale(::flatbuffers::Offset<::flatbuffers::Vector> scale) { + fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); + } + 
void add_zero_point(::flatbuffers::Offset<::flatbuffers::Vector> zero_point) { + fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); + } + void add_details_type(tflite::QuantizationDetails details_type) { + fbb_.AddElement(QuantizationParameters::VT_DETAILS_TYPE, static_cast(details_type), 0); + } + void add_details(::flatbuffers::Offset details) { + fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + } + void add_quantized_dimension(int32_t quantized_dimension) { + fbb_.AddElement(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0); + } + explicit QuantizationParametersBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateQuantizationParameters( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> min = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> max = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> scale = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> zero_point = 0, + tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, + ::flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { + QuantizationParametersBuilder builder_(_fbb); + builder_.add_quantized_dimension(quantized_dimension); + builder_.add_details(details); + builder_.add_zero_point(zero_point); + builder_.add_scale(scale); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_details_type(details_type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateQuantizationParametersDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *min = nullptr, + const std::vector *max = nullptr, + const std::vector *scale = nullptr, + const std::vector *zero_point = nullptr, + tflite::QuantizationDetails details_type = 
tflite::QuantizationDetails_NONE, + ::flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { + auto min__ = min ? _fbb.CreateVector(*min) : 0; + auto max__ = max ? _fbb.CreateVector(*max) : 0; + auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; + auto zero_point__ = zero_point ? _fbb.CreateVector(*zero_point) : 0; + return tflite::CreateQuantizationParameters( + _fbb, + min__, + max__, + scale__, + zero_point__, + details_type, + details, + quantized_dimension); +} + +::flatbuffers::Offset CreateQuantizationParameters(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Int32VectorT : public ::flatbuffers::NativeTable { + typedef Int32Vector TableType; + std::vector values{}; +}; + +struct Int32Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Int32VectorT NativeTableType; + typedef Int32VectorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUES = 4 + }; + const ::flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + verifier.EndTable(); + } + Int32VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Int32VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Int32VectorBuilder { + typedef Int32Vector Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_values(::flatbuffers::Offset<::flatbuffers::Vector> values) { + fbb_.AddOffset(Int32Vector::VT_VALUES, values); + } + explicit 
Int32VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateInt32Vector( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> values = 0) { + Int32VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateInt32VectorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *values = nullptr) { + auto values__ = values ? _fbb.CreateVector(*values) : 0; + return tflite::CreateInt32Vector( + _fbb, + values__); +} + +::flatbuffers::Offset CreateInt32Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Uint16VectorT : public ::flatbuffers::NativeTable { + typedef Uint16Vector TableType; + std::vector values{}; +}; + +struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Uint16VectorT NativeTableType; + typedef Uint16VectorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUES = 4 + }; + const ::flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + verifier.EndTable(); + } + Uint16VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Uint16VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Uint16VectorBuilder { + typedef 
Uint16Vector Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_values(::flatbuffers::Offset<::flatbuffers::Vector> values) { + fbb_.AddOffset(Uint16Vector::VT_VALUES, values); + } + explicit Uint16VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateUint16Vector( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> values = 0) { + Uint16VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateUint16VectorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *values = nullptr) { + if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); } + auto values__ = values ? _fbb.CreateVector(*values) : 0; + return tflite::CreateUint16Vector( + _fbb, + values__); +} + +::flatbuffers::Offset CreateUint16Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Uint8VectorT : public ::flatbuffers::NativeTable { + typedef Uint8Vector TableType; + std::vector values{}; +}; + +struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Uint8VectorT NativeTableType; + typedef Uint8VectorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUES = 4 + }; + const ::flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + verifier.EndTable(); + } + Uint8VectorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) 
const; + void UnPackTo(Uint8VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Uint8VectorBuilder { + typedef Uint8Vector Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_values(::flatbuffers::Offset<::flatbuffers::Vector> values) { + fbb_.AddOffset(Uint8Vector::VT_VALUES, values); + } + explicit Uint8VectorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateUint8Vector( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> values = 0) { + Uint8VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateUint8VectorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *values = nullptr) { + if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); } + auto values__ = values ? 
_fbb.CreateVector(*values) : 0; + return tflite::CreateUint8Vector( + _fbb, + values__); +} + +::flatbuffers::Offset CreateUint8Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DimensionMetadataT : public ::flatbuffers::NativeTable { + typedef DimensionMetadata TableType; + tflite::DimensionType format = tflite::DimensionType_DENSE; + int32_t dense_size = 0; + tflite::SparseIndexVectorUnion array_segments{}; + tflite::SparseIndexVectorUnion array_indices{}; +}; + +struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DimensionMetadataT NativeTableType; + typedef DimensionMetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_DENSE_SIZE = 6, + VT_ARRAY_SEGMENTS_TYPE = 8, + VT_ARRAY_SEGMENTS = 10, + VT_ARRAY_INDICES_TYPE = 12, + VT_ARRAY_INDICES = 14 + }; + tflite::DimensionType format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t dense_size() const { + return GetField(VT_DENSE_SIZE, 0); + } + tflite::SparseIndexVector array_segments_type() const { + return static_cast(GetField(VT_ARRAY_SEGMENTS_TYPE, 0)); + } + const void *array_segments() const { + return GetPointer(VT_ARRAY_SEGMENTS); + } + template const T *array_segments_as() const; + const tflite::Int32Vector *array_segments_as_Int32Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_segments()) : nullptr; + } + const tflite::Uint16Vector *array_segments_as_Uint16Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_segments()) : nullptr; + } + const tflite::Uint8Vector *array_segments_as_Uint8Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Uint8Vector ? 
static_cast(array_segments()) : nullptr; + } + tflite::SparseIndexVector array_indices_type() const { + return static_cast(GetField(VT_ARRAY_INDICES_TYPE, 0)); + } + const void *array_indices() const { + return GetPointer(VT_ARRAY_INDICES); + } + template const T *array_indices_as() const; + const tflite::Int32Vector *array_indices_as_Int32Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_indices()) : nullptr; + } + const tflite::Uint16Vector *array_indices_as_Uint16Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_indices()) : nullptr; + } + const tflite::Uint8Vector *array_indices_as_Uint8Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast(array_indices()) : nullptr; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT, 1) && + VerifyField(verifier, VT_DENSE_SIZE, 4) && + VerifyField(verifier, VT_ARRAY_SEGMENTS_TYPE, 1) && + VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && + VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && + VerifyField(verifier, VT_ARRAY_INDICES_TYPE, 1) && + VerifyOffset(verifier, VT_ARRAY_INDICES) && + VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && + verifier.EndTable(); + } + DimensionMetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DimensionMetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as() const { + return array_segments_as_Int32Vector(); +} + +template<> inline const tflite::Uint16Vector 
*DimensionMetadata::array_segments_as() const { + return array_segments_as_Uint16Vector(); +} + +template<> inline const tflite::Uint8Vector *DimensionMetadata::array_segments_as() const { + return array_segments_as_Uint8Vector(); +} + +template<> inline const tflite::Int32Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Int32Vector(); +} + +template<> inline const tflite::Uint16Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Uint16Vector(); +} + +template<> inline const tflite::Uint8Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Uint8Vector(); +} + +struct DimensionMetadataBuilder { + typedef DimensionMetadata Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_format(tflite::DimensionType format) { + fbb_.AddElement(DimensionMetadata::VT_FORMAT, static_cast(format), 0); + } + void add_dense_size(int32_t dense_size) { + fbb_.AddElement(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); + } + void add_array_segments_type(tflite::SparseIndexVector array_segments_type) { + fbb_.AddElement(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast(array_segments_type), 0); + } + void add_array_segments(::flatbuffers::Offset array_segments) { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); + } + void add_array_indices_type(tflite::SparseIndexVector array_indices_type) { + fbb_.AddElement(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast(array_indices_type), 0); + } + void add_array_indices(::flatbuffers::Offset array_indices) { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); + } + explicit DimensionMetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset 
CreateDimensionMetadata( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::DimensionType format = tflite::DimensionType_DENSE, + int32_t dense_size = 0, + tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE, + ::flatbuffers::Offset array_segments = 0, + tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE, + ::flatbuffers::Offset array_indices = 0) { + DimensionMetadataBuilder builder_(_fbb); + builder_.add_array_indices(array_indices); + builder_.add_array_segments(array_segments); + builder_.add_dense_size(dense_size); + builder_.add_array_indices_type(array_indices_type); + builder_.add_array_segments_type(array_segments_type); + builder_.add_format(format); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateDimensionMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SparsityParametersT : public ::flatbuffers::NativeTable { + typedef SparsityParameters TableType; + std::vector traversal_order{}; + std::vector block_map{}; + std::vector> dim_metadata{}; + SparsityParametersT() = default; + SparsityParametersT(const SparsityParametersT &o); + SparsityParametersT(SparsityParametersT&&) FLATBUFFERS_NOEXCEPT = default; + SparsityParametersT &operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SparsityParametersT NativeTableType; + typedef SparsityParametersBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TRAVERSAL_ORDER = 4, + VT_BLOCK_MAP = 6, + VT_DIM_METADATA = 8 + }; + const ::flatbuffers::Vector *traversal_order() const { + return GetPointer *>(VT_TRAVERSAL_ORDER); + } + const ::flatbuffers::Vector *block_map() const { + return GetPointer *>(VT_BLOCK_MAP); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *dim_metadata() const { + return GetPointer> 
*>(VT_DIM_METADATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && + verifier.VerifyVector(traversal_order()) && + VerifyOffset(verifier, VT_BLOCK_MAP) && + verifier.VerifyVector(block_map()) && + VerifyOffset(verifier, VT_DIM_METADATA) && + verifier.VerifyVector(dim_metadata()) && + verifier.VerifyVectorOfTables(dim_metadata()) && + verifier.EndTable(); + } + SparsityParametersT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SparsityParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SparsityParametersBuilder { + typedef SparsityParameters Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_traversal_order(::flatbuffers::Offset<::flatbuffers::Vector> traversal_order) { + fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); + } + void add_block_map(::flatbuffers::Offset<::flatbuffers::Vector> block_map) { + fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); + } + void add_dim_metadata(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> dim_metadata) { + fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); + } + explicit SparsityParametersBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSparsityParameters( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> traversal_order = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> block_map = 0, + 
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> dim_metadata = 0) { + SparsityParametersBuilder builder_(_fbb); + builder_.add_dim_metadata(dim_metadata); + builder_.add_block_map(block_map); + builder_.add_traversal_order(traversal_order); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSparsityParametersDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *traversal_order = nullptr, + const std::vector *block_map = nullptr, + const std::vector<::flatbuffers::Offset> *dim_metadata = nullptr) { + auto traversal_order__ = traversal_order ? _fbb.CreateVector(*traversal_order) : 0; + auto block_map__ = block_map ? _fbb.CreateVector(*block_map) : 0; + auto dim_metadata__ = dim_metadata ? _fbb.CreateVector<::flatbuffers::Offset>(*dim_metadata) : 0; + return tflite::CreateSparsityParameters( + _fbb, + traversal_order__, + block_map__, + dim_metadata__); +} + +::flatbuffers::Offset CreateSparsityParameters(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct VariantSubTypeT : public ::flatbuffers::NativeTable { + typedef VariantSubType TableType; + std::vector shape{}; + tflite::TensorType type = tflite::TensorType_FLOAT32; + bool has_rank = false; +}; + +struct VariantSubType FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef VariantSubTypeT NativeTableType; + typedef VariantSubTypeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_HAS_RANK = 8 + }; + const ::flatbuffers::Vector *shape() const { + return GetPointer *>(VT_SHAPE); + } + tflite::TensorType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool has_rank() const { + return GetField(VT_HAS_RANK, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SHAPE) && + 
verifier.VerifyVector(shape()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyField(verifier, VT_HAS_RANK, 1) && + verifier.EndTable(); + } + VariantSubTypeT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(VariantSubTypeT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct VariantSubTypeBuilder { + typedef VariantSubType Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_shape(::flatbuffers::Offset<::flatbuffers::Vector> shape) { + fbb_.AddOffset(VariantSubType::VT_SHAPE, shape); + } + void add_type(tflite::TensorType type) { + fbb_.AddElement(VariantSubType::VT_TYPE, static_cast(type), 0); + } + void add_has_rank(bool has_rank) { + fbb_.AddElement(VariantSubType::VT_HAS_RANK, static_cast(has_rank), 0); + } + explicit VariantSubTypeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateVariantSubType( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> shape = 0, + tflite::TensorType type = tflite::TensorType_FLOAT32, + bool has_rank = false) { + VariantSubTypeBuilder builder_(_fbb); + builder_.add_shape(shape); + builder_.add_has_rank(has_rank); + builder_.add_type(type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateVariantSubTypeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *shape = nullptr, + tflite::TensorType type = tflite::TensorType_FLOAT32, + bool has_rank = false) { + auto shape__ = shape ? 
_fbb.CreateVector(*shape) : 0; + return tflite::CreateVariantSubType( + _fbb, + shape__, + type, + has_rank); +} + +::flatbuffers::Offset CreateVariantSubType(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TensorT : public ::flatbuffers::NativeTable { + typedef Tensor TableType; + std::vector shape{}; + tflite::TensorType type = tflite::TensorType_FLOAT32; + uint32_t buffer = 0; + std::string name{}; + std::unique_ptr quantization{}; + bool is_variable = false; + std::unique_ptr sparsity{}; + std::vector shape_signature{}; + bool has_rank = false; + std::vector> variant_tensors{}; + TensorT() = default; + TensorT(const TensorT &o); + TensorT(TensorT&&) FLATBUFFERS_NOEXCEPT = default; + TensorT &operator=(TensorT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Tensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TensorT NativeTableType; + typedef TensorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_BUFFER = 8, + VT_NAME = 10, + VT_QUANTIZATION = 12, + VT_IS_VARIABLE = 14, + VT_SPARSITY = 16, + VT_SHAPE_SIGNATURE = 18, + VT_HAS_RANK = 20, + VT_VARIANT_TENSORS = 22 + }; + const ::flatbuffers::Vector *shape() const { + return GetPointer *>(VT_SHAPE); + } + tflite::TensorType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + uint32_t buffer() const { + return GetField(VT_BUFFER, 0); + } + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const tflite::QuantizationParameters *quantization() const { + return GetPointer(VT_QUANTIZATION); + } + bool is_variable() const { + return GetField(VT_IS_VARIABLE, 0) != 0; + } + const tflite::SparsityParameters *sparsity() const { + return GetPointer(VT_SPARSITY); + } + const ::flatbuffers::Vector *shape_signature() const { + return GetPointer *>(VT_SHAPE_SIGNATURE); + } + bool has_rank() const { + return 
GetField(VT_HAS_RANK, 0) != 0; + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *variant_tensors() const { + return GetPointer> *>(VT_VARIANT_TENSORS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyField(verifier, VT_BUFFER, 4) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_QUANTIZATION) && + verifier.VerifyTable(quantization()) && + VerifyField(verifier, VT_IS_VARIABLE, 1) && + VerifyOffset(verifier, VT_SPARSITY) && + verifier.VerifyTable(sparsity()) && + VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && + verifier.VerifyVector(shape_signature()) && + VerifyField(verifier, VT_HAS_RANK, 1) && + VerifyOffset(verifier, VT_VARIANT_TENSORS) && + verifier.VerifyVector(variant_tensors()) && + verifier.VerifyVectorOfTables(variant_tensors()) && + verifier.EndTable(); + } + TensorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TensorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TensorBuilder { + typedef Tensor Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_shape(::flatbuffers::Offset<::flatbuffers::Vector> shape) { + fbb_.AddOffset(Tensor::VT_SHAPE, shape); + } + void add_type(tflite::TensorType type) { + fbb_.AddElement(Tensor::VT_TYPE, static_cast(type), 0); + } + void add_buffer(uint32_t buffer) { + fbb_.AddElement(Tensor::VT_BUFFER, buffer, 0); + } + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Tensor::VT_NAME, name); + } + void add_quantization(::flatbuffers::Offset quantization) { + 
fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); + } + void add_is_variable(bool is_variable) { + fbb_.AddElement(Tensor::VT_IS_VARIABLE, static_cast(is_variable), 0); + } + void add_sparsity(::flatbuffers::Offset sparsity) { + fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); + } + void add_shape_signature(::flatbuffers::Offset<::flatbuffers::Vector> shape_signature) { + fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); + } + void add_has_rank(bool has_rank) { + fbb_.AddElement(Tensor::VT_HAS_RANK, static_cast(has_rank), 0); + } + void add_variant_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> variant_tensors) { + fbb_.AddOffset(Tensor::VT_VARIANT_TENSORS, variant_tensors); + } + explicit TensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTensor( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> shape = 0, + tflite::TensorType type = tflite::TensorType_FLOAT32, + uint32_t buffer = 0, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset quantization = 0, + bool is_variable = false, + ::flatbuffers::Offset sparsity = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> shape_signature = 0, + bool has_rank = false, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> variant_tensors = 0) { + TensorBuilder builder_(_fbb); + builder_.add_variant_tensors(variant_tensors); + builder_.add_shape_signature(shape_signature); + builder_.add_sparsity(sparsity); + builder_.add_quantization(quantization); + builder_.add_name(name); + builder_.add_buffer(buffer); + builder_.add_shape(shape); + builder_.add_has_rank(has_rank); + builder_.add_is_variable(is_variable); + builder_.add_type(type); + return builder_.Finish(); +} + +inline 
::flatbuffers::Offset CreateTensorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *shape = nullptr, + tflite::TensorType type = tflite::TensorType_FLOAT32, + uint32_t buffer = 0, + const char *name = nullptr, + ::flatbuffers::Offset quantization = 0, + bool is_variable = false, + ::flatbuffers::Offset sparsity = 0, + const std::vector *shape_signature = nullptr, + bool has_rank = false, + const std::vector<::flatbuffers::Offset> *variant_tensors = nullptr) { + auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; + auto name__ = name ? _fbb.CreateString(name) : 0; + auto shape_signature__ = shape_signature ? _fbb.CreateVector(*shape_signature) : 0; + auto variant_tensors__ = variant_tensors ? _fbb.CreateVector<::flatbuffers::Offset>(*variant_tensors) : 0; + return tflite::CreateTensor( + _fbb, + shape__, + type, + buffer, + name__, + quantization, + is_variable, + sparsity, + shape_signature__, + has_rank, + variant_tensors__); +} + +::flatbuffers::Offset CreateTensor(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloGatherOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloGatherOptions TableType; + std::vector offset_dims{}; + std::vector collapsed_slice_dims{}; + std::vector start_index_map{}; + int64_t index_vector_dim = 0; + std::vector slice_sizes{}; + bool indices_are_sorted = false; +}; + +struct StablehloGatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloGatherOptionsT NativeTableType; + typedef StablehloGatherOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OFFSET_DIMS = 4, + VT_COLLAPSED_SLICE_DIMS = 6, + VT_START_INDEX_MAP = 8, + VT_INDEX_VECTOR_DIM = 10, + VT_SLICE_SIZES = 12, + VT_INDICES_ARE_SORTED = 14 + }; + const ::flatbuffers::Vector *offset_dims() const { + return GetPointer *>(VT_OFFSET_DIMS); + } + const ::flatbuffers::Vector 
*collapsed_slice_dims() const { + return GetPointer *>(VT_COLLAPSED_SLICE_DIMS); + } + const ::flatbuffers::Vector *start_index_map() const { + return GetPointer *>(VT_START_INDEX_MAP); + } + int64_t index_vector_dim() const { + return GetField(VT_INDEX_VECTOR_DIM, 0); + } + const ::flatbuffers::Vector *slice_sizes() const { + return GetPointer *>(VT_SLICE_SIZES); + } + bool indices_are_sorted() const { + return GetField(VT_INDICES_ARE_SORTED, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OFFSET_DIMS) && + verifier.VerifyVector(offset_dims()) && + VerifyOffset(verifier, VT_COLLAPSED_SLICE_DIMS) && + verifier.VerifyVector(collapsed_slice_dims()) && + VerifyOffset(verifier, VT_START_INDEX_MAP) && + verifier.VerifyVector(start_index_map()) && + VerifyField(verifier, VT_INDEX_VECTOR_DIM, 8) && + VerifyOffset(verifier, VT_SLICE_SIZES) && + verifier.VerifyVector(slice_sizes()) && + VerifyField(verifier, VT_INDICES_ARE_SORTED, 1) && + verifier.EndTable(); + } + StablehloGatherOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloGatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloGatherOptionsBuilder { + typedef StablehloGatherOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_offset_dims(::flatbuffers::Offset<::flatbuffers::Vector> offset_dims) { + fbb_.AddOffset(StablehloGatherOptions::VT_OFFSET_DIMS, offset_dims); + } + void add_collapsed_slice_dims(::flatbuffers::Offset<::flatbuffers::Vector> collapsed_slice_dims) { + fbb_.AddOffset(StablehloGatherOptions::VT_COLLAPSED_SLICE_DIMS, collapsed_slice_dims); + } + void 
add_start_index_map(::flatbuffers::Offset<::flatbuffers::Vector> start_index_map) { + fbb_.AddOffset(StablehloGatherOptions::VT_START_INDEX_MAP, start_index_map); + } + void add_index_vector_dim(int64_t index_vector_dim) { + fbb_.AddElement(StablehloGatherOptions::VT_INDEX_VECTOR_DIM, index_vector_dim, 0); + } + void add_slice_sizes(::flatbuffers::Offset<::flatbuffers::Vector> slice_sizes) { + fbb_.AddOffset(StablehloGatherOptions::VT_SLICE_SIZES, slice_sizes); + } + void add_indices_are_sorted(bool indices_are_sorted) { + fbb_.AddElement(StablehloGatherOptions::VT_INDICES_ARE_SORTED, static_cast(indices_are_sorted), 0); + } + explicit StablehloGatherOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloGatherOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> offset_dims = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> collapsed_slice_dims = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> start_index_map = 0, + int64_t index_vector_dim = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> slice_sizes = 0, + bool indices_are_sorted = false) { + StablehloGatherOptionsBuilder builder_(_fbb); + builder_.add_index_vector_dim(index_vector_dim); + builder_.add_slice_sizes(slice_sizes); + builder_.add_start_index_map(start_index_map); + builder_.add_collapsed_slice_dims(collapsed_slice_dims); + builder_.add_offset_dims(offset_dims); + builder_.add_indices_are_sorted(indices_are_sorted); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloGatherOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *offset_dims = nullptr, + const std::vector *collapsed_slice_dims = nullptr, + const std::vector *start_index_map = nullptr, + int64_t index_vector_dim 
= 0, + const std::vector *slice_sizes = nullptr, + bool indices_are_sorted = false) { + auto offset_dims__ = offset_dims ? _fbb.CreateVector(*offset_dims) : 0; + auto collapsed_slice_dims__ = collapsed_slice_dims ? _fbb.CreateVector(*collapsed_slice_dims) : 0; + auto start_index_map__ = start_index_map ? _fbb.CreateVector(*start_index_map) : 0; + auto slice_sizes__ = slice_sizes ? _fbb.CreateVector(*slice_sizes) : 0; + return tflite::CreateStablehloGatherOptions( + _fbb, + offset_dims__, + collapsed_slice_dims__, + start_index_map__, + index_vector_dim, + slice_sizes__, + indices_are_sorted); +} + +::flatbuffers::Offset CreateStablehloGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloTransposeOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloTransposeOptions TableType; + std::vector permutation{}; +}; + +struct StablehloTransposeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloTransposeOptionsT NativeTableType; + typedef StablehloTransposeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PERMUTATION = 4 + }; + const ::flatbuffers::Vector *permutation() const { + return GetPointer *>(VT_PERMUTATION); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_PERMUTATION) && + verifier.VerifyVector(permutation()) && + verifier.EndTable(); + } + StablehloTransposeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloTransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloTransposeOptionsBuilder 
{ + typedef StablehloTransposeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_permutation(::flatbuffers::Offset<::flatbuffers::Vector> permutation) { + fbb_.AddOffset(StablehloTransposeOptions::VT_PERMUTATION, permutation); + } + explicit StablehloTransposeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloTransposeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> permutation = 0) { + StablehloTransposeOptionsBuilder builder_(_fbb); + builder_.add_permutation(permutation); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloTransposeOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *permutation = nullptr) { + auto permutation__ = permutation ? 
_fbb.CreateVector(*permutation) : 0; + return tflite::CreateStablehloTransposeOptions( + _fbb, + permutation__); +} + +::flatbuffers::Offset CreateStablehloTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloDotGeneralOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloDotGeneralOptions TableType; + std::vector lhs_batching_dimensions{}; + std::vector rhs_batching_dimensions{}; + std::vector lhs_contracting_dimensions{}; + std::vector rhs_contracting_dimensions{}; + std::vector precision_config{}; +}; + +struct StablehloDotGeneralOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloDotGeneralOptionsT NativeTableType; + typedef StablehloDotGeneralOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_LHS_BATCHING_DIMENSIONS = 4, + VT_RHS_BATCHING_DIMENSIONS = 6, + VT_LHS_CONTRACTING_DIMENSIONS = 8, + VT_RHS_CONTRACTING_DIMENSIONS = 10, + VT_PRECISION_CONFIG = 12 + }; + const ::flatbuffers::Vector *lhs_batching_dimensions() const { + return GetPointer *>(VT_LHS_BATCHING_DIMENSIONS); + } + const ::flatbuffers::Vector *rhs_batching_dimensions() const { + return GetPointer *>(VT_RHS_BATCHING_DIMENSIONS); + } + const ::flatbuffers::Vector *lhs_contracting_dimensions() const { + return GetPointer *>(VT_LHS_CONTRACTING_DIMENSIONS); + } + const ::flatbuffers::Vector *rhs_contracting_dimensions() const { + return GetPointer *>(VT_RHS_CONTRACTING_DIMENSIONS); + } + const ::flatbuffers::Vector *precision_config() const { + return GetPointer *>(VT_PRECISION_CONFIG); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_LHS_BATCHING_DIMENSIONS) && + verifier.VerifyVector(lhs_batching_dimensions()) && + VerifyOffset(verifier, VT_RHS_BATCHING_DIMENSIONS) && + 
verifier.VerifyVector(rhs_batching_dimensions()) && + VerifyOffset(verifier, VT_LHS_CONTRACTING_DIMENSIONS) && + verifier.VerifyVector(lhs_contracting_dimensions()) && + VerifyOffset(verifier, VT_RHS_CONTRACTING_DIMENSIONS) && + verifier.VerifyVector(rhs_contracting_dimensions()) && + VerifyOffset(verifier, VT_PRECISION_CONFIG) && + verifier.VerifyVector(precision_config()) && + verifier.EndTable(); + } + StablehloDotGeneralOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloDotGeneralOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloDotGeneralOptionsBuilder { + typedef StablehloDotGeneralOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_lhs_batching_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> lhs_batching_dimensions) { + fbb_.AddOffset(StablehloDotGeneralOptions::VT_LHS_BATCHING_DIMENSIONS, lhs_batching_dimensions); + } + void add_rhs_batching_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> rhs_batching_dimensions) { + fbb_.AddOffset(StablehloDotGeneralOptions::VT_RHS_BATCHING_DIMENSIONS, rhs_batching_dimensions); + } + void add_lhs_contracting_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> lhs_contracting_dimensions) { + fbb_.AddOffset(StablehloDotGeneralOptions::VT_LHS_CONTRACTING_DIMENSIONS, lhs_contracting_dimensions); + } + void add_rhs_contracting_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> rhs_contracting_dimensions) { + fbb_.AddOffset(StablehloDotGeneralOptions::VT_RHS_CONTRACTING_DIMENSIONS, rhs_contracting_dimensions); + } + void add_precision_config(::flatbuffers::Offset<::flatbuffers::Vector> precision_config) { + 
fbb_.AddOffset(StablehloDotGeneralOptions::VT_PRECISION_CONFIG, precision_config); + } + explicit StablehloDotGeneralOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloDotGeneralOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> lhs_batching_dimensions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> rhs_batching_dimensions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> lhs_contracting_dimensions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> rhs_contracting_dimensions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> precision_config = 0) { + StablehloDotGeneralOptionsBuilder builder_(_fbb); + builder_.add_precision_config(precision_config); + builder_.add_rhs_contracting_dimensions(rhs_contracting_dimensions); + builder_.add_lhs_contracting_dimensions(lhs_contracting_dimensions); + builder_.add_rhs_batching_dimensions(rhs_batching_dimensions); + builder_.add_lhs_batching_dimensions(lhs_batching_dimensions); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloDotGeneralOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *lhs_batching_dimensions = nullptr, + const std::vector *rhs_batching_dimensions = nullptr, + const std::vector *lhs_contracting_dimensions = nullptr, + const std::vector *rhs_contracting_dimensions = nullptr, + const std::vector *precision_config = nullptr) { + auto lhs_batching_dimensions__ = lhs_batching_dimensions ? _fbb.CreateVector(*lhs_batching_dimensions) : 0; + auto rhs_batching_dimensions__ = rhs_batching_dimensions ? _fbb.CreateVector(*rhs_batching_dimensions) : 0; + auto lhs_contracting_dimensions__ = lhs_contracting_dimensions ? 
_fbb.CreateVector(*lhs_contracting_dimensions) : 0; + auto rhs_contracting_dimensions__ = rhs_contracting_dimensions ? _fbb.CreateVector(*rhs_contracting_dimensions) : 0; + auto precision_config__ = precision_config ? _fbb.CreateVector(*precision_config) : 0; + return tflite::CreateStablehloDotGeneralOptions( + _fbb, + lhs_batching_dimensions__, + rhs_batching_dimensions__, + lhs_contracting_dimensions__, + rhs_contracting_dimensions__, + precision_config__); +} + +::flatbuffers::Offset CreateStablehloDotGeneralOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloReduceWindowOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloReduceWindowOptions TableType; + std::vector window_dimensions{}; + std::vector window_strides{}; + std::vector base_dilations{}; + std::vector window_dilations{}; + std::vector padding{}; + int32_t body_subgraph_index = 0; +}; + +struct StablehloReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloReduceWindowOptionsT NativeTableType; + typedef StablehloReduceWindowOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_WINDOW_DIMENSIONS = 4, + VT_WINDOW_STRIDES = 6, + VT_BASE_DILATIONS = 8, + VT_WINDOW_DILATIONS = 10, + VT_PADDING = 12, + VT_BODY_SUBGRAPH_INDEX = 14 + }; + const ::flatbuffers::Vector *window_dimensions() const { + return GetPointer *>(VT_WINDOW_DIMENSIONS); + } + const ::flatbuffers::Vector *window_strides() const { + return GetPointer *>(VT_WINDOW_STRIDES); + } + const ::flatbuffers::Vector *base_dilations() const { + return GetPointer *>(VT_BASE_DILATIONS); + } + const ::flatbuffers::Vector *window_dilations() const { + return GetPointer *>(VT_WINDOW_DILATIONS); + } + const ::flatbuffers::Vector *padding() const { + return GetPointer *>(VT_PADDING); + } + int32_t body_subgraph_index() const { + return 
GetField(VT_BODY_SUBGRAPH_INDEX, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_WINDOW_DIMENSIONS) && + verifier.VerifyVector(window_dimensions()) && + VerifyOffset(verifier, VT_WINDOW_STRIDES) && + verifier.VerifyVector(window_strides()) && + VerifyOffset(verifier, VT_BASE_DILATIONS) && + verifier.VerifyVector(base_dilations()) && + VerifyOffset(verifier, VT_WINDOW_DILATIONS) && + verifier.VerifyVector(window_dilations()) && + VerifyOffset(verifier, VT_PADDING) && + verifier.VerifyVector(padding()) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + StablehloReduceWindowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloReduceWindowOptionsBuilder { + typedef StablehloReduceWindowOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_window_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> window_dimensions) { + fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_DIMENSIONS, window_dimensions); + } + void add_window_strides(::flatbuffers::Offset<::flatbuffers::Vector> window_strides) { + fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_STRIDES, window_strides); + } + void add_base_dilations(::flatbuffers::Offset<::flatbuffers::Vector> base_dilations) { + fbb_.AddOffset(StablehloReduceWindowOptions::VT_BASE_DILATIONS, base_dilations); + } + void add_window_dilations(::flatbuffers::Offset<::flatbuffers::Vector> window_dilations) { + fbb_.AddOffset(StablehloReduceWindowOptions::VT_WINDOW_DILATIONS, window_dilations); + } + void 
add_padding(::flatbuffers::Offset<::flatbuffers::Vector> padding) { + fbb_.AddOffset(StablehloReduceWindowOptions::VT_PADDING, padding); + } + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(StablehloReduceWindowOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); + } + explicit StablehloReduceWindowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloReduceWindowOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> window_dimensions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> window_strides = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> base_dilations = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> window_dilations = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> padding = 0, + int32_t body_subgraph_index = 0) { + StablehloReduceWindowOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_padding(padding); + builder_.add_window_dilations(window_dilations); + builder_.add_base_dilations(base_dilations); + builder_.add_window_strides(window_strides); + builder_.add_window_dimensions(window_dimensions); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloReduceWindowOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *window_dimensions = nullptr, + const std::vector *window_strides = nullptr, + const std::vector *base_dilations = nullptr, + const std::vector *window_dilations = nullptr, + const std::vector *padding = nullptr, + int32_t body_subgraph_index = 0) { + auto window_dimensions__ = window_dimensions ? _fbb.CreateVector(*window_dimensions) : 0; + auto window_strides__ = window_strides ? 
_fbb.CreateVector(*window_strides) : 0; + auto base_dilations__ = base_dilations ? _fbb.CreateVector(*base_dilations) : 0; + auto window_dilations__ = window_dilations ? _fbb.CreateVector(*window_dilations) : 0; + auto padding__ = padding ? _fbb.CreateVector(*padding) : 0; + return tflite::CreateStablehloReduceWindowOptions( + _fbb, + window_dimensions__, + window_strides__, + base_dilations__, + window_dilations__, + padding__, + body_subgraph_index); +} + +::flatbuffers::Offset CreateStablehloReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloWhileOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloWhileOptions TableType; + int32_t cond_subgraph_index = 0; + int32_t body_subgraph_index = 0; +}; + +struct StablehloWhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloWhileOptionsT NativeTableType; + typedef StablehloWhileOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COND_SUBGRAPH_INDEX = 4, + VT_BODY_SUBGRAPH_INDEX = 6 + }; + int32_t cond_subgraph_index() const { + return GetField(VT_COND_SUBGRAPH_INDEX, 0); + } + int32_t body_subgraph_index() const { + return GetField(VT_BODY_SUBGRAPH_INDEX, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COND_SUBGRAPH_INDEX, 4) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + StablehloWhileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloWhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct 
StablehloWhileOptionsBuilder { + typedef StablehloWhileOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_cond_subgraph_index(int32_t cond_subgraph_index) { + fbb_.AddElement(StablehloWhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); + } + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(StablehloWhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); + } + explicit StablehloWhileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloWhileOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t cond_subgraph_index = 0, + int32_t body_subgraph_index = 0) { + StablehloWhileOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_cond_subgraph_index(cond_subgraph_index); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloSortOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloSortOptions TableType; + int64_t dimension = 0; + bool is_stable = false; + int32_t comparator_subgraph_index = 0; +}; + +struct StablehloSortOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloSortOptionsT NativeTableType; + typedef StablehloSortOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMENSION = 4, + VT_IS_STABLE = 6, + VT_COMPARATOR_SUBGRAPH_INDEX = 8 + }; + int64_t dimension() const { + return GetField(VT_DIMENSION, 0); + } + bool is_stable() const { + return GetField(VT_IS_STABLE, 0) != 0; + } + int32_t 
comparator_subgraph_index() const { + return GetField(VT_COMPARATOR_SUBGRAPH_INDEX, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DIMENSION, 8) && + VerifyField(verifier, VT_IS_STABLE, 1) && + VerifyField(verifier, VT_COMPARATOR_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + StablehloSortOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloSortOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloSortOptionsBuilder { + typedef StablehloSortOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_dimension(int64_t dimension) { + fbb_.AddElement(StablehloSortOptions::VT_DIMENSION, dimension, 0); + } + void add_is_stable(bool is_stable) { + fbb_.AddElement(StablehloSortOptions::VT_IS_STABLE, static_cast(is_stable), 0); + } + void add_comparator_subgraph_index(int32_t comparator_subgraph_index) { + fbb_.AddElement(StablehloSortOptions::VT_COMPARATOR_SUBGRAPH_INDEX, comparator_subgraph_index, 0); + } + explicit StablehloSortOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloSortOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int64_t dimension = 0, + bool is_stable = false, + int32_t comparator_subgraph_index = 0) { + StablehloSortOptionsBuilder builder_(_fbb); + builder_.add_dimension(dimension); + builder_.add_comparator_subgraph_index(comparator_subgraph_index); + builder_.add_is_stable(is_stable); + return 
builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloSortOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloConcatenateOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloConcatenateOptions TableType; + int64_t dimension = 0; +}; + +struct StablehloConcatenateOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloConcatenateOptionsT NativeTableType; + typedef StablehloConcatenateOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMENSION = 4 + }; + int64_t dimension() const { + return GetField(VT_DIMENSION, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DIMENSION, 8) && + verifier.EndTable(); + } + StablehloConcatenateOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloConcatenateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloConcatenateOptionsBuilder { + typedef StablehloConcatenateOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_dimension(int64_t dimension) { + fbb_.AddElement(StablehloConcatenateOptions::VT_DIMENSION, dimension, 0); + } + explicit StablehloConcatenateOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloConcatenateOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int64_t 
dimension = 0) { + StablehloConcatenateOptionsBuilder builder_(_fbb); + builder_.add_dimension(dimension); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloConcatenateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloBroadcastInDimOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloBroadcastInDimOptions TableType; + std::vector broadcast_dimensions{}; +}; + +struct StablehloBroadcastInDimOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloBroadcastInDimOptionsT NativeTableType; + typedef StablehloBroadcastInDimOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BROADCAST_DIMENSIONS = 4 + }; + const ::flatbuffers::Vector *broadcast_dimensions() const { + return GetPointer *>(VT_BROADCAST_DIMENSIONS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BROADCAST_DIMENSIONS) && + verifier.VerifyVector(broadcast_dimensions()) && + verifier.EndTable(); + } + StablehloBroadcastInDimOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloBroadcastInDimOptionsBuilder { + typedef StablehloBroadcastInDimOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_broadcast_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> broadcast_dimensions) { + fbb_.AddOffset(StablehloBroadcastInDimOptions::VT_BROADCAST_DIMENSIONS, broadcast_dimensions); + } + explicit 
StablehloBroadcastInDimOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloBroadcastInDimOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> broadcast_dimensions = 0) { + StablehloBroadcastInDimOptionsBuilder builder_(_fbb); + builder_.add_broadcast_dimensions(broadcast_dimensions); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloBroadcastInDimOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *broadcast_dimensions = nullptr) { + auto broadcast_dimensions__ = broadcast_dimensions ? _fbb.CreateVector(*broadcast_dimensions) : 0; + return tflite::CreateStablehloBroadcastInDimOptions( + _fbb, + broadcast_dimensions__); +} + +::flatbuffers::Offset CreateStablehloBroadcastInDimOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloCompareOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloCompareOptions TableType; + tflite::StablehloComparisonDirection comparison_direction = tflite::StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ; + tflite::StablehloComparisonType compare_type = tflite::StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE; +}; + +struct StablehloCompareOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloCompareOptionsT NativeTableType; + typedef StablehloCompareOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COMPARISON_DIRECTION = 4, + VT_COMPARE_TYPE = 6 + }; + tflite::StablehloComparisonDirection comparison_direction() const { + return static_cast(GetField(VT_COMPARISON_DIRECTION, 0)); + } + 
tflite::StablehloComparisonType compare_type() const { + return static_cast(GetField(VT_COMPARE_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COMPARISON_DIRECTION, 4) && + VerifyField(verifier, VT_COMPARE_TYPE, 4) && + verifier.EndTable(); + } + StablehloCompareOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloCompareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloCompareOptionsBuilder { + typedef StablehloCompareOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_comparison_direction(tflite::StablehloComparisonDirection comparison_direction) { + fbb_.AddElement(StablehloCompareOptions::VT_COMPARISON_DIRECTION, static_cast(comparison_direction), 0); + } + void add_compare_type(tflite::StablehloComparisonType compare_type) { + fbb_.AddElement(StablehloCompareOptions::VT_COMPARE_TYPE, static_cast(compare_type), 0); + } + explicit StablehloCompareOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloCompareOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::StablehloComparisonDirection comparison_direction = tflite::StablehloComparisonDirection_STABLEHLO_COMPARISON_DIRECTION_EQ, + tflite::StablehloComparisonType compare_type = tflite::StablehloComparisonType_STABLEHLO_COMPARISON_TYPE_NOTYPE) { + StablehloCompareOptionsBuilder builder_(_fbb); + builder_.add_compare_type(compare_type); + 
builder_.add_comparison_direction(comparison_direction); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloCompareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloDynamicSliceOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloDynamicSliceOptions TableType; + std::vector slice_sizes{}; +}; + +struct StablehloDynamicSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloDynamicSliceOptionsT NativeTableType; + typedef StablehloDynamicSliceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SLICE_SIZES = 4 + }; + const ::flatbuffers::Vector *slice_sizes() const { + return GetPointer *>(VT_SLICE_SIZES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SLICE_SIZES) && + verifier.VerifyVector(slice_sizes()) && + verifier.EndTable(); + } + StablehloDynamicSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloDynamicSliceOptionsBuilder { + typedef StablehloDynamicSliceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_slice_sizes(::flatbuffers::Offset<::flatbuffers::Vector> slice_sizes) { + fbb_.AddOffset(StablehloDynamicSliceOptions::VT_SLICE_SIZES, slice_sizes); + } + explicit StablehloDynamicSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloDynamicSliceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> slice_sizes = 0) { + StablehloDynamicSliceOptionsBuilder builder_(_fbb); + builder_.add_slice_sizes(slice_sizes); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloDynamicSliceOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *slice_sizes = nullptr) { + auto slice_sizes__ = slice_sizes ? _fbb.CreateVector(*slice_sizes) : 0; + return tflite::CreateStablehloDynamicSliceOptions( + _fbb, + slice_sizes__); +} + +::flatbuffers::Offset CreateStablehloDynamicSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloPadOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloPadOptions TableType; + std::vector edge_padding_low{}; + std::vector edge_padding_high{}; + std::vector interior_padding{}; +}; + +struct StablehloPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloPadOptionsT NativeTableType; + typedef StablehloPadOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EDGE_PADDING_LOW = 4, + VT_EDGE_PADDING_HIGH = 6, + VT_INTERIOR_PADDING = 8 + }; + const ::flatbuffers::Vector *edge_padding_low() const { + return GetPointer *>(VT_EDGE_PADDING_LOW); + } + const ::flatbuffers::Vector *edge_padding_high() const { + return GetPointer *>(VT_EDGE_PADDING_HIGH); + } + const ::flatbuffers::Vector *interior_padding() const { + return GetPointer *>(VT_INTERIOR_PADDING); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_EDGE_PADDING_LOW) && + verifier.VerifyVector(edge_padding_low()) && + VerifyOffset(verifier, 
VT_EDGE_PADDING_HIGH) && + verifier.VerifyVector(edge_padding_high()) && + VerifyOffset(verifier, VT_INTERIOR_PADDING) && + verifier.VerifyVector(interior_padding()) && + verifier.EndTable(); + } + StablehloPadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloPadOptionsBuilder { + typedef StablehloPadOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_edge_padding_low(::flatbuffers::Offset<::flatbuffers::Vector> edge_padding_low) { + fbb_.AddOffset(StablehloPadOptions::VT_EDGE_PADDING_LOW, edge_padding_low); + } + void add_edge_padding_high(::flatbuffers::Offset<::flatbuffers::Vector> edge_padding_high) { + fbb_.AddOffset(StablehloPadOptions::VT_EDGE_PADDING_HIGH, edge_padding_high); + } + void add_interior_padding(::flatbuffers::Offset<::flatbuffers::Vector> interior_padding) { + fbb_.AddOffset(StablehloPadOptions::VT_INTERIOR_PADDING, interior_padding); + } + explicit StablehloPadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloPadOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> edge_padding_low = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> edge_padding_high = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> interior_padding = 0) { + StablehloPadOptionsBuilder builder_(_fbb); + builder_.add_interior_padding(interior_padding); + builder_.add_edge_padding_high(edge_padding_high); + 
builder_.add_edge_padding_low(edge_padding_low); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloPadOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *edge_padding_low = nullptr, + const std::vector *edge_padding_high = nullptr, + const std::vector *interior_padding = nullptr) { + auto edge_padding_low__ = edge_padding_low ? _fbb.CreateVector(*edge_padding_low) : 0; + auto edge_padding_high__ = edge_padding_high ? _fbb.CreateVector(*edge_padding_high) : 0; + auto interior_padding__ = interior_padding ? _fbb.CreateVector(*interior_padding) : 0; + return tflite::CreateStablehloPadOptions( + _fbb, + edge_padding_low__, + edge_padding_high__, + interior_padding__); +} + +::flatbuffers::Offset CreateStablehloPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloIotaOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloIotaOptions TableType; + int64_t iota_dimension = 0; +}; + +struct StablehloIotaOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloIotaOptionsT NativeTableType; + typedef StablehloIotaOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IOTA_DIMENSION = 4 + }; + int64_t iota_dimension() const { + return GetField(VT_IOTA_DIMENSION, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IOTA_DIMENSION, 8) && + verifier.EndTable(); + } + StablehloIotaOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloIotaOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct 
StablehloIotaOptionsBuilder { + typedef StablehloIotaOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_iota_dimension(int64_t iota_dimension) { + fbb_.AddElement(StablehloIotaOptions::VT_IOTA_DIMENSION, iota_dimension, 0); + } + explicit StablehloIotaOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloIotaOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int64_t iota_dimension = 0) { + StablehloIotaOptionsBuilder builder_(_fbb); + builder_.add_iota_dimension(iota_dimension); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloIotaOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloCustomCallOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloCustomCallOptions TableType; + std::string call_target_name{}; + bool has_side_effect = false; + std::string backend_config{}; + int32_t api_version = 0; + std::vector called_computations{}; + std::vector custom_attributes{}; +}; + +struct StablehloCustomCallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloCustomCallOptionsT NativeTableType; + typedef StablehloCustomCallOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CALL_TARGET_NAME = 4, + VT_HAS_SIDE_EFFECT = 6, + VT_BACKEND_CONFIG = 8, + VT_API_VERSION = 10, + VT_CALLED_COMPUTATIONS = 12, + VT_CUSTOM_ATTRIBUTES = 14 + }; + const ::flatbuffers::String *call_target_name() const { + return GetPointer(VT_CALL_TARGET_NAME); + } + bool has_side_effect() const { + return GetField(VT_HAS_SIDE_EFFECT, 0) != 0; + } + const ::flatbuffers::String *backend_config() const { + 
return GetPointer(VT_BACKEND_CONFIG); + } + int32_t api_version() const { + return GetField(VT_API_VERSION, 0); + } + const ::flatbuffers::Vector *called_computations() const { + return GetPointer *>(VT_CALLED_COMPUTATIONS); + } + const ::flatbuffers::Vector *custom_attributes() const { + return GetPointer *>(VT_CUSTOM_ATTRIBUTES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_CALL_TARGET_NAME) && + verifier.VerifyString(call_target_name()) && + VerifyField(verifier, VT_HAS_SIDE_EFFECT, 1) && + VerifyOffset(verifier, VT_BACKEND_CONFIG) && + verifier.VerifyString(backend_config()) && + VerifyField(verifier, VT_API_VERSION, 4) && + VerifyOffset(verifier, VT_CALLED_COMPUTATIONS) && + verifier.VerifyVector(called_computations()) && + VerifyOffset(verifier, VT_CUSTOM_ATTRIBUTES) && + verifier.VerifyVector(custom_attributes()) && + verifier.EndTable(); + } + StablehloCustomCallOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloCustomCallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloCustomCallOptionsBuilder { + typedef StablehloCustomCallOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_call_target_name(::flatbuffers::Offset<::flatbuffers::String> call_target_name) { + fbb_.AddOffset(StablehloCustomCallOptions::VT_CALL_TARGET_NAME, call_target_name); + } + void add_has_side_effect(bool has_side_effect) { + fbb_.AddElement(StablehloCustomCallOptions::VT_HAS_SIDE_EFFECT, static_cast(has_side_effect), 0); + } + void add_backend_config(::flatbuffers::Offset<::flatbuffers::String> backend_config) { + 
fbb_.AddOffset(StablehloCustomCallOptions::VT_BACKEND_CONFIG, backend_config); + } + void add_api_version(int32_t api_version) { + fbb_.AddElement(StablehloCustomCallOptions::VT_API_VERSION, api_version, 0); + } + void add_called_computations(::flatbuffers::Offset<::flatbuffers::Vector> called_computations) { + fbb_.AddOffset(StablehloCustomCallOptions::VT_CALLED_COMPUTATIONS, called_computations); + } + void add_custom_attributes(::flatbuffers::Offset<::flatbuffers::Vector> custom_attributes) { + fbb_.AddOffset(StablehloCustomCallOptions::VT_CUSTOM_ATTRIBUTES, custom_attributes); + } + explicit StablehloCustomCallOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloCustomCallOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> call_target_name = 0, + bool has_side_effect = false, + ::flatbuffers::Offset<::flatbuffers::String> backend_config = 0, + int32_t api_version = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> called_computations = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> custom_attributes = 0) { + StablehloCustomCallOptionsBuilder builder_(_fbb); + builder_.add_custom_attributes(custom_attributes); + builder_.add_called_computations(called_computations); + builder_.add_api_version(api_version); + builder_.add_backend_config(backend_config); + builder_.add_call_target_name(call_target_name); + builder_.add_has_side_effect(has_side_effect); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloCustomCallOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *call_target_name = nullptr, + bool has_side_effect = false, + const char *backend_config = nullptr, + int32_t api_version = 0, + const std::vector *called_computations = nullptr, 
+ const std::vector *custom_attributes = nullptr) { + auto call_target_name__ = call_target_name ? _fbb.CreateString(call_target_name) : 0; + auto backend_config__ = backend_config ? _fbb.CreateString(backend_config) : 0; + auto called_computations__ = called_computations ? _fbb.CreateVector(*called_computations) : 0; + auto custom_attributes__ = custom_attributes ? _fbb.CreateVector(*custom_attributes) : 0; + return tflite::CreateStablehloCustomCallOptions( + _fbb, + call_target_name__, + has_side_effect, + backend_config__, + api_version, + called_computations__, + custom_attributes__); +} + +::flatbuffers::Offset CreateStablehloCustomCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloReduceOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloReduceOptions TableType; + std::vector dimensions{}; + int32_t body_subgraph_index = 0; +}; + +struct StablehloReduceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloReduceOptionsT NativeTableType; + typedef StablehloReduceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMENSIONS = 4, + VT_BODY_SUBGRAPH_INDEX = 6 + }; + const ::flatbuffers::Vector *dimensions() const { + return GetPointer *>(VT_DIMENSIONS); + } + int32_t body_subgraph_index() const { + return GetField(VT_BODY_SUBGRAPH_INDEX, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMENSIONS) && + verifier.VerifyVector(dimensions()) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + StablehloReduceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloReduceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset 
Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloReduceOptionsBuilder { + typedef StablehloReduceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> dimensions) { + fbb_.AddOffset(StablehloReduceOptions::VT_DIMENSIONS, dimensions); + } + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(StablehloReduceOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); + } + explicit StablehloReduceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloReduceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> dimensions = 0, + int32_t body_subgraph_index = 0) { + StablehloReduceOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_dimensions(dimensions); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloReduceOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dimensions = nullptr, + int32_t body_subgraph_index = 0) { + auto dimensions__ = dimensions ? 
_fbb.CreateVector(*dimensions) : 0; + return tflite::CreateStablehloReduceOptions( + _fbb, + dimensions__, + body_subgraph_index); +} + +::flatbuffers::Offset CreateStablehloReduceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloSliceOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloSliceOptions TableType; + std::vector start_indices{}; + std::vector limit_indices{}; + std::vector strides{}; +}; + +struct StablehloSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloSliceOptionsT NativeTableType; + typedef StablehloSliceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_START_INDICES = 4, + VT_LIMIT_INDICES = 6, + VT_STRIDES = 8 + }; + const ::flatbuffers::Vector *start_indices() const { + return GetPointer *>(VT_START_INDICES); + } + const ::flatbuffers::Vector *limit_indices() const { + return GetPointer *>(VT_LIMIT_INDICES); + } + const ::flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_START_INDICES) && + verifier.VerifyVector(start_indices()) && + VerifyOffset(verifier, VT_LIMIT_INDICES) && + verifier.VerifyVector(limit_indices()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + verifier.EndTable(); + } + StablehloSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloSliceOptionsBuilder { + typedef StablehloSliceOptions 
Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_start_indices(::flatbuffers::Offset<::flatbuffers::Vector> start_indices) { + fbb_.AddOffset(StablehloSliceOptions::VT_START_INDICES, start_indices); + } + void add_limit_indices(::flatbuffers::Offset<::flatbuffers::Vector> limit_indices) { + fbb_.AddOffset(StablehloSliceOptions::VT_LIMIT_INDICES, limit_indices); + } + void add_strides(::flatbuffers::Offset<::flatbuffers::Vector> strides) { + fbb_.AddOffset(StablehloSliceOptions::VT_STRIDES, strides); + } + explicit StablehloSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloSliceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> start_indices = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> limit_indices = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> strides = 0) { + StablehloSliceOptionsBuilder builder_(_fbb); + builder_.add_strides(strides); + builder_.add_limit_indices(limit_indices); + builder_.add_start_indices(start_indices); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloSliceOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *start_indices = nullptr, + const std::vector *limit_indices = nullptr, + const std::vector *strides = nullptr) { + auto start_indices__ = start_indices ? _fbb.CreateVector(*start_indices) : 0; + auto limit_indices__ = limit_indices ? _fbb.CreateVector(*limit_indices) : 0; + auto strides__ = strides ? 
_fbb.CreateVector(*strides) : 0; + return tflite::CreateStablehloSliceOptions( + _fbb, + start_indices__, + limit_indices__, + strides__); +} + +::flatbuffers::Offset CreateStablehloSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloConvolutionOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloConvolutionOptions TableType; + std::vector window_strides{}; + std::vector padding{}; + std::vector lhs_dilation{}; + std::vector rhs_dilation{}; + std::vector window_reversal{}; + int64_t input_batch_dimension = 0; + int64_t input_feature_dimension = 0; + std::vector input_spatial_dimensions{}; + int64_t kernel_input_feature_dimension = 0; + int64_t kernel_output_feature_dimension = 0; + std::vector kernel_spatial_dimensions{}; + int64_t output_batch_dimension = 0; + int64_t output_feature_dimension = 0; + std::vector output_spatial_dimensions{}; + int64_t feature_group_count = 0; + int64_t batch_group_count = 0; + std::vector precision_config{}; +}; + +struct StablehloConvolutionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloConvolutionOptionsT NativeTableType; + typedef StablehloConvolutionOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_WINDOW_STRIDES = 4, + VT_PADDING = 6, + VT_LHS_DILATION = 8, + VT_RHS_DILATION = 10, + VT_WINDOW_REVERSAL = 12, + VT_INPUT_BATCH_DIMENSION = 14, + VT_INPUT_FEATURE_DIMENSION = 16, + VT_INPUT_SPATIAL_DIMENSIONS = 18, + VT_KERNEL_INPUT_FEATURE_DIMENSION = 20, + VT_KERNEL_OUTPUT_FEATURE_DIMENSION = 22, + VT_KERNEL_SPATIAL_DIMENSIONS = 24, + VT_OUTPUT_BATCH_DIMENSION = 26, + VT_OUTPUT_FEATURE_DIMENSION = 28, + VT_OUTPUT_SPATIAL_DIMENSIONS = 30, + VT_FEATURE_GROUP_COUNT = 32, + VT_BATCH_GROUP_COUNT = 34, + VT_PRECISION_CONFIG = 36 + }; + const ::flatbuffers::Vector *window_strides() const { + return GetPointer 
*>(VT_WINDOW_STRIDES); + } + const ::flatbuffers::Vector *padding() const { + return GetPointer *>(VT_PADDING); + } + const ::flatbuffers::Vector *lhs_dilation() const { + return GetPointer *>(VT_LHS_DILATION); + } + const ::flatbuffers::Vector *rhs_dilation() const { + return GetPointer *>(VT_RHS_DILATION); + } + const ::flatbuffers::Vector *window_reversal() const { + return GetPointer *>(VT_WINDOW_REVERSAL); + } + int64_t input_batch_dimension() const { + return GetField(VT_INPUT_BATCH_DIMENSION, 0); + } + int64_t input_feature_dimension() const { + return GetField(VT_INPUT_FEATURE_DIMENSION, 0); + } + const ::flatbuffers::Vector *input_spatial_dimensions() const { + return GetPointer *>(VT_INPUT_SPATIAL_DIMENSIONS); + } + int64_t kernel_input_feature_dimension() const { + return GetField(VT_KERNEL_INPUT_FEATURE_DIMENSION, 0); + } + int64_t kernel_output_feature_dimension() const { + return GetField(VT_KERNEL_OUTPUT_FEATURE_DIMENSION, 0); + } + const ::flatbuffers::Vector *kernel_spatial_dimensions() const { + return GetPointer *>(VT_KERNEL_SPATIAL_DIMENSIONS); + } + int64_t output_batch_dimension() const { + return GetField(VT_OUTPUT_BATCH_DIMENSION, 0); + } + int64_t output_feature_dimension() const { + return GetField(VT_OUTPUT_FEATURE_DIMENSION, 0); + } + const ::flatbuffers::Vector *output_spatial_dimensions() const { + return GetPointer *>(VT_OUTPUT_SPATIAL_DIMENSIONS); + } + int64_t feature_group_count() const { + return GetField(VT_FEATURE_GROUP_COUNT, 0); + } + int64_t batch_group_count() const { + return GetField(VT_BATCH_GROUP_COUNT, 0); + } + const ::flatbuffers::Vector *precision_config() const { + return GetPointer *>(VT_PRECISION_CONFIG); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_WINDOW_STRIDES) && + verifier.VerifyVector(window_strides()) && + VerifyOffset(verifier, VT_PADDING) && + verifier.VerifyVector(padding()) && + VerifyOffset(verifier, VT_LHS_DILATION) && 
+ verifier.VerifyVector(lhs_dilation()) && + VerifyOffset(verifier, VT_RHS_DILATION) && + verifier.VerifyVector(rhs_dilation()) && + VerifyOffset(verifier, VT_WINDOW_REVERSAL) && + verifier.VerifyVector(window_reversal()) && + VerifyField(verifier, VT_INPUT_BATCH_DIMENSION, 8) && + VerifyField(verifier, VT_INPUT_FEATURE_DIMENSION, 8) && + VerifyOffset(verifier, VT_INPUT_SPATIAL_DIMENSIONS) && + verifier.VerifyVector(input_spatial_dimensions()) && + VerifyField(verifier, VT_KERNEL_INPUT_FEATURE_DIMENSION, 8) && + VerifyField(verifier, VT_KERNEL_OUTPUT_FEATURE_DIMENSION, 8) && + VerifyOffset(verifier, VT_KERNEL_SPATIAL_DIMENSIONS) && + verifier.VerifyVector(kernel_spatial_dimensions()) && + VerifyField(verifier, VT_OUTPUT_BATCH_DIMENSION, 8) && + VerifyField(verifier, VT_OUTPUT_FEATURE_DIMENSION, 8) && + VerifyOffset(verifier, VT_OUTPUT_SPATIAL_DIMENSIONS) && + verifier.VerifyVector(output_spatial_dimensions()) && + VerifyField(verifier, VT_FEATURE_GROUP_COUNT, 8) && + VerifyField(verifier, VT_BATCH_GROUP_COUNT, 8) && + VerifyOffset(verifier, VT_PRECISION_CONFIG) && + verifier.VerifyVector(precision_config()) && + verifier.EndTable(); + } + StablehloConvolutionOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloConvolutionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloConvolutionOptionsBuilder { + typedef StablehloConvolutionOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_window_strides(::flatbuffers::Offset<::flatbuffers::Vector> window_strides) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_WINDOW_STRIDES, window_strides); + } + void add_padding(::flatbuffers::Offset<::flatbuffers::Vector> padding) { + 
fbb_.AddOffset(StablehloConvolutionOptions::VT_PADDING, padding); + } + void add_lhs_dilation(::flatbuffers::Offset<::flatbuffers::Vector> lhs_dilation) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_LHS_DILATION, lhs_dilation); + } + void add_rhs_dilation(::flatbuffers::Offset<::flatbuffers::Vector> rhs_dilation) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_RHS_DILATION, rhs_dilation); + } + void add_window_reversal(::flatbuffers::Offset<::flatbuffers::Vector> window_reversal) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_WINDOW_REVERSAL, window_reversal); + } + void add_input_batch_dimension(int64_t input_batch_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_INPUT_BATCH_DIMENSION, input_batch_dimension, 0); + } + void add_input_feature_dimension(int64_t input_feature_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_INPUT_FEATURE_DIMENSION, input_feature_dimension, 0); + } + void add_input_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> input_spatial_dimensions) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_INPUT_SPATIAL_DIMENSIONS, input_spatial_dimensions); + } + void add_kernel_input_feature_dimension(int64_t kernel_input_feature_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_KERNEL_INPUT_FEATURE_DIMENSION, kernel_input_feature_dimension, 0); + } + void add_kernel_output_feature_dimension(int64_t kernel_output_feature_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_KERNEL_OUTPUT_FEATURE_DIMENSION, kernel_output_feature_dimension, 0); + } + void add_kernel_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> kernel_spatial_dimensions) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_KERNEL_SPATIAL_DIMENSIONS, kernel_spatial_dimensions); + } + void add_output_batch_dimension(int64_t output_batch_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_OUTPUT_BATCH_DIMENSION, output_batch_dimension, 0); + } + void add_output_feature_dimension(int64_t 
output_feature_dimension) { + fbb_.AddElement(StablehloConvolutionOptions::VT_OUTPUT_FEATURE_DIMENSION, output_feature_dimension, 0); + } + void add_output_spatial_dimensions(::flatbuffers::Offset<::flatbuffers::Vector> output_spatial_dimensions) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_OUTPUT_SPATIAL_DIMENSIONS, output_spatial_dimensions); + } + void add_feature_group_count(int64_t feature_group_count) { + fbb_.AddElement(StablehloConvolutionOptions::VT_FEATURE_GROUP_COUNT, feature_group_count, 0); + } + void add_batch_group_count(int64_t batch_group_count) { + fbb_.AddElement(StablehloConvolutionOptions::VT_BATCH_GROUP_COUNT, batch_group_count, 0); + } + void add_precision_config(::flatbuffers::Offset<::flatbuffers::Vector> precision_config) { + fbb_.AddOffset(StablehloConvolutionOptions::VT_PRECISION_CONFIG, precision_config); + } + explicit StablehloConvolutionOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloConvolutionOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> window_strides = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> padding = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> lhs_dilation = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> rhs_dilation = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> window_reversal = 0, + int64_t input_batch_dimension = 0, + int64_t input_feature_dimension = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> input_spatial_dimensions = 0, + int64_t kernel_input_feature_dimension = 0, + int64_t kernel_output_feature_dimension = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> kernel_spatial_dimensions = 0, + int64_t output_batch_dimension = 0, + int64_t output_feature_dimension = 0, + 
::flatbuffers::Offset<::flatbuffers::Vector> output_spatial_dimensions = 0, + int64_t feature_group_count = 0, + int64_t batch_group_count = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> precision_config = 0) { + StablehloConvolutionOptionsBuilder builder_(_fbb); + builder_.add_batch_group_count(batch_group_count); + builder_.add_feature_group_count(feature_group_count); + builder_.add_output_feature_dimension(output_feature_dimension); + builder_.add_output_batch_dimension(output_batch_dimension); + builder_.add_kernel_output_feature_dimension(kernel_output_feature_dimension); + builder_.add_kernel_input_feature_dimension(kernel_input_feature_dimension); + builder_.add_input_feature_dimension(input_feature_dimension); + builder_.add_input_batch_dimension(input_batch_dimension); + builder_.add_precision_config(precision_config); + builder_.add_output_spatial_dimensions(output_spatial_dimensions); + builder_.add_kernel_spatial_dimensions(kernel_spatial_dimensions); + builder_.add_input_spatial_dimensions(input_spatial_dimensions); + builder_.add_window_reversal(window_reversal); + builder_.add_rhs_dilation(rhs_dilation); + builder_.add_lhs_dilation(lhs_dilation); + builder_.add_padding(padding); + builder_.add_window_strides(window_strides); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloConvolutionOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *window_strides = nullptr, + const std::vector *padding = nullptr, + const std::vector *lhs_dilation = nullptr, + const std::vector *rhs_dilation = nullptr, + const std::vector *window_reversal = nullptr, + int64_t input_batch_dimension = 0, + int64_t input_feature_dimension = 0, + const std::vector *input_spatial_dimensions = nullptr, + int64_t kernel_input_feature_dimension = 0, + int64_t kernel_output_feature_dimension = 0, + const std::vector *kernel_spatial_dimensions = nullptr, + int64_t output_batch_dimension = 0, + int64_t output_feature_dimension = 0, + 
const std::vector *output_spatial_dimensions = nullptr, + int64_t feature_group_count = 0, + int64_t batch_group_count = 0, + const std::vector *precision_config = nullptr) { + auto window_strides__ = window_strides ? _fbb.CreateVector(*window_strides) : 0; + auto padding__ = padding ? _fbb.CreateVector(*padding) : 0; + auto lhs_dilation__ = lhs_dilation ? _fbb.CreateVector(*lhs_dilation) : 0; + auto rhs_dilation__ = rhs_dilation ? _fbb.CreateVector(*rhs_dilation) : 0; + auto window_reversal__ = window_reversal ? _fbb.CreateVector(*window_reversal) : 0; + auto input_spatial_dimensions__ = input_spatial_dimensions ? _fbb.CreateVector(*input_spatial_dimensions) : 0; + auto kernel_spatial_dimensions__ = kernel_spatial_dimensions ? _fbb.CreateVector(*kernel_spatial_dimensions) : 0; + auto output_spatial_dimensions__ = output_spatial_dimensions ? _fbb.CreateVector(*output_spatial_dimensions) : 0; + auto precision_config__ = precision_config ? _fbb.CreateVector(*precision_config) : 0; + return tflite::CreateStablehloConvolutionOptions( + _fbb, + window_strides__, + padding__, + lhs_dilation__, + rhs_dilation__, + window_reversal__, + input_batch_dimension, + input_feature_dimension, + input_spatial_dimensions__, + kernel_input_feature_dimension, + kernel_output_feature_dimension, + kernel_spatial_dimensions__, + output_batch_dimension, + output_feature_dimension, + output_spatial_dimensions__, + feature_group_count, + batch_group_count, + precision_config__); +} + +::flatbuffers::Offset CreateStablehloConvolutionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloScatterOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloScatterOptions TableType; + bool indices_are_sorted = false; + std::vector update_window_dims{}; + std::vector inserted_window_dims{}; + std::vector scatter_dims_to_operand_dims{}; + int64_t index_vector_dim = 0; + bool 
unique_indices = false; + int32_t update_computation_subgraph_index = 0; +}; + +struct StablehloScatterOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloScatterOptionsT NativeTableType; + typedef StablehloScatterOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INDICES_ARE_SORTED = 4, + VT_UPDATE_WINDOW_DIMS = 6, + VT_INSERTED_WINDOW_DIMS = 8, + VT_SCATTER_DIMS_TO_OPERAND_DIMS = 10, + VT_INDEX_VECTOR_DIM = 12, + VT_UNIQUE_INDICES = 14, + VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX = 16 + }; + bool indices_are_sorted() const { + return GetField(VT_INDICES_ARE_SORTED, 0) != 0; + } + const ::flatbuffers::Vector *update_window_dims() const { + return GetPointer *>(VT_UPDATE_WINDOW_DIMS); + } + const ::flatbuffers::Vector *inserted_window_dims() const { + return GetPointer *>(VT_INSERTED_WINDOW_DIMS); + } + const ::flatbuffers::Vector *scatter_dims_to_operand_dims() const { + return GetPointer *>(VT_SCATTER_DIMS_TO_OPERAND_DIMS); + } + int64_t index_vector_dim() const { + return GetField(VT_INDEX_VECTOR_DIM, 0); + } + bool unique_indices() const { + return GetField(VT_UNIQUE_INDICES, 0) != 0; + } + int32_t update_computation_subgraph_index() const { + return GetField(VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_INDICES_ARE_SORTED, 1) && + VerifyOffset(verifier, VT_UPDATE_WINDOW_DIMS) && + verifier.VerifyVector(update_window_dims()) && + VerifyOffset(verifier, VT_INSERTED_WINDOW_DIMS) && + verifier.VerifyVector(inserted_window_dims()) && + VerifyOffset(verifier, VT_SCATTER_DIMS_TO_OPERAND_DIMS) && + verifier.VerifyVector(scatter_dims_to_operand_dims()) && + VerifyField(verifier, VT_INDEX_VECTOR_DIM, 8) && + VerifyField(verifier, VT_UNIQUE_INDICES, 1) && + VerifyField(verifier, VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + StablehloScatterOptionsT 
*UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloScatterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloScatterOptionsBuilder { + typedef StablehloScatterOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_indices_are_sorted(bool indices_are_sorted) { + fbb_.AddElement(StablehloScatterOptions::VT_INDICES_ARE_SORTED, static_cast(indices_are_sorted), 0); + } + void add_update_window_dims(::flatbuffers::Offset<::flatbuffers::Vector> update_window_dims) { + fbb_.AddOffset(StablehloScatterOptions::VT_UPDATE_WINDOW_DIMS, update_window_dims); + } + void add_inserted_window_dims(::flatbuffers::Offset<::flatbuffers::Vector> inserted_window_dims) { + fbb_.AddOffset(StablehloScatterOptions::VT_INSERTED_WINDOW_DIMS, inserted_window_dims); + } + void add_scatter_dims_to_operand_dims(::flatbuffers::Offset<::flatbuffers::Vector> scatter_dims_to_operand_dims) { + fbb_.AddOffset(StablehloScatterOptions::VT_SCATTER_DIMS_TO_OPERAND_DIMS, scatter_dims_to_operand_dims); + } + void add_index_vector_dim(int64_t index_vector_dim) { + fbb_.AddElement(StablehloScatterOptions::VT_INDEX_VECTOR_DIM, index_vector_dim, 0); + } + void add_unique_indices(bool unique_indices) { + fbb_.AddElement(StablehloScatterOptions::VT_UNIQUE_INDICES, static_cast(unique_indices), 0); + } + void add_update_computation_subgraph_index(int32_t update_computation_subgraph_index) { + fbb_.AddElement(StablehloScatterOptions::VT_UPDATE_COMPUTATION_SUBGRAPH_INDEX, update_computation_subgraph_index, 0); + } + explicit StablehloScatterOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto 
end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloScatterOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool indices_are_sorted = false, + ::flatbuffers::Offset<::flatbuffers::Vector> update_window_dims = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> inserted_window_dims = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> scatter_dims_to_operand_dims = 0, + int64_t index_vector_dim = 0, + bool unique_indices = false, + int32_t update_computation_subgraph_index = 0) { + StablehloScatterOptionsBuilder builder_(_fbb); + builder_.add_index_vector_dim(index_vector_dim); + builder_.add_update_computation_subgraph_index(update_computation_subgraph_index); + builder_.add_scatter_dims_to_operand_dims(scatter_dims_to_operand_dims); + builder_.add_inserted_window_dims(inserted_window_dims); + builder_.add_update_window_dims(update_window_dims); + builder_.add_unique_indices(unique_indices); + builder_.add_indices_are_sorted(indices_are_sorted); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloScatterOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool indices_are_sorted = false, + const std::vector *update_window_dims = nullptr, + const std::vector *inserted_window_dims = nullptr, + const std::vector *scatter_dims_to_operand_dims = nullptr, + int64_t index_vector_dim = 0, + bool unique_indices = false, + int32_t update_computation_subgraph_index = 0) { + auto update_window_dims__ = update_window_dims ? _fbb.CreateVector(*update_window_dims) : 0; + auto inserted_window_dims__ = inserted_window_dims ? _fbb.CreateVector(*inserted_window_dims) : 0; + auto scatter_dims_to_operand_dims__ = scatter_dims_to_operand_dims ? 
_fbb.CreateVector(*scatter_dims_to_operand_dims) : 0; + return tflite::CreateStablehloScatterOptions( + _fbb, + indices_are_sorted, + update_window_dims__, + inserted_window_dims__, + scatter_dims_to_operand_dims__, + index_vector_dim, + unique_indices, + update_computation_subgraph_index); +} + +::flatbuffers::Offset CreateStablehloScatterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloCaseOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloCaseOptions TableType; + std::vector branch_subgraph_indices{}; +}; + +struct StablehloCaseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloCaseOptionsT NativeTableType; + typedef StablehloCaseOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BRANCH_SUBGRAPH_INDICES = 4 + }; + const ::flatbuffers::Vector *branch_subgraph_indices() const { + return GetPointer *>(VT_BRANCH_SUBGRAPH_INDICES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BRANCH_SUBGRAPH_INDICES) && + verifier.VerifyVector(branch_subgraph_indices()) && + verifier.EndTable(); + } + StablehloCaseOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloCaseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCaseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloCaseOptionsBuilder { + typedef StablehloCaseOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_branch_subgraph_indices(::flatbuffers::Offset<::flatbuffers::Vector> branch_subgraph_indices) { + 
fbb_.AddOffset(StablehloCaseOptions::VT_BRANCH_SUBGRAPH_INDICES, branch_subgraph_indices); + } + explicit StablehloCaseOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloCaseOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> branch_subgraph_indices = 0) { + StablehloCaseOptionsBuilder builder_(_fbb); + builder_.add_branch_subgraph_indices(branch_subgraph_indices); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStablehloCaseOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *branch_subgraph_indices = nullptr) { + auto branch_subgraph_indices__ = branch_subgraph_indices ? _fbb.CreateVector(*branch_subgraph_indices) : 0; + return tflite::CreateStablehloCaseOptions( + _fbb, + branch_subgraph_indices__); +} + +::flatbuffers::Offset CreateStablehloCaseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCaseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StablehloRngBitGeneratorOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloRngBitGeneratorOptions TableType; + tflite::RngAlgorithm algorithm = tflite::RngAlgorithm_DEFAULT; +}; + +struct StablehloRngBitGeneratorOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloRngBitGeneratorOptionsT NativeTableType; + typedef StablehloRngBitGeneratorOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALGORITHM = 4 + }; + tflite::RngAlgorithm algorithm() const { + return static_cast(GetField(VT_ALGORITHM, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALGORITHM, 1) && + 
verifier.EndTable(); + } + StablehloRngBitGeneratorOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StablehloRngBitGeneratorOptionsBuilder { + typedef StablehloRngBitGeneratorOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_algorithm(tflite::RngAlgorithm algorithm) { + fbb_.AddElement(StablehloRngBitGeneratorOptions::VT_ALGORITHM, static_cast(algorithm), 0); + } + explicit StablehloRngBitGeneratorOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStablehloRngBitGeneratorOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::RngAlgorithm algorithm = tflite::RngAlgorithm_DEFAULT) { + StablehloRngBitGeneratorOptionsBuilder builder_(_fbb); + builder_.add_algorithm(algorithm); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStablehloRngBitGeneratorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Conv2DOptionsT : public ::flatbuffers::NativeTable { + typedef Conv2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; + tflite::TensorType quantized_bias_type = 
tflite::TensorType_FLOAT32; +}; + +struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Conv2DOptionsT NativeTableType; + typedef Conv2DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10, + VT_DILATION_W_FACTOR = 12, + VT_DILATION_H_FACTOR = 14, + VT_QUANTIZED_BIAS_TYPE = 16 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + tflite::TensorType quantized_bias_type() const { + return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + VerifyField(verifier, VT_QUANTIZED_BIAS_TYPE, 1) && + verifier.EndTable(); + } + Conv2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Conv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Conv2DOptionsBuilder { + typedef Conv2DOptions Table; 
+ ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Conv2DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Conv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Conv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + void add_quantized_bias_type(tflite::TensorType quantized_bias_type) { + fbb_.AddElement(Conv2DOptions::VT_QUANTIZED_BIAS_TYPE, static_cast(quantized_bias_type), 0); + } + explicit Conv2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConv2DOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1, + tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) { + Conv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + 
builder_.add_quantized_bias_type(quantized_bias_type); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Conv3DOptionsT : public ::flatbuffers::NativeTable { + typedef Conv3DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_d = 0; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_d_factor = 1; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; +}; + +struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Conv3DOptionsT NativeTableType; + typedef Conv3DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_D = 6, + VT_STRIDE_W = 8, + VT_STRIDE_H = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_D_FACTOR = 14, + VT_DILATION_W_FACTOR = 16, + VT_DILATION_H_FACTOR = 18 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_d() const { + return GetField(VT_STRIDE_D, 0); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_d_factor() const { + return GetField(VT_DILATION_D_FACTOR, 1); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) 
&& + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_D, 4) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_D_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + verifier.EndTable(); + } + Conv3DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Conv3DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Conv3DOptionsBuilder { + typedef Conv3DOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Conv3DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_d(int32_t stride_d) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_D, stride_d, 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_d_factor(int32_t dilation_d_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + 
explicit Conv3DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConv3DOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_d = 0, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_d_factor = 1, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + Conv3DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_dilation_d_factor(dilation_d_factor); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_stride_d(stride_d); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateConv3DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Pool2DOptionsT : public ::flatbuffers::NativeTable { + typedef Pool2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + int32_t filter_width = 0; + int32_t filter_height = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Pool2DOptionsT NativeTableType; + typedef Pool2DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FILTER_WIDTH = 10, + VT_FILTER_HEIGHT = 12, + 
VT_FUSED_ACTIVATION_FUNCTION = 14 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + int32_t filter_width() const { + return GetField(VT_FILTER_WIDTH, 0); + } + int32_t filter_height() const { + return GetField(VT_FILTER_HEIGHT, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FILTER_WIDTH, 4) && + VerifyField(verifier, VT_FILTER_HEIGHT, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + Pool2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Pool2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Pool2DOptionsBuilder { + typedef Pool2DOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Pool2DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Pool2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Pool2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_filter_width(int32_t filter_width) { + fbb_.AddElement(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); + } + void add_filter_height(int32_t filter_height) { + 
fbb_.AddElement(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit Pool2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreatePool2DOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + int32_t filter_width = 0, + int32_t filter_height = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + Pool2DOptionsBuilder builder_(_fbb); + builder_.add_filter_height(filter_height); + builder_.add_filter_width(filter_width); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +::flatbuffers::Offset CreatePool2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DepthwiseConv2DOptionsT : public ::flatbuffers::NativeTable { + typedef DepthwiseConv2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + int32_t depth_multiplier = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; +}; + +struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DepthwiseConv2DOptionsT NativeTableType; + typedef DepthwiseConv2DOptionsBuilder Builder; + 
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_DEPTH_MULTIPLIER = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_W_FACTOR = 14, + VT_DILATION_H_FACTOR = 16 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + int32_t depth_multiplier() const { + return GetField(VT_DEPTH_MULTIPLIER, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_DEPTH_MULTIPLIER, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + verifier.EndTable(); + } + DepthwiseConv2DOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthwiseConv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthwiseConv2DOptionsBuilder { + typedef DepthwiseConv2DOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_PADDING, 
static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_depth_multiplier(int32_t depth_multiplier) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit DepthwiseConv2DOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDepthwiseConv2DOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + int32_t depth_multiplier = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + DepthwiseConv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_depth_multiplier(depth_multiplier); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + 
+::flatbuffers::Offset CreateDepthwiseConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ConcatEmbeddingsOptionsT : public ::flatbuffers::NativeTable { + typedef ConcatEmbeddingsOptions TableType; + int32_t num_channels = 0; + std::vector num_columns_per_channel{}; + std::vector embedding_dim_per_channel{}; +}; + +struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ConcatEmbeddingsOptionsT NativeTableType; + typedef ConcatEmbeddingsOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_CHANNELS = 4, + VT_NUM_COLUMNS_PER_CHANNEL = 6, + VT_EMBEDDING_DIM_PER_CHANNEL = 8 + }; + int32_t num_channels() const { + return GetField(VT_NUM_CHANNELS, 0); + } + const ::flatbuffers::Vector *num_columns_per_channel() const { + return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); + } + const ::flatbuffers::Vector *embedding_dim_per_channel() const { + return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_CHANNELS, 4) && + VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) && + verifier.VerifyVector(num_columns_per_channel()) && + VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) && + verifier.VerifyVector(embedding_dim_per_channel()) && + verifier.EndTable(); + } + ConcatEmbeddingsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ConcatEmbeddingsOptionsBuilder { + typedef ConcatEmbeddingsOptions Table; + 
::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_num_channels(int32_t num_channels) { + fbb_.AddElement(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0); + } + void add_num_columns_per_channel(::flatbuffers::Offset<::flatbuffers::Vector> num_columns_per_channel) { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel); + } + void add_embedding_dim_per_channel(::flatbuffers::Offset<::flatbuffers::Vector> embedding_dim_per_channel) { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel); + } + explicit ConcatEmbeddingsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConcatEmbeddingsOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_channels = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> num_columns_per_channel = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> embedding_dim_per_channel = 0) { + ConcatEmbeddingsOptionsBuilder builder_(_fbb); + builder_.add_embedding_dim_per_channel(embedding_dim_per_channel); + builder_.add_num_columns_per_channel(num_columns_per_channel); + builder_.add_num_channels(num_channels); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateConcatEmbeddingsOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_channels = 0, + const std::vector *num_columns_per_channel = nullptr, + const std::vector *embedding_dim_per_channel = nullptr) { + auto num_columns_per_channel__ = num_columns_per_channel ? _fbb.CreateVector(*num_columns_per_channel) : 0; + auto embedding_dim_per_channel__ = embedding_dim_per_channel ? 
_fbb.CreateVector(*embedding_dim_per_channel) : 0; + return tflite::CreateConcatEmbeddingsOptions( + _fbb, + num_channels, + num_columns_per_channel__, + embedding_dim_per_channel__); +} + +::flatbuffers::Offset CreateConcatEmbeddingsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LSHProjectionOptionsT : public ::flatbuffers::NativeTable { + typedef LSHProjectionOptions TableType; + tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN; +}; + +struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LSHProjectionOptionsT NativeTableType; + typedef LSHProjectionOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + tflite::LSHProjectionType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE, 1) && + verifier.EndTable(); + } + LSHProjectionOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LSHProjectionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LSHProjectionOptionsBuilder { + typedef LSHProjectionOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type(tflite::LSHProjectionType type) { + fbb_.AddElement(LSHProjectionOptions::VT_TYPE, static_cast(type), 0); + } + explicit LSHProjectionOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLSHProjectionOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) { + LSHProjectionOptionsBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLSHProjectionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SVDFOptionsT : public ::flatbuffers::NativeTable { + typedef SVDFOptions TableType; + int32_t rank = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SVDFOptionsT NativeTableType; + typedef SVDFOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_RANK = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + int32_t rank() const { + return GetField(VT_RANK, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_RANK, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + SVDFOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SVDFOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SVDFOptionsBuilder { + typedef SVDFOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_rank(int32_t rank) { + fbb_.AddElement(SVDFOptions::VT_RANK, rank, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit SVDFOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSVDFOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t rank = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) { + SVDFOptionsBuilder builder_(_fbb); + builder_.add_rank(rank); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSVDFOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RNNOptionsT : public ::flatbuffers::NativeTable { + typedef RNNOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct RNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef RNNOptionsT NativeTableType; + typedef RNNOptionsBuilder Builder; + enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + RNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RNNOptionsBuilder { + typedef RNNOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit RNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateRNNOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) 
{ + RNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SequenceRNNOptionsT : public ::flatbuffers::NativeTable { + typedef SequenceRNNOptions TableType; + bool time_major = false; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SequenceRNNOptionsT NativeTableType; + typedef SequenceRNNOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + SequenceRNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + 
+struct SequenceRNNOptionsBuilder { + typedef SequenceRNNOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) { + fbb_.AddElement(SequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit SequenceRNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSequenceRNNOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool time_major = false, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) { + SequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BidirectionalSequenceRNNOptionsT : public ::flatbuffers::NativeTable { + typedef BidirectionalSequenceRNNOptions TableType; + bool time_major = false; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool merge_outputs = false; + bool asymmetric_quantize_inputs = false; +}; + +struct 
BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BidirectionalSequenceRNNOptionsT NativeTableType; + typedef BidirectionalSequenceRNNOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_MERGE_OUTPUTS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_MERGE_OUTPUTS, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + BidirectionalSequenceRNNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BidirectionalSequenceRNNOptionsBuilder { + typedef BidirectionalSequenceRNNOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + 
fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceRNNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool time_major = false, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool merge_outputs = false, + bool asymmetric_quantize_inputs = false) { + BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FullyConnectedOptionsT : public ::flatbuffers::NativeTable { + typedef FullyConnectedOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT; + bool keep_num_dims = false; + bool asymmetric_quantize_inputs = false; + 
tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32; +}; + +struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FullyConnectedOptionsT NativeTableType; + typedef FullyConnectedOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_WEIGHTS_FORMAT = 6, + VT_KEEP_NUM_DIMS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10, + VT_QUANTIZED_BIAS_TYPE = 12 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + tflite::FullyConnectedOptionsWeightsFormat weights_format() const { + return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); + } + bool keep_num_dims() const { + return GetField(VT_KEEP_NUM_DIMS, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + tflite::TensorType quantized_bias_type() const { + return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_WEIGHTS_FORMAT, 1) && + VerifyField(verifier, VT_KEEP_NUM_DIMS, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + VerifyField(verifier, VT_QUANTIZED_BIAS_TYPE, 1) && + verifier.EndTable(); + } + FullyConnectedOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FullyConnectedOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FullyConnectedOptionsBuilder { + typedef FullyConnectedOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; 
+ void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) { + fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); + } + void add_keep_num_dims(bool keep_num_dims) { + fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + void add_quantized_bias_type(tflite::TensorType quantized_bias_type) { + fbb_.AddElement(FullyConnectedOptions::VT_QUANTIZED_BIAS_TYPE, static_cast(quantized_bias_type), 0); + } + explicit FullyConnectedOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateFullyConnectedOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT, + bool keep_num_dims = false, + bool asymmetric_quantize_inputs = false, + tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) { + FullyConnectedOptionsBuilder builder_(_fbb); + builder_.add_quantized_bias_type(quantized_bias_type); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_keep_num_dims(keep_num_dims); + builder_.add_weights_format(weights_format); + builder_.add_fused_activation_function(fused_activation_function); + return 
builder_.Finish(); +} + +::flatbuffers::Offset CreateFullyConnectedOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SoftmaxOptionsT : public ::flatbuffers::NativeTable { + typedef SoftmaxOptions TableType; + float beta = 0.0f; +}; + +struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SoftmaxOptionsT NativeTableType; + typedef SoftmaxOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BETA = 4 + }; + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BETA, 4) && + verifier.EndTable(); + } + SoftmaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SoftmaxOptionsBuilder { + typedef SoftmaxOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_beta(float beta) { + fbb_.AddElement(SoftmaxOptions::VT_BETA, beta, 0.0f); + } + explicit SoftmaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSoftmaxOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + float beta = 0.0f) { + SoftmaxOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const 
SoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ConcatenationOptionsT : public ::flatbuffers::NativeTable { + typedef ConcatenationOptions TableType; + int32_t axis = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ConcatenationOptionsT NativeTableType; + typedef ConcatenationOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + ConcatenationOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ConcatenationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ConcatenationOptionsBuilder { + typedef ConcatenationOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(ConcatenationOptions::VT_AXIS, axis, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit ConcatenationOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { 
+ start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateConcatenationOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + ConcatenationOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateConcatenationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AddOptionsT : public ::flatbuffers::NativeTable { + typedef AddOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool pot_scale_int16 = true; +}; + +struct AddOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef AddOptionsT NativeTableType; + typedef AddOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_POT_SCALE_INT16 = 6 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool pot_scale_int16() const { + return GetField(VT_POT_SCALE_INT16, 1) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_POT_SCALE_INT16, 1) && + verifier.EndTable(); + } + AddOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AddOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const 
AddOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AddOptionsBuilder { + typedef AddOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_pot_scale_int16(bool pot_scale_int16) { + fbb_.AddElement(AddOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); + } + explicit AddOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateAddOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool pot_scale_int16 = true) { + AddOptionsBuilder builder_(_fbb); + builder_.add_pot_scale_int16(pot_scale_int16); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateAddOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MulOptionsT : public ::flatbuffers::NativeTable { + typedef MulOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct MulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MulOptionsT NativeTableType; + typedef MulOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - 
tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); } - const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; + MulOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MulOptionsBuilder { + typedef MulOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; + explicit MulOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::MulOptionsT *AsMulOptions() { - return type == BuiltinOptions_MulOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateMulOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + MulOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct L2NormOptionsT : public ::flatbuffers::NativeTable { + typedef L2NormOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef L2NormOptionsT NativeTableType; + typedef L2NormOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - const tflite::MulOptionsT *AsMulOptions() const { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); } - tflite::PadOptionsT *AsPadOptions() { - return type == BuiltinOptions_PadOptions ? 
- reinterpret_cast(value) : nullptr; + L2NormOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(L2NormOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct L2NormOptionsBuilder { + typedef L2NormOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - const tflite::PadOptionsT *AsPadOptions() const { - return type == BuiltinOptions_PadOptions ? - reinterpret_cast(value) : nullptr; + explicit L2NormOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::GatherOptionsT *AsGatherOptions() { - return type == BuiltinOptions_GatherOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::GatherOptionsT *AsGatherOptions() const { - return type == BuiltinOptions_GatherOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateL2NormOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + L2NormOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateL2NormOptions(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LocalResponseNormalizationOptionsT : public ::flatbuffers::NativeTable { + typedef LocalResponseNormalizationOptions TableType; + int32_t radius = 0; + float bias = 0.0f; + float alpha = 0.0f; + float beta = 0.0f; +}; + +struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LocalResponseNormalizationOptionsT NativeTableType; + typedef LocalResponseNormalizationOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t radius() const { + return GetField(VT_RADIUS, 0); } - tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; + float bias() const { + return GetField(VT_BIAS, 0.0f); } - const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; + float alpha() const { + return GetField(VT_ALPHA, 0.0f); } - tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() { - return type == BuiltinOptions_SpaceToBatchNDOptions ? - reinterpret_cast(value) : nullptr; + float beta() const { + return GetField(VT_BETA, 0.0f); } - const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const { - return type == BuiltinOptions_SpaceToBatchNDOptions ? 
- reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_RADIUS, 4) && + VerifyField(verifier, VT_BIAS, 4) && + VerifyField(verifier, VT_ALPHA, 4) && + VerifyField(verifier, VT_BETA, 4) && + verifier.EndTable(); } - tflite::TransposeOptionsT *AsTransposeOptions() { - return type == BuiltinOptions_TransposeOptions ? - reinterpret_cast(value) : nullptr; + LocalResponseNormalizationOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LocalResponseNormalizationOptionsBuilder { + typedef LocalResponseNormalizationOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_radius(int32_t radius) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); } - const tflite::TransposeOptionsT *AsTransposeOptions() const { - return type == BuiltinOptions_TransposeOptions ? 
- reinterpret_cast(value) : nullptr; + void add_bias(float bias) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); + } + explicit LocalResponseNormalizationOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLocalResponseNormalizationOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t radius = 0, + float bias = 0.0f, + float alpha = 0.0f, + float beta = 0.0f) { + LocalResponseNormalizationOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + builder_.add_radius(radius); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLocalResponseNormalizationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LSTMOptionsT : public ::flatbuffers::NativeTable { + typedef LSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL; + bool asymmetric_quantize_inputs = false; +}; + +struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LSTMOptionsT NativeTableType; + typedef LSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_KERNEL_TYPE = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + 
tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); + } + tflite::LSTMKernelType kernel_type() const { + return static_cast(GetField(VT_KERNEL_TYPE, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_KERNEL_TYPE, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + LSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LSTMOptionsBuilder { + typedef LSTMOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) { + fbb_.AddElement(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) { + fbb_.AddElement(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_kernel_type(tflite::LSTMKernelType kernel_type) { + fbb_.AddElement(LSTMOptions::VT_KERNEL_TYPE, static_cast(kernel_type), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) 
{ + fbb_.AddElement(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit LSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateLSTMOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, + bool asymmetric_quantize_inputs = false) { + LSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_kernel_type(kernel_type); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnidirectionalSequenceLSTMOptionsT : public ::flatbuffers::NativeTable { + typedef UnidirectionalSequenceLSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + bool time_major = false; + bool asymmetric_quantize_inputs = false; + bool diagonal_recurrent_tensors = false; +}; + +struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; + typedef UnidirectionalSequenceLSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + 
VT_TIME_MAJOR = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12, + VT_DIAGONAL_RECURRENT_TENSORS = 14 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); + } + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool diagonal_recurrent_tensors() const { + return GetField(VT_DIAGONAL_RECURRENT_TENSORS, 0) != 0; + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + VerifyField(verifier, VT_DIAGONAL_RECURRENT_TENSORS, 1) && + verifier.EndTable(); + } + UnidirectionalSequenceLSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnidirectionalSequenceLSTMOptionsBuilder { + typedef UnidirectionalSequenceLSTMOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - tflite::ReducerOptionsT *AsReducerOptions() { - return 
type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; + void add_cell_clip(float cell_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); } - const tflite::ReducerOptionsT *AsReducerOptions() const { - return type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; + void add_proj_clip(float proj_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); } - tflite::SubOptionsT *AsSubOptions() { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; + void add_time_major(bool time_major) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); } - const tflite::SubOptionsT *AsSubOptions() const { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); } - tflite::DivOptionsT *AsDivOptions() { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; + void add_diagonal_recurrent_tensors(bool diagonal_recurrent_tensors) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_DIAGONAL_RECURRENT_TENSORS, static_cast(diagonal_recurrent_tensors), 0); } - const tflite::DivOptionsT *AsDivOptions() const { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; + explicit UnidirectionalSequenceLSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::SqueezeOptionsT *AsSqueezeOptions() { - return type == BuiltinOptions_SqueezeOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::SqueezeOptionsT *AsSqueezeOptions() const { - return type == BuiltinOptions_SqueezeOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool time_major = false, + bool asymmetric_quantize_inputs = false, + bool diagonal_recurrent_tensors = false) { + UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_diagonal_recurrent_tensors(diagonal_recurrent_tensors); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BidirectionalSequenceLSTMOptionsT : public ::flatbuffers::NativeTable { + typedef BidirectionalSequenceLSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + bool merge_outputs = false; + bool time_major = true; + bool asymmetric_quantize_inputs = false; +}; + +struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BidirectionalSequenceLSTMOptionsT NativeTableType; + typedef BidirectionalSequenceLSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_MERGE_OUTPUTS = 10, + VT_TIME_MAJOR = 12, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); } - const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); } - tflite::StridedSliceOptionsT *AsStridedSliceOptions() { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; } - const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; + bool time_major() const { + return GetField(VT_TIME_MAJOR, 1) != 0; } - tflite::ExpOptionsT *AsExpOptions() { - return type == BuiltinOptions_ExpOptions ? - reinterpret_cast(value) : nullptr; + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } - const tflite::ExpOptionsT *AsExpOptions() const { - return type == BuiltinOptions_ExpOptions ? 
- reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_MERGE_OUTPUTS, 1) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); } - tflite::TopKV2OptionsT *AsTopKV2Options() { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; + BidirectionalSequenceLSTMOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BidirectionalSequenceLSTMOptionsBuilder { + typedef BidirectionalSequenceLSTMOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - const tflite::TopKV2OptionsT *AsTopKV2Options() const { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; + void add_cell_clip(float cell_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); } - tflite::SplitOptionsT *AsSplitOptions() { - return type == BuiltinOptions_SplitOptions ? 
- reinterpret_cast(value) : nullptr; + void add_proj_clip(float proj_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); } - const tflite::SplitOptionsT *AsSplitOptions() const { - return type == BuiltinOptions_SplitOptions ? - reinterpret_cast(value) : nullptr; + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); } - tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; + void add_time_major(bool time_major) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); } - const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); } - tflite::CastOptionsT *AsCastOptions() { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; + explicit BidirectionalSequenceLSTMOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::CastOptionsT *AsCastOptions() const { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::DequantizeOptionsT *AsDequantizeOptions() { - return type == BuiltinOptions_DequantizeOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool merge_outputs = false, + bool time_major = true, + bool asymmetric_quantize_inputs = false) { + BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ResizeBilinearOptionsT : public ::flatbuffers::NativeTable { + typedef ResizeBilinearOptions TableType; + bool align_corners = false; + bool half_pixel_centers = false; +}; + +struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ResizeBilinearOptionsT NativeTableType; + typedef ResizeBilinearOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALIGN_CORNERS = 8, + VT_HALF_PIXEL_CENTERS = 10 + }; + bool align_corners() const { + return GetField(VT_ALIGN_CORNERS, 0) != 0; } - const tflite::DequantizeOptionsT *AsDequantizeOptions() const { - return type == BuiltinOptions_DequantizeOptions ? - reinterpret_cast(value) : nullptr; + bool half_pixel_centers() const { + return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; } - tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() { - return type == BuiltinOptions_MaximumMinimumOptions ? 
- reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALIGN_CORNERS, 1) && + VerifyField(verifier, VT_HALF_PIXEL_CENTERS, 1) && + verifier.EndTable(); } - const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const { - return type == BuiltinOptions_MaximumMinimumOptions ? - reinterpret_cast(value) : nullptr; + ResizeBilinearOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ResizeBilinearOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeBilinearOptionsBuilder { + typedef ResizeBilinearOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) { + fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); } - tflite::ArgMaxOptionsT *AsArgMaxOptions() { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; + void add_half_pixel_centers(bool half_pixel_centers) { + fbb_.AddElement(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); } - const tflite::ArgMaxOptionsT *AsArgMaxOptions() const { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; + explicit ResizeBilinearOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::LessOptionsT *AsLessOptions() { - return type == BuiltinOptions_LessOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::LessOptionsT *AsLessOptions() const { - return type == BuiltinOptions_LessOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateResizeBilinearOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool align_corners = false, + bool half_pixel_centers = false) { + ResizeBilinearOptionsBuilder builder_(_fbb); + builder_.add_half_pixel_centers(half_pixel_centers); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateResizeBilinearOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ResizeNearestNeighborOptionsT : public ::flatbuffers::NativeTable { + typedef ResizeNearestNeighborOptions TableType; + bool align_corners = false; + bool half_pixel_centers = false; +}; + +struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ResizeNearestNeighborOptionsT NativeTableType; + typedef ResizeNearestNeighborOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALIGN_CORNERS = 4, + VT_HALF_PIXEL_CENTERS = 6 + }; + bool align_corners() const { + return GetField(VT_ALIGN_CORNERS, 0) != 0; } - tflite::NegOptionsT *AsNegOptions() { - return type == BuiltinOptions_NegOptions ? - reinterpret_cast(value) : nullptr; + bool half_pixel_centers() const { + return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; } - const tflite::NegOptionsT *AsNegOptions() const { - return type == BuiltinOptions_NegOptions ? 
- reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALIGN_CORNERS, 1) && + VerifyField(verifier, VT_HALF_PIXEL_CENTERS, 1) && + verifier.EndTable(); } - tflite::PadV2OptionsT *AsPadV2Options() { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; + ResizeNearestNeighborOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeNearestNeighborOptionsBuilder { + typedef ResizeNearestNeighborOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) { + fbb_.AddElement(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); } - const tflite::PadV2OptionsT *AsPadV2Options() const { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; + void add_half_pixel_centers(bool half_pixel_centers) { + fbb_.AddElement(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); } - tflite::GreaterOptionsT *AsGreaterOptions() { - return type == BuiltinOptions_GreaterOptions ? - reinterpret_cast(value) : nullptr; + explicit ResizeNearestNeighborOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::GreaterOptionsT *AsGreaterOptions() const { - return type == BuiltinOptions_GreaterOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateResizeNearestNeighborOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool align_corners = false, + bool half_pixel_centers = false) { + ResizeNearestNeighborOptionsBuilder builder_(_fbb); + builder_.add_half_pixel_centers(half_pixel_centers); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateResizeNearestNeighborOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CallOptionsT : public ::flatbuffers::NativeTable { + typedef CallOptions TableType; + uint32_t subgraph = 0; +}; + +struct CallOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef CallOptionsT NativeTableType; + typedef CallOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SUBGRAPH = 4 + }; + uint32_t subgraph() const { + return GetField(VT_SUBGRAPH, 0); } - const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SUBGRAPH, 4) && + verifier.EndTable(); } - tflite::LessEqualOptionsT *AsLessEqualOptions() { - return type == BuiltinOptions_LessEqualOptions ? 
- reinterpret_cast(value) : nullptr; + CallOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CallOptionsBuilder { + typedef CallOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_subgraph(uint32_t subgraph) { + fbb_.AddElement(CallOptions::VT_SUBGRAPH, subgraph, 0); } - const tflite::LessEqualOptionsT *AsLessEqualOptions() const { - return type == BuiltinOptions_LessEqualOptions ? - reinterpret_cast(value) : nullptr; + explicit CallOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::SelectOptionsT *AsSelectOptions() { - return type == BuiltinOptions_SelectOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::SelectOptionsT *AsSelectOptions() const { - return type == BuiltinOptions_SelectOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateCallOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t subgraph = 0) { + CallOptionsBuilder builder_(_fbb); + builder_.add_subgraph(subgraph); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PadOptionsT : public ::flatbuffers::NativeTable { + typedef PadOptions TableType; +}; + +struct PadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef PadOptionsT NativeTableType; + typedef PadOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - tflite::SliceOptionsT *AsSliceOptions() { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; + PadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PadOptionsBuilder { + typedef PadOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit PadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::SliceOptionsT *AsSliceOptions() const { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::TransposeConvOptionsT *AsTransposeConvOptions() { - return type == BuiltinOptions_TransposeConvOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreatePadOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + PadOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreatePadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PadV2OptionsT : public ::flatbuffers::NativeTable { + typedef PadV2Options TableType; +}; + +struct PadV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef PadV2OptionsT NativeTableType; + typedef PadV2OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const { - return type == BuiltinOptions_TransposeConvOptions ? - reinterpret_cast(value) : nullptr; + PadV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PadV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PadV2OptionsBuilder { + typedef PadV2Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit PadV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() { - return type == BuiltinOptions_SparseToDenseOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const { - return type == BuiltinOptions_SparseToDenseOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreatePadV2Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + PadV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreatePadV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReshapeOptionsT : public ::flatbuffers::NativeTable { + typedef ReshapeOptions TableType; + std::vector new_shape{}; +}; + +struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReshapeOptionsT NativeTableType; + typedef ReshapeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NEW_SHAPE = 4 + }; + const ::flatbuffers::Vector *new_shape() const { + return GetPointer *>(VT_NEW_SHAPE); } - tflite::TileOptionsT *AsTileOptions() { - return type == BuiltinOptions_TileOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NEW_SHAPE) && + verifier.VerifyVector(new_shape()) && + verifier.EndTable(); } - const tflite::TileOptionsT *AsTileOptions() const { - return type == BuiltinOptions_TileOptions ? 
- reinterpret_cast(value) : nullptr; + ReshapeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReshapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReshapeOptionsBuilder { + typedef ReshapeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_new_shape(::flatbuffers::Offset<::flatbuffers::Vector> new_shape) { + fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); } - tflite::ExpandDimsOptionsT *AsExpandDimsOptions() { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; + explicit ReshapeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::EqualOptionsT *AsEqualOptions() { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateReshapeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> new_shape = 0) { + ReshapeOptionsBuilder builder_(_fbb); + builder_.add_new_shape(new_shape); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateReshapeOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *new_shape = nullptr) { + auto new_shape__ = new_shape ? 
_fbb.CreateVector(*new_shape) : 0; + return tflite::CreateReshapeOptions( + _fbb, + new_shape__); +} + +::flatbuffers::Offset CreateReshapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SpaceToBatchNDOptionsT : public ::flatbuffers::NativeTable { + typedef SpaceToBatchNDOptions TableType; +}; + +struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SpaceToBatchNDOptionsT NativeTableType; + typedef SpaceToBatchNDOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const tflite::EqualOptionsT *AsEqualOptions() const { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; + SpaceToBatchNDOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SpaceToBatchNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SpaceToBatchNDOptionsBuilder { + typedef SpaceToBatchNDOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SpaceToBatchNDOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::NotEqualOptionsT *AsNotEqualOptions() { - return type == BuiltinOptions_NotEqualOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::NotEqualOptionsT *AsNotEqualOptions() const { - return type == BuiltinOptions_NotEqualOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSpaceToBatchNDOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SpaceToBatchNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSpaceToBatchNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BatchToSpaceNDOptionsT : public ::flatbuffers::NativeTable { + typedef BatchToSpaceNDOptions TableType; +}; + +struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BatchToSpaceNDOptionsT NativeTableType; + typedef BatchToSpaceNDOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - tflite::ShapeOptionsT *AsShapeOptions() { - return type == BuiltinOptions_ShapeOptions ? - reinterpret_cast(value) : nullptr; + BatchToSpaceNDOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchToSpaceNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BatchToSpaceNDOptionsBuilder { + typedef BatchToSpaceNDOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BatchToSpaceNDOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::ShapeOptionsT *AsShapeOptions() const { - return type == BuiltinOptions_ShapeOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::PowOptionsT *AsPowOptions() { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateBatchToSpaceNDOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + BatchToSpaceNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateBatchToSpaceNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SkipGramOptionsT : public ::flatbuffers::NativeTable { + typedef SkipGramOptions TableType; + int32_t ngram_size = 0; + int32_t max_skip_size = 0; + bool include_all_ngrams = false; +}; + +struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SkipGramOptionsT NativeTableType; + typedef SkipGramOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NGRAM_SIZE = 4, + VT_MAX_SKIP_SIZE = 6, + VT_INCLUDE_ALL_NGRAMS = 8 + }; + int32_t ngram_size() const { + return GetField(VT_NGRAM_SIZE, 0); } - const tflite::PowOptionsT *AsPowOptions() const { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; + int32_t max_skip_size() const { + return GetField(VT_MAX_SKIP_SIZE, 0); } - tflite::ArgMinOptionsT *AsArgMinOptions() { - return type == BuiltinOptions_ArgMinOptions ? - reinterpret_cast(value) : nullptr; + bool include_all_ngrams() const { + return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; } - const tflite::ArgMinOptionsT *AsArgMinOptions() const { - return type == BuiltinOptions_ArgMinOptions ? 
- reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NGRAM_SIZE, 4) && + VerifyField(verifier, VT_MAX_SKIP_SIZE, 4) && + VerifyField(verifier, VT_INCLUDE_ALL_NGRAMS, 1) && + verifier.EndTable(); } - tflite::FakeQuantOptionsT *AsFakeQuantOptions() { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; + SkipGramOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SkipGramOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SkipGramOptionsBuilder { + typedef SkipGramOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_ngram_size(int32_t ngram_size) { + fbb_.AddElement(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); } - const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; + void add_max_skip_size(int32_t max_skip_size) { + fbb_.AddElement(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); } - tflite::PackOptionsT *AsPackOptions() { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; + void add_include_all_ngrams(bool include_all_ngrams) { + fbb_.AddElement(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast(include_all_ngrams), 0); } - const tflite::PackOptionsT *AsPackOptions() const { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; + explicit SkipGramOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::LogicalOrOptionsT *AsLogicalOrOptions() { - return type == BuiltinOptions_LogicalOrOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const { - return type == BuiltinOptions_LogicalOrOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSkipGramOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t ngram_size = 0, + int32_t max_skip_size = 0, + bool include_all_ngrams = false) { + SkipGramOptionsBuilder builder_(_fbb); + builder_.add_max_skip_size(max_skip_size); + builder_.add_ngram_size(ngram_size); + builder_.add_include_all_ngrams(include_all_ngrams); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSkipGramOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SpaceToDepthOptionsT : public ::flatbuffers::NativeTable { + typedef SpaceToDepthOptions TableType; + int32_t block_size = 0; +}; + +struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SpaceToDepthOptionsT NativeTableType; + typedef SpaceToDepthOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); } - tflite::OneHotOptionsT *AsOneHotOptions() { - return type == BuiltinOptions_OneHotOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE, 4) && + verifier.EndTable(); } - const tflite::OneHotOptionsT *AsOneHotOptions() const { - return type == BuiltinOptions_OneHotOptions ? 
- reinterpret_cast(value) : nullptr; + SpaceToDepthOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SpaceToDepthOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SpaceToDepthOptionsBuilder { + typedef SpaceToDepthOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) { + fbb_.AddElement(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); } - tflite::LogicalAndOptionsT *AsLogicalAndOptions() { - return type == BuiltinOptions_LogicalAndOptions ? - reinterpret_cast(value) : nullptr; + explicit SpaceToDepthOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const { - return type == BuiltinOptions_LogicalAndOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::LogicalNotOptionsT *AsLogicalNotOptions() { - return type == BuiltinOptions_LogicalNotOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSpaceToDepthOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t block_size = 0) { + SpaceToDepthOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSpaceToDepthOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DepthToSpaceOptionsT : public ::flatbuffers::NativeTable { + typedef DepthToSpaceOptions TableType; + int32_t block_size = 0; +}; + +struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DepthToSpaceOptionsT NativeTableType; + typedef DepthToSpaceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); } - const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const { - return type == BuiltinOptions_LogicalNotOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE, 4) && + verifier.EndTable(); } - tflite::UnpackOptionsT *AsUnpackOptions() { - return type == BuiltinOptions_UnpackOptions ? 
- reinterpret_cast(value) : nullptr; + DepthToSpaceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthToSpaceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthToSpaceOptionsBuilder { + typedef DepthToSpaceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) { + fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); } - const tflite::UnpackOptionsT *AsUnpackOptions() const { - return type == BuiltinOptions_UnpackOptions ? - reinterpret_cast(value) : nullptr; + explicit DepthToSpaceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::FloorDivOptionsT *AsFloorDivOptions() { - return type == BuiltinOptions_FloorDivOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::FloorDivOptionsT *AsFloorDivOptions() const { - return type == BuiltinOptions_FloorDivOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateDepthToSpaceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t block_size = 0) { + DepthToSpaceOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateDepthToSpaceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubOptionsT : public ::flatbuffers::NativeTable { + typedef SubOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool pot_scale_int16 = true; +}; + +struct SubOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SubOptionsT NativeTableType; + typedef SubOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_POT_SCALE_INT16 = 6 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - tflite::SquareOptionsT *AsSquareOptions() { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; + bool pot_scale_int16() const { + return GetField(VT_POT_SCALE_INT16, 1) != 0; } - const tflite::SquareOptionsT *AsSquareOptions() const { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_POT_SCALE_INT16, 1) && + verifier.EndTable(); } - tflite::ZerosLikeOptionsT *AsZerosLikeOptions() { - return type == BuiltinOptions_ZerosLikeOptions ? 
- reinterpret_cast(value) : nullptr; + SubOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SubOptionsBuilder { + typedef SubOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const { - return type == BuiltinOptions_ZerosLikeOptions ? - reinterpret_cast(value) : nullptr; + void add_pot_scale_int16(bool pot_scale_int16) { + fbb_.AddElement(SubOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); } - tflite::FillOptionsT *AsFillOptions() { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; + explicit SubOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::FillOptionsT *AsFillOptions() const { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSubOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool pot_scale_int16 = true) { + SubOptionsBuilder builder_(_fbb); + builder_.add_pot_scale_int16(pot_scale_int16); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSubOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DivOptionsT : public ::flatbuffers::NativeTable { + typedef DivOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct DivOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DivOptionsT NativeTableType; + typedef DivOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); } - tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? 
- reinterpret_cast(value) : nullptr; + DivOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DivOptionsBuilder { + typedef DivOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? - reinterpret_cast(value) : nullptr; + explicit DivOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateDivOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + DivOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TopKV2OptionsT : public ::flatbuffers::NativeTable { + typedef TopKV2Options TableType; +}; + +struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TopKV2OptionsT NativeTableType; + typedef TopKV2OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - tflite::FloorModOptionsT *AsFloorModOptions() { - return type == BuiltinOptions_FloorModOptions ? - reinterpret_cast(value) : nullptr; + TopKV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TopKV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TopKV2OptionsBuilder { + typedef TopKV2Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit TopKV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::FloorModOptionsT *AsFloorModOptions() const { - return type == BuiltinOptions_FloorModOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::RangeOptionsT *AsRangeOptions() { - return type == BuiltinOptions_RangeOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateTopKV2Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + TopKV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateTopKV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct EmbeddingLookupSparseOptionsT : public ::flatbuffers::NativeTable { + typedef EmbeddingLookupSparseOptions TableType; + tflite::CombinerType combiner = tflite::CombinerType_SUM; +}; + +struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef EmbeddingLookupSparseOptionsT NativeTableType; + typedef EmbeddingLookupSparseOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COMBINER = 4 + }; + tflite::CombinerType combiner() const { + return static_cast(GetField(VT_COMBINER, 0)); } - const tflite::RangeOptionsT *AsRangeOptions() const { - return type == BuiltinOptions_RangeOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COMBINER, 1) && + verifier.EndTable(); } - tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? 
- reinterpret_cast(value) : nullptr; + EmbeddingLookupSparseOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct EmbeddingLookupSparseOptionsBuilder { + typedef EmbeddingLookupSparseOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_combiner(tflite::CombinerType combiner) { + fbb_.AddElement(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast(combiner), 0); } - const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? - reinterpret_cast(value) : nullptr; + explicit EmbeddingLookupSparseOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::LeakyReluOptionsT *AsLeakyReluOptions() { - return type == BuiltinOptions_LeakyReluOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const { - return type == BuiltinOptions_LeakyReluOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateEmbeddingLookupSparseOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::CombinerType combiner = tflite::CombinerType_SUM) { + EmbeddingLookupSparseOptionsBuilder builder_(_fbb); + builder_.add_combiner(combiner); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateEmbeddingLookupSparseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherOptionsT : public ::flatbuffers::NativeTable { + typedef GatherOptions TableType; + int32_t axis = 0; + int32_t batch_dims = 0; +}; + +struct GatherOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef GatherOptionsT NativeTableType; + typedef GatherOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_BATCH_DIMS = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); } - tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; + int32_t batch_dims() const { + return GetField(VT_BATCH_DIMS, 0); } - const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_BATCH_DIMS, 4) && + verifier.EndTable(); } - tflite::MirrorPadOptionsT *AsMirrorPadOptions() { - return type == BuiltinOptions_MirrorPadOptions ? 
- reinterpret_cast(value) : nullptr; + GatherOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherOptionsBuilder { + typedef GatherOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); } - const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const { - return type == BuiltinOptions_MirrorPadOptions ? - reinterpret_cast(value) : nullptr; + void add_batch_dims(int32_t batch_dims) { + fbb_.AddElement(GatherOptions::VT_BATCH_DIMS, batch_dims, 0); } - tflite::AbsOptionsT *AsAbsOptions() { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; + explicit GatherOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::AbsOptionsT *AsAbsOptions() const { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::SplitVOptionsT *AsSplitVOptions() { - return type == BuiltinOptions_SplitVOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateGatherOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t batch_dims = 0) { + GatherOptionsBuilder builder_(_fbb); + builder_.add_batch_dims(batch_dims); + builder_.add_axis(axis); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TransposeOptionsT : public ::flatbuffers::NativeTable { + typedef TransposeOptions TableType; +}; + +struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TransposeOptionsT NativeTableType; + typedef TransposeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const tflite::SplitVOptionsT *AsSplitVOptions() const { - return type == BuiltinOptions_SplitVOptions ? - reinterpret_cast(value) : nullptr; + TransposeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TransposeOptionsBuilder { + typedef TransposeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit TransposeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::UniqueOptionsT *AsUniqueOptions() { - return type == BuiltinOptions_UniqueOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::UniqueOptionsT *AsUniqueOptions() const { - return type == BuiltinOptions_UniqueOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateTransposeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + TransposeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ExpOptionsT : public ::flatbuffers::NativeTable { + typedef ExpOptions TableType; +}; + +struct ExpOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ExpOptionsT NativeTableType; + typedef ExpOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - tflite::ReverseV2OptionsT *AsReverseV2Options() { - return type == BuiltinOptions_ReverseV2Options ? - reinterpret_cast(value) : nullptr; + ExpOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExpOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ExpOptionsBuilder { + typedef ExpOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ExpOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::ReverseV2OptionsT *AsReverseV2Options() const { - return type == BuiltinOptions_ReverseV2Options ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::AddNOptionsT *AsAddNOptions() { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateExpOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ExpOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateExpOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CosOptionsT : public ::flatbuffers::NativeTable { + typedef CosOptions TableType; +}; + +struct CosOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef CosOptionsT NativeTableType; + typedef CosOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const tflite::AddNOptionsT *AsAddNOptions() const { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; + CosOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CosOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CosOptionsBuilder { + typedef CosOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit CosOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::GatherNdOptionsT *AsGatherNdOptions() { - return type == BuiltinOptions_GatherNdOptions ? 
- reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::GatherNdOptionsT *AsGatherNdOptions() const { - return type == BuiltinOptions_GatherNdOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateCosOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + CosOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateCosOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReducerOptionsT : public ::flatbuffers::NativeTable { + typedef ReducerOptions TableType; + bool keep_dims = false; +}; + +struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReducerOptionsT NativeTableType; + typedef ReducerOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_DIMS = 4 + }; + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; } - tflite::CosOptionsT *AsCosOptions() { - return type == BuiltinOptions_CosOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_DIMS, 1) && + verifier.EndTable(); } - const tflite::CosOptionsT *AsCosOptions() const { - return type == BuiltinOptions_CosOptions ? 
- reinterpret_cast(value) : nullptr; + ReducerOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReducerOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReducerOptionsBuilder { + typedef ReducerOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_keep_dims(bool keep_dims) { + fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); } - tflite::WhereOptionsT *AsWhereOptions() { - return type == BuiltinOptions_WhereOptions ? - reinterpret_cast(value) : nullptr; + explicit ReducerOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::WhereOptionsT *AsWhereOptions() const { - return type == BuiltinOptions_WhereOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::RankOptionsT *AsRankOptions() { - return type == BuiltinOptions_RankOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateReducerOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool keep_dims = false) { + ReducerOptionsBuilder builder_(_fbb); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateReducerOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SqueezeOptionsT : public ::flatbuffers::NativeTable { + typedef SqueezeOptions TableType; + std::vector squeeze_dims{}; +}; + +struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SqueezeOptionsT NativeTableType; + typedef SqueezeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SQUEEZE_DIMS = 4 + }; + const ::flatbuffers::Vector *squeeze_dims() const { + return GetPointer *>(VT_SQUEEZE_DIMS); } - const tflite::RankOptionsT *AsRankOptions() const { - return type == BuiltinOptions_RankOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SQUEEZE_DIMS) && + verifier.VerifyVector(squeeze_dims()) && + verifier.EndTable(); } - tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() { - return type == BuiltinOptions_ReverseSequenceOptions ? 
- reinterpret_cast(value) : nullptr; + SqueezeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SqueezeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SqueezeOptionsBuilder { + typedef SqueezeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_squeeze_dims(::flatbuffers::Offset<::flatbuffers::Vector> squeeze_dims) { + fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); } - const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const { - return type == BuiltinOptions_ReverseSequenceOptions ? - reinterpret_cast(value) : nullptr; + explicit SqueezeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSqueezeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> squeeze_dims = 0) { + SqueezeOptionsBuilder builder_(_fbb); + builder_.add_squeeze_dims(squeeze_dims); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSqueezeOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *squeeze_dims = nullptr) { + auto squeeze_dims__ = squeeze_dims ? 
_fbb.CreateVector(*squeeze_dims) : 0; + return tflite::CreateSqueezeOptions( + _fbb, + squeeze_dims__); +} + +::flatbuffers::Offset CreateSqueezeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SplitOptionsT : public ::flatbuffers::NativeTable { + typedef SplitOptions TableType; + int32_t num_splits = 0; +}; + +struct SplitOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SplitOptionsT NativeTableType; + typedef SplitOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { + return GetField(VT_NUM_SPLITS, 0); } - tflite::QuantizeOptionsT *AsQuantizeOptions() { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_SPLITS, 4) && + verifier.EndTable(); } - const tflite::QuantizeOptionsT *AsQuantizeOptions() const { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; + SplitOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SplitOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SplitOptionsBuilder { + typedef SplitOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) { + fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); } - tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { - return type == BuiltinOptions_MatrixSetDiagOptions ? 
- reinterpret_cast(value) : nullptr; + explicit SplitOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { - return type == BuiltinOptions_MatrixSetDiagOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::HardSwishOptionsT *AsHardSwishOptions() { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSplitOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) { + SplitOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSplitOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SplitVOptionsT : public ::flatbuffers::NativeTable { + typedef SplitVOptions TableType; + int32_t num_splits = 0; +}; + +struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SplitVOptionsT NativeTableType; + typedef SplitVOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { + return GetField(VT_NUM_SPLITS, 0); } - const tflite::HardSwishOptionsT *AsHardSwishOptions() const { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_SPLITS, 4) && + verifier.EndTable(); } - tflite::IfOptionsT *AsIfOptions() { - return type == BuiltinOptions_IfOptions ? 
- reinterpret_cast(value) : nullptr; + SplitVOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SplitVOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SplitVOptionsBuilder { + typedef SplitVOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) { + fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); } - const tflite::IfOptionsT *AsIfOptions() const { - return type == BuiltinOptions_IfOptions ? - reinterpret_cast(value) : nullptr; + explicit SplitVOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::WhileOptionsT *AsWhileOptions() { - return type == BuiltinOptions_WhileOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::WhileOptionsT *AsWhileOptions() const { - return type == BuiltinOptions_WhileOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSplitVOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) { + SplitVOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSplitVOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StridedSliceOptionsT : public ::flatbuffers::NativeTable { + typedef StridedSliceOptions TableType; + int32_t begin_mask = 0; + int32_t end_mask = 0; + int32_t ellipsis_mask = 0; + int32_t new_axis_mask = 0; + int32_t shrink_axis_mask = 0; + bool offset = false; +}; + +struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StridedSliceOptionsT NativeTableType; + typedef StridedSliceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12, + VT_OFFSET = 14 + }; + int32_t begin_mask() const { + return GetField(VT_BEGIN_MASK, 0); } - tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; + int32_t end_mask() const { + return GetField(VT_END_MASK, 0); } - const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; + int32_t ellipsis_mask() const { + return GetField(VT_ELLIPSIS_MASK, 0); } - tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? 
- reinterpret_cast(value) : nullptr; + int32_t new_axis_mask() const { + return GetField(VT_NEW_AXIS_MASK, 0); } - const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? - reinterpret_cast(value) : nullptr; + int32_t shrink_axis_mask() const { + return GetField(VT_SHRINK_AXIS_MASK, 0); } - tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? - reinterpret_cast(value) : nullptr; + bool offset() const { + return GetField(VT_OFFSET, 0) != 0; } - const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGIN_MASK, 4) && + VerifyField(verifier, VT_END_MASK, 4) && + VerifyField(verifier, VT_ELLIPSIS_MASK, 4) && + VerifyField(verifier, VT_NEW_AXIS_MASK, 4) && + VerifyField(verifier, VT_SHRINK_AXIS_MASK, 4) && + VerifyField(verifier, VT_OFFSET, 1) && + verifier.EndTable(); } - tflite::ScatterNdOptionsT *AsScatterNdOptions() { - return type == BuiltinOptions_ScatterNdOptions ? 
- reinterpret_cast(value) : nullptr; + StridedSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StridedSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StridedSliceOptionsBuilder { + typedef StridedSliceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_begin_mask(int32_t begin_mask) { + fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); } - const tflite::ScatterNdOptionsT *AsScatterNdOptions() const { - return type == BuiltinOptions_ScatterNdOptions ? - reinterpret_cast(value) : nullptr; + void add_end_mask(int32_t end_mask) { + fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); } - tflite::SelectV2OptionsT *AsSelectV2Options() { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; + void add_ellipsis_mask(int32_t ellipsis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); } - const tflite::SelectV2OptionsT *AsSelectV2Options() const { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; + void add_new_axis_mask(int32_t new_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); } - tflite::DensifyOptionsT *AsDensifyOptions() { - return type == BuiltinOptions_DensifyOptions ? - reinterpret_cast(value) : nullptr; + void add_shrink_axis_mask(int32_t shrink_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); } - const tflite::DensifyOptionsT *AsDensifyOptions() const { - return type == BuiltinOptions_DensifyOptions ? 
- reinterpret_cast(value) : nullptr; + void add_offset(bool offset) { + fbb_.AddElement(StridedSliceOptions::VT_OFFSET, static_cast(offset), 0); } - tflite::SegmentSumOptionsT *AsSegmentSumOptions() { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; + explicit StridedSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateStridedSliceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t begin_mask = 0, + int32_t end_mask = 0, + int32_t ellipsis_mask = 0, + int32_t new_axis_mask = 0, + int32_t shrink_axis_mask = 0, + bool offset = false) { + StridedSliceOptionsBuilder builder_(_fbb); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); + builder_.add_offset(offset); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateStridedSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogSoftmaxOptionsT : public ::flatbuffers::NativeTable { + typedef LogSoftmaxOptions TableType; +}; + +struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LogSoftmaxOptionsT NativeTableType; + typedef LogSoftmaxOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + verifier.EndTable(); } - const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; + LogSoftmaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogSoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogSoftmaxOptionsBuilder { + typedef LogSoftmaxOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogSoftmaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::CumsumOptionsT *AsCumsumOptions() { - return type == BuiltinOptions_CumsumOptions ? - reinterpret_cast(value) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::CumsumOptionsT *AsCumsumOptions() const { - return type == BuiltinOptions_CumsumOptions ? 
- reinterpret_cast(value) : nullptr; +}; + +inline ::flatbuffers::Offset CreateLogSoftmaxOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LogSoftmaxOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLogSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CastOptionsT : public ::flatbuffers::NativeTable { + typedef CastOptions TableType; + tflite::TensorType in_data_type = tflite::TensorType_FLOAT32; + tflite::TensorType out_data_type = tflite::TensorType_FLOAT32; +}; + +struct CastOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef CastOptionsT NativeTableType; + typedef CastOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IN_DATA_TYPE = 4, + VT_OUT_DATA_TYPE = 6 + }; + tflite::TensorType in_data_type() const { + return static_cast(GetField(VT_IN_DATA_TYPE, 0)); } - tflite::CallOnceOptionsT *AsCallOnceOptions() { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; + tflite::TensorType out_data_type() const { + return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); } - const tflite::CallOnceOptionsT *AsCallOnceOptions() const { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IN_DATA_TYPE, 1) && + VerifyField(verifier, VT_OUT_DATA_TYPE, 1) && + verifier.EndTable(); } - tflite::BroadcastToOptionsT *AsBroadcastToOptions() { - return type == BuiltinOptions_BroadcastToOptions ? 
- reinterpret_cast(value) : nullptr; + CastOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CastOptionsBuilder { + typedef CastOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_in_data_type(tflite::TensorType in_data_type) { + fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); } - const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const { - return type == BuiltinOptions_BroadcastToOptions ? - reinterpret_cast(value) : nullptr; + void add_out_data_type(tflite::TensorType out_data_type) { + fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); } - tflite::Rfft2dOptionsT *AsRfft2dOptions() { - return type == BuiltinOptions_Rfft2dOptions - ? reinterpret_cast(value) - : nullptr; + explicit CastOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { - return type == BuiltinOptions_Rfft2dOptions - ? 
reinterpret_cast(value) - : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } }; -bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); -bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); +inline ::flatbuffers::Offset CreateCastOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, + tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { + CastOptionsBuilder builder_(_fbb); + builder_.add_out_data_type(out_data_type); + builder_.add_in_data_type(in_data_type); + return builder_.Finish(); +} -enum Padding { - Padding_SAME = 0, - Padding_VALID = 1, - Padding_MIN = Padding_SAME, - Padding_MAX = Padding_VALID +::flatbuffers::Offset CreateCastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DequantizeOptionsT : public ::flatbuffers::NativeTable { + typedef DequantizeOptions TableType; }; -inline const Padding (&EnumValuesPadding())[2] { - static const Padding values[] = { - Padding_SAME, - Padding_VALID - }; - return values; -} +struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DequantizeOptionsT NativeTableType; + typedef DequantizeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + DequantizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DequantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); 
+}; -inline const char * const *EnumNamesPadding() { - static const char * const names[3] = { - "SAME", - "VALID", - nullptr - }; - return names; -} +struct DequantizeOptionsBuilder { + typedef DequantizeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit DequantizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char *EnumNamePadding(Padding e) { - if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return ""; - const size_t index = static_cast(e); - return EnumNamesPadding()[index]; +inline ::flatbuffers::Offset CreateDequantizeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + DequantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -enum ActivationFunctionType { - ActivationFunctionType_NONE = 0, - ActivationFunctionType_RELU = 1, - ActivationFunctionType_RELU_N1_TO_1 = 2, - ActivationFunctionType_RELU6 = 3, - ActivationFunctionType_TANH = 4, - ActivationFunctionType_SIGN_BIT = 5, - ActivationFunctionType_MIN = ActivationFunctionType_NONE, - ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +::flatbuffers::Offset CreateDequantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MaximumMinimumOptionsT : public ::flatbuffers::NativeTable { + typedef MaximumMinimumOptions TableType; }; -inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] { - static const ActivationFunctionType values[] = { - ActivationFunctionType_NONE, - ActivationFunctionType_RELU, - ActivationFunctionType_RELU_N1_TO_1, - ActivationFunctionType_RELU6, - ActivationFunctionType_TANH, - ActivationFunctionType_SIGN_BIT - }; - return values; -} +struct MaximumMinimumOptions 
FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MaximumMinimumOptionsT NativeTableType; + typedef MaximumMinimumOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + MaximumMinimumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MaximumMinimumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline const char * const *EnumNamesActivationFunctionType() { - static const char * const names[7] = { - "NONE", - "RELU", - "RELU_N1_TO_1", - "RELU6", - "TANH", - "SIGN_BIT", - nullptr - }; - return names; -} +struct MaximumMinimumOptionsBuilder { + typedef MaximumMinimumOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit MaximumMinimumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) { - if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return ""; - const size_t index = static_cast(e); - return EnumNamesActivationFunctionType()[index]; +inline ::flatbuffers::Offset CreateMaximumMinimumOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + MaximumMinimumOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -enum LSHProjectionType { - LSHProjectionType_UNKNOWN = 0, - LSHProjectionType_SPARSE = 1, - LSHProjectionType_DENSE = 2, - LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, - LSHProjectionType_MAX = LSHProjectionType_DENSE 
+::flatbuffers::Offset CreateMaximumMinimumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TileOptionsT : public ::flatbuffers::NativeTable { + typedef TileOptions TableType; }; -inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] { - static const LSHProjectionType values[] = { - LSHProjectionType_UNKNOWN, - LSHProjectionType_SPARSE, - LSHProjectionType_DENSE - }; - return values; -} +struct TileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TileOptionsT NativeTableType; + typedef TileOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + TileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline const char * const *EnumNamesLSHProjectionType() { - static const char * const names[4] = { - "UNKNOWN", - "SPARSE", - "DENSE", - nullptr - }; - return names; -} +struct TileOptionsBuilder { + typedef TileOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit TileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char *EnumNameLSHProjectionType(LSHProjectionType e) { - if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return ""; - const size_t index = static_cast(e); - return EnumNamesLSHProjectionType()[index]; +inline ::flatbuffers::Offset 
CreateTileOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + TileOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -enum FullyConnectedOptionsWeightsFormat { - FullyConnectedOptionsWeightsFormat_DEFAULT = 0, - FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, - FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT, - FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +::flatbuffers::Offset CreateTileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ArgMaxOptionsT : public ::flatbuffers::NativeTable { + typedef ArgMaxOptions TableType; + tflite::TensorType output_type = tflite::TensorType_FLOAT32; }; -inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] { - static const FullyConnectedOptionsWeightsFormat values[] = { - FullyConnectedOptionsWeightsFormat_DEFAULT, - FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ArgMaxOptionsT NativeTableType; + typedef ArgMaxOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_TYPE = 4 }; - return values; -} + tflite::TensorType output_type() const { + return static_cast(GetField(VT_OUTPUT_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_TYPE, 1) && + verifier.EndTable(); + } + ArgMaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ArgMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline const char * 
const *EnumNamesFullyConnectedOptionsWeightsFormat() { - static const char * const names[3] = { - "DEFAULT", - "SHUFFLED4x16INT8", - nullptr - }; - return names; -} +struct ArgMaxOptionsBuilder { + typedef ArgMaxOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_output_type(tflite::TensorType output_type) { + fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); + } + explicit ArgMaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) { - if (flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return ""; - const size_t index = static_cast(e); - return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; +inline ::flatbuffers::Offset CreateArgMaxOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType output_type = tflite::TensorType_FLOAT32) { + ArgMaxOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); } -enum LSTMKernelType { - LSTMKernelType_FULL = 0, - LSTMKernelType_BASIC = 1, - LSTMKernelType_MIN = LSTMKernelType_FULL, - LSTMKernelType_MAX = LSTMKernelType_BASIC -}; +::flatbuffers::Offset CreateArgMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] { - static const LSTMKernelType values[] = { - LSTMKernelType_FULL, - LSTMKernelType_BASIC - }; - return values; -} +struct ArgMinOptionsT : public ::flatbuffers::NativeTable { + typedef ArgMinOptions TableType; + tflite::TensorType output_type = tflite::TensorType_FLOAT32; 
+}; -inline const char * const *EnumNamesLSTMKernelType() { - static const char * const names[3] = { - "FULL", - "BASIC", - nullptr +struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ArgMinOptionsT NativeTableType; + typedef ArgMinOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_TYPE = 4 }; - return names; -} - -inline const char *EnumNameLSTMKernelType(LSTMKernelType e) { - if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return ""; - const size_t index = static_cast(e); - return EnumNamesLSTMKernelType()[index]; -} + tflite::TensorType output_type() const { + return static_cast(GetField(VT_OUTPUT_TYPE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_TYPE, 1) && + verifier.EndTable(); + } + ArgMinOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ArgMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -enum CombinerType { - CombinerType_SUM = 0, - CombinerType_MEAN = 1, - CombinerType_SQRTN = 2, - CombinerType_MIN = CombinerType_SUM, - CombinerType_MAX = CombinerType_SQRTN +struct ArgMinOptionsBuilder { + typedef ArgMinOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_output_type(tflite::TensorType output_type) { + fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); + } + explicit ArgMinOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } }; -inline 
const CombinerType (&EnumValuesCombinerType())[3] { - static const CombinerType values[] = { - CombinerType_SUM, - CombinerType_MEAN, - CombinerType_SQRTN - }; - return values; +inline ::flatbuffers::Offset CreateArgMinOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType output_type = tflite::TensorType_FLOAT32) { + ArgMinOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); } -inline const char * const *EnumNamesCombinerType() { - static const char * const names[4] = { - "SUM", - "MEAN", - "SQRTN", - nullptr - }; - return names; -} +::flatbuffers::Offset CreateArgMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -inline const char *EnumNameCombinerType(CombinerType e) { - if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return ""; - const size_t index = static_cast(e); - return EnumNamesCombinerType()[index]; -} +struct GreaterOptionsT : public ::flatbuffers::NativeTable { + typedef GreaterOptions TableType; +}; -enum MirrorPadMode { - MirrorPadMode_REFLECT = 0, - MirrorPadMode_SYMMETRIC = 1, - MirrorPadMode_MIN = MirrorPadMode_REFLECT, - MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef GreaterOptionsT NativeTableType; + typedef GreaterOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + GreaterOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GreaterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] { 
- static const MirrorPadMode values[] = { - MirrorPadMode_REFLECT, - MirrorPadMode_SYMMETRIC - }; - return values; -} +struct GreaterOptionsBuilder { + typedef GreaterOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GreaterOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char * const *EnumNamesMirrorPadMode() { - static const char * const names[3] = { - "REFLECT", - "SYMMETRIC", - nullptr - }; - return names; +inline ::flatbuffers::Offset CreateGreaterOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + GreaterOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -inline const char *EnumNameMirrorPadMode(MirrorPadMode e) { - if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return ""; - const size_t index = static_cast(e); - return EnumNamesMirrorPadMode()[index]; -} +::flatbuffers::Offset CreateGreaterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -enum CustomOptionsFormat { - CustomOptionsFormat_FLEXBUFFERS = 0, - CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, - CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +struct GreaterEqualOptionsT : public ::flatbuffers::NativeTable { + typedef GreaterEqualOptions TableType; }; -inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { - static const CustomOptionsFormat values[] = { - CustomOptionsFormat_FLEXBUFFERS - }; - return values; -} +struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef GreaterEqualOptionsT NativeTableType; + typedef GreaterEqualOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + verifier.EndTable(); + } + GreaterEqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GreaterEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline const char * const *EnumNamesCustomOptionsFormat() { - static const char * const names[2] = { - "FLEXBUFFERS", - nullptr - }; - return names; -} +struct GreaterEqualOptionsBuilder { + typedef GreaterEqualOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GreaterEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) { - if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return ""; - const size_t index = static_cast(e); - return EnumNamesCustomOptionsFormat()[index]; +inline ::flatbuffers::Offset CreateGreaterEqualOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + GreaterEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -struct CustomQuantizationT : public flatbuffers::NativeTable { - typedef CustomQuantization TableType; - std::vector custom; - CustomQuantizationT() { - } +::flatbuffers::Offset CreateGreaterEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LessOptionsT : public ::flatbuffers::NativeTable { + typedef LessOptions TableType; }; -struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef 
CustomQuantizationT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_CUSTOM = 4 - }; - const flatbuffers::Vector *custom() const { - return GetPointer *>(VT_CUSTOM); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct LessOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LessOptionsT NativeTableType; + typedef LessOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_CUSTOM) && - verifier.VerifyVector(custom()) && verifier.EndTable(); } - CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + LessOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LessOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct CustomQuantizationBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_custom(flatbuffers::Offset> custom) { - fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); - } - explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct LessOptionsBuilder { + typedef LessOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LessOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &); - flatbuffers::Offset Finish() 
{ + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateCustomQuantization( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> custom = 0) { - CustomQuantizationBuilder builder_(_fbb); - builder_.add_custom(custom); +inline ::flatbuffers::Offset CreateLessOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LessOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateCustomQuantizationDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *custom = nullptr) { - if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); } - auto custom__ = custom ? _fbb.CreateVector(*custom) : 0; - return tflite::CreateCustomQuantization( - _fbb, - custom__); -} +::flatbuffers::Offset CreateLessOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct LessEqualOptionsT : public ::flatbuffers::NativeTable { + typedef LessEqualOptions TableType; +}; -struct QuantizationParametersT : public flatbuffers::NativeTable { - typedef QuantizationParameters TableType; - std::vector min; - std::vector max; - std::vector scale; - std::vector zero_point; - tflite::QuantizationDetailsUnion details; - int32_t quantized_dimension; - QuantizationParametersT() - : quantized_dimension(0) { +struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LessEqualOptionsT NativeTableType; + typedef LessEqualOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } + LessEqualOptionsT *UnPack(const 
::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LessEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizationParametersT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MIN = 4, - VT_MAX = 6, - VT_SCALE = 8, - VT_ZERO_POINT = 10, - VT_DETAILS_TYPE = 12, - VT_DETAILS = 14, - VT_QUANTIZED_DIMENSION = 16 - }; - const flatbuffers::Vector *min() const { - return GetPointer *>(VT_MIN); +struct LessEqualOptionsBuilder { + typedef LessEqualOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LessEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const flatbuffers::Vector *max() const { - return GetPointer *>(VT_MAX); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const flatbuffers::Vector *scale() const { - return GetPointer *>(VT_SCALE); +}; + +inline ::flatbuffers::Offset CreateLessEqualOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LessEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLessEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NegOptionsT : public ::flatbuffers::NativeTable { + typedef NegOptions TableType; +}; + +struct NegOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef NegOptionsT NativeTableType; + typedef NegOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + verifier.EndTable(); } - const flatbuffers::Vector *zero_point() const { - return GetPointer *>(VT_ZERO_POINT); + NegOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NegOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NegOptionsBuilder { + typedef NegOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit NegOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - tflite::QuantizationDetails details_type() const { - return static_cast(GetField(VT_DETAILS_TYPE, 0)); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const void *details() const { - return GetPointer(VT_DETAILS); +}; + +inline ::flatbuffers::Offset CreateNegOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + NegOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateNegOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SelectOptionsT : public ::flatbuffers::NativeTable { + typedef SelectOptions TableType; +}; + +struct SelectOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SelectOptionsT NativeTableType; + typedef SelectOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - template const T *details_as() const; - const tflite::CustomQuantization *details_as_CustomQuantization() const { - return details_type() == tflite::QuantizationDetails_CustomQuantization ? 
static_cast(details()) : nullptr; + SelectOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SelectOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SelectOptionsBuilder { + typedef SelectOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SelectOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - int32_t quantized_dimension() const { - return GetField(VT_QUANTIZED_DIMENSION, 0); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { +}; + +inline ::flatbuffers::Offset CreateSelectOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SelectOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateSelectOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SliceOptionsT : public ::flatbuffers::NativeTable { + typedef SliceOptions TableType; +}; + +struct SliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SliceOptionsT NativeTableType; + typedef SliceOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_MIN) && - verifier.VerifyVector(min()) && - VerifyOffset(verifier, VT_MAX) && - verifier.VerifyVector(max()) && - VerifyOffset(verifier, VT_SCALE) && - verifier.VerifyVector(scale()) && - VerifyOffset(verifier, VT_ZERO_POINT) && - verifier.VerifyVector(zero_point()) && - VerifyField(verifier, VT_DETAILS_TYPE) && - 
VerifyOffset(verifier, VT_DETAILS) && - VerifyQuantizationDetails(verifier, details(), details_type()) && - VerifyField(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable(); } - QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as() const { - return details_as_CustomQuantization(); +struct SliceOptionsBuilder { + typedef SliceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateSliceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SliceOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -struct QuantizationParametersBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_min(flatbuffers::Offset> min) { - fbb_.AddOffset(QuantizationParameters::VT_MIN, min); +::flatbuffers::Offset CreateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher 
= nullptr); + +struct TransposeConvOptionsT : public ::flatbuffers::NativeTable { + typedef TransposeConvOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32; +}; + +struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TransposeConvOptionsT NativeTableType; + typedef TransposeConvOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10, + VT_QUANTIZED_BIAS_TYPE = 12 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - void add_max(flatbuffers::Offset> max) { - fbb_.AddOffset(QuantizationParameters::VT_MAX, max); + tflite::TensorType quantized_bias_type() const { + return static_cast(GetField(VT_QUANTIZED_BIAS_TYPE, 0)); } - void add_scale(flatbuffers::Offset> scale) { - fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_QUANTIZED_BIAS_TYPE, 1) && + verifier.EndTable(); } - void add_zero_point(flatbuffers::Offset> zero_point) { - fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); + TransposeConvOptionsT *UnPack(const 
::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TransposeConvOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TransposeConvOptionsBuilder { + typedef TransposeConvOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(TransposeConvOptions::VT_PADDING, static_cast(padding), 0); } - void add_details_type(tflite::QuantizationDetails details_type) { - fbb_.AddElement(QuantizationParameters::VT_DETAILS_TYPE, static_cast(details_type), 0); + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); } - void add_details(flatbuffers::Offset details) { - fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); } - void add_quantized_dimension(int32_t quantized_dimension) { - fbb_.AddElement(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0); + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(TransposeConvOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_quantized_bias_type(tflite::TensorType quantized_bias_type) { + fbb_.AddElement(TransposeConvOptions::VT_QUANTIZED_BIAS_TYPE, static_cast(quantized_bias_type), 0); } - explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit TransposeConvOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &); - flatbuffers::Offset Finish() { + 
::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateQuantizationParameters( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> min = 0, - flatbuffers::Offset> max = 0, - flatbuffers::Offset> scale = 0, - flatbuffers::Offset> zero_point = 0, - tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, - flatbuffers::Offset details = 0, - int32_t quantized_dimension = 0) { - QuantizationParametersBuilder builder_(_fbb); - builder_.add_quantized_dimension(quantized_dimension); - builder_.add_details(details); - builder_.add_zero_point(zero_point); - builder_.add_scale(scale); - builder_.add_max(max); - builder_.add_min(min); - builder_.add_details_type(details_type); +inline ::flatbuffers::Offset CreateTransposeConvOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + tflite::TensorType quantized_bias_type = tflite::TensorType_FLOAT32) { + TransposeConvOptionsBuilder builder_(_fbb); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_quantized_bias_type(quantized_bias_type); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); return builder_.Finish(); } -inline flatbuffers::Offset CreateQuantizationParametersDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *min = nullptr, - const std::vector *max = nullptr, - const std::vector *scale = nullptr, - const std::vector *zero_point = nullptr, - tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, - flatbuffers::Offset details = 0, - int32_t quantized_dimension = 0) { - auto min__ = min ? _fbb.CreateVector(*min) : 0; - auto max__ = max ? 
_fbb.CreateVector(*max) : 0; - auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - auto zero_point__ = zero_point ? _fbb.CreateVector(*zero_point) : 0; - return tflite::CreateQuantizationParameters( - _fbb, - min__, - max__, - scale__, - zero_point__, - details_type, - details, - quantized_dimension); -} - -flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateTransposeConvOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct Int32VectorT : public flatbuffers::NativeTable { - typedef Int32Vector TableType; - std::vector values; - Int32VectorT() { - } +struct ExpandDimsOptionsT : public ::flatbuffers::NativeTable { + typedef ExpandDimsOptions TableType; }; -struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Int32VectorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 - }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ExpandDimsOptionsT NativeTableType; + typedef ExpandDimsOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && verifier.EndTable(); } - Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + 
ExpandDimsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExpandDimsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct Int32VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Int32Vector::VT_VALUES, values); - } - explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ExpandDimsOptionsBuilder { + typedef ExpandDimsOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ExpandDimsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - Int32VectorBuilder &operator=(const Int32VectorBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateInt32Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Int32VectorBuilder builder_(_fbb); - builder_.add_values(values); +inline ::flatbuffers::Offset CreateExpandDimsOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ExpandDimsOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateInt32VectorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *values = nullptr) { - auto values__ = values ? 
_fbb.CreateVector(*values) : 0; - return tflite::CreateInt32Vector( - _fbb, - values__); -} +::flatbuffers::Offset CreateExpandDimsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -flatbuffers::Offset CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Uint16VectorT : public flatbuffers::NativeTable { - typedef Uint16Vector TableType; - std::vector values; - Uint16VectorT() { - } +struct SparseToDenseOptionsT : public ::flatbuffers::NativeTable { + typedef SparseToDenseOptions TableType; + bool validate_indices = false; }; -struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Uint16VectorT NativeTableType; +struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SparseToDenseOptionsT NativeTableType; + typedef SparseToDenseOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 + VT_VALIDATE_INDICES = 4 }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); + bool validate_indices() const { + return GetField(VT_VALIDATE_INDICES, 0) != 0; } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && + VerifyField(verifier, VT_VALIDATE_INDICES, 1) && verifier.EndTable(); } - Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SparseToDenseOptionsT *UnPack(const 
::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SparseToDenseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct Uint16VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Uint16Vector::VT_VALUES, values); +struct SparseToDenseOptionsBuilder { + typedef SparseToDenseOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_validate_indices(bool validate_indices) { + fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); } - explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit SparseToDenseOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - Uint16VectorBuilder &operator=(const Uint16VectorBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateUint16Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Uint16VectorBuilder builder_(_fbb); - builder_.add_values(values); +inline ::flatbuffers::Offset CreateSparseToDenseOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool validate_indices = false) { + SparseToDenseOptionsBuilder builder_(_fbb); + builder_.add_validate_indices(validate_indices); return builder_.Finish(); } -inline flatbuffers::Offset CreateUint16VectorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *values = nullptr) { - if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); } - auto 
values__ = values ? _fbb.CreateVector(*values) : 0; - return tflite::CreateUint16Vector( - _fbb, - values__); -} - -flatbuffers::Offset CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSparseToDenseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct Uint8VectorT : public flatbuffers::NativeTable { - typedef Uint8Vector TableType; - std::vector values; - Uint8VectorT() { - } +struct EqualOptionsT : public ::flatbuffers::NativeTable { + typedef EqualOptions TableType; }; -struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Uint8VectorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 - }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct EqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef EqualOptionsT NativeTableType; + typedef EqualOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && verifier.EndTable(); } - Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + EqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset 
Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct Uint8VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Uint8Vector::VT_VALUES, values); - } - explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct EqualOptionsBuilder { + typedef EqualOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit EqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - Uint8VectorBuilder &operator=(const Uint8VectorBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateUint8Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Uint8VectorBuilder builder_(_fbb); - builder_.add_values(values); +inline ::flatbuffers::Offset CreateEqualOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + EqualOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateUint8VectorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *values = nullptr) { - if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); } - auto values__ = values ? 
_fbb.CreateVector(*values) : 0; - return tflite::CreateUint8Vector( - _fbb, - values__); -} - -flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct DimensionMetadataT : public flatbuffers::NativeTable { - typedef DimensionMetadata TableType; - tflite::DimensionType format; - int32_t dense_size; - tflite::SparseIndexVectorUnion array_segments; - tflite::SparseIndexVectorUnion array_indices; - DimensionMetadataT() - : format(tflite::DimensionType_DENSE), - dense_size(0) { - } +struct NotEqualOptionsT : public ::flatbuffers::NativeTable { + typedef NotEqualOptions TableType; }; -struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DimensionMetadataT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FORMAT = 4, - VT_DENSE_SIZE = 6, - VT_ARRAY_SEGMENTS_TYPE = 8, - VT_ARRAY_SEGMENTS = 10, - VT_ARRAY_INDICES_TYPE = 12, - VT_ARRAY_INDICES = 14 - }; - tflite::DimensionType format() const { - return static_cast(GetField(VT_FORMAT, 0)); - } - int32_t dense_size() const { - return GetField(VT_DENSE_SIZE, 0); - } - tflite::SparseIndexVector array_segments_type() const { - return static_cast(GetField(VT_ARRAY_SEGMENTS_TYPE, 0)); - } - const void *array_segments() const { - return GetPointer(VT_ARRAY_SEGMENTS); - } - template const T *array_segments_as() const; - const tflite::Int32Vector *array_segments_as_Int32Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_segments()) : nullptr; - } - const tflite::Uint16Vector *array_segments_as_Uint16Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Uint16Vector ? 
static_cast(array_segments()) : nullptr; - } - const tflite::Uint8Vector *array_segments_as_Uint8Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast(array_segments()) : nullptr; - } - tflite::SparseIndexVector array_indices_type() const { - return static_cast(GetField(VT_ARRAY_INDICES_TYPE, 0)); - } - const void *array_indices() const { - return GetPointer(VT_ARRAY_INDICES); - } - template const T *array_indices_as() const; - const tflite::Int32Vector *array_indices_as_Int32Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_indices()) : nullptr; - } - const tflite::Uint16Vector *array_indices_as_Uint16Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_indices()) : nullptr; - } - const tflite::Uint8Vector *array_indices_as_Uint8Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast(array_indices()) : nullptr; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef NotEqualOptionsT NativeTableType; + typedef NotEqualOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FORMAT) && - VerifyField(verifier, VT_DENSE_SIZE) && - VerifyField(verifier, VT_ARRAY_SEGMENTS_TYPE) && - VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && - VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && - VerifyField(verifier, VT_ARRAY_INDICES_TYPE) && - VerifyOffset(verifier, VT_ARRAY_INDICES) && - VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && verifier.EndTable(); } - DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + NotEqualOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NotEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Int32Vector(); -} - -template<> inline const tflite::Uint16Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Uint16Vector(); -} - -template<> inline const tflite::Uint8Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Uint8Vector(); -} +struct NotEqualOptionsBuilder { + typedef NotEqualOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit NotEqualOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; -template<> inline const tflite::Int32Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Int32Vector(); +inline ::flatbuffers::Offset CreateNotEqualOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + NotEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -template<> inline const tflite::Uint16Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Uint16Vector(); -} +::flatbuffers::Offset CreateNotEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); 
-template<> inline const tflite::Uint8Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Uint8Vector(); -} +struct ShapeOptionsT : public ::flatbuffers::NativeTable { + typedef ShapeOptions TableType; + tflite::TensorType out_type = tflite::TensorType_FLOAT32; +}; -struct DimensionMetadataBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_format(tflite::DimensionType format) { - fbb_.AddElement(DimensionMetadata::VT_FORMAT, static_cast(format), 0); +struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ShapeOptionsT NativeTableType; + typedef ShapeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUT_TYPE = 4 + }; + tflite::TensorType out_type() const { + return static_cast(GetField(VT_OUT_TYPE, 0)); } - void add_dense_size(int32_t dense_size) { - fbb_.AddElement(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUT_TYPE, 1) && + verifier.EndTable(); } - void add_array_segments_type(tflite::SparseIndexVector array_segments_type) { - fbb_.AddElement(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast(array_segments_type), 0); + ShapeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ShapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ShapeOptionsBuilder { + typedef ShapeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_out_type(tflite::TensorType out_type) { + fbb_.AddElement(ShapeOptions::VT_OUT_TYPE, static_cast(out_type), 0); } - void add_array_segments(flatbuffers::Offset 
array_segments) { - fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); + explicit ShapeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - void add_array_indices_type(tflite::SparseIndexVector array_indices_type) { - fbb_.AddElement(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast(array_indices_type), 0); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_array_indices(flatbuffers::Offset array_indices) { - fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); +}; + +inline ::flatbuffers::Offset CreateShapeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType out_type = tflite::TensorType_FLOAT32) { + ShapeOptionsBuilder builder_(_fbb); + builder_.add_out_type(out_type); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateShapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RankOptionsT : public ::flatbuffers::NativeTable { + typedef RankOptions TableType; +}; + +struct RankOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef RankOptionsT NativeTableType; + typedef RankOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) + RankOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RankOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RankOptionsBuilder { + typedef RankOptions Table; + ::flatbuffers::FlatBufferBuilder 
&fbb_; + ::flatbuffers::uoffset_t start_; + explicit RankOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateDimensionMetadata( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::DimensionType format = tflite::DimensionType_DENSE, - int32_t dense_size = 0, - tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE, - flatbuffers::Offset array_segments = 0, - tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE, - flatbuffers::Offset array_indices = 0) { - DimensionMetadataBuilder builder_(_fbb); - builder_.add_array_indices(array_indices); - builder_.add_array_segments(array_segments); - builder_.add_dense_size(dense_size); - builder_.add_array_indices_type(array_indices_type); - builder_.add_array_segments_type(array_segments_type); - builder_.add_format(format); +inline ::flatbuffers::Offset CreateRankOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + RankOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateRankOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SparsityParametersT : public flatbuffers::NativeTable { - typedef SparsityParameters TableType; - std::vector traversal_order; - std::vector block_map; - std::vector> dim_metadata; - SparsityParametersT() { - } +struct PowOptionsT : public ::flatbuffers::NativeTable { + typedef PowOptions TableType; }; -struct 
SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SparsityParametersT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TRAVERSAL_ORDER = 4, - VT_BLOCK_MAP = 6, - VT_DIM_METADATA = 8 - }; - const flatbuffers::Vector *traversal_order() const { - return GetPointer *>(VT_TRAVERSAL_ORDER); - } - const flatbuffers::Vector *block_map() const { - return GetPointer *>(VT_BLOCK_MAP); - } - const flatbuffers::Vector> *dim_metadata() const { - return GetPointer> *>(VT_DIM_METADATA); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct PowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef PowOptionsT NativeTableType; + typedef PowOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && - verifier.VerifyVector(traversal_order()) && - VerifyOffset(verifier, VT_BLOCK_MAP) && - verifier.VerifyVector(block_map()) && - VerifyOffset(verifier, VT_DIM_METADATA) && - verifier.VerifyVector(dim_metadata()) && - verifier.VerifyVectorOfTables(dim_metadata()) && verifier.EndTable(); } - SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + PowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SparsityParametersBuilder { - flatbuffers::FlatBufferBuilder 
&fbb_; - flatbuffers::uoffset_t start_; - void add_traversal_order(flatbuffers::Offset> traversal_order) { - fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); - } - void add_block_map(flatbuffers::Offset> block_map) { - fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); - } - void add_dim_metadata(flatbuffers::Offset>> dim_metadata) { - fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); - } - explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct PowOptionsBuilder { + typedef PowOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit PowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SparsityParametersBuilder &operator=(const SparsityParametersBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSparsityParameters( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> traversal_order = 0, - flatbuffers::Offset> block_map = 0, - flatbuffers::Offset>> dim_metadata = 0) { - SparsityParametersBuilder builder_(_fbb); - builder_.add_dim_metadata(dim_metadata); - builder_.add_block_map(block_map); - builder_.add_traversal_order(traversal_order); +inline ::flatbuffers::Offset CreatePowOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + PowOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateSparsityParametersDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *traversal_order = nullptr, - const std::vector *block_map = nullptr, - const std::vector> *dim_metadata = nullptr) { - auto traversal_order__ = traversal_order ? _fbb.CreateVector(*traversal_order) : 0; - auto block_map__ = block_map ? 
_fbb.CreateVector(*block_map) : 0; - auto dim_metadata__ = dim_metadata ? _fbb.CreateVector>(*dim_metadata) : 0; - return tflite::CreateSparsityParameters( - _fbb, - traversal_order__, - block_map__, - dim_metadata__); -} +::flatbuffers::Offset CreatePowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct FakeQuantOptionsT : public ::flatbuffers::NativeTable { + typedef FakeQuantOptions TableType; + float min = 0.0f; + float max = 0.0f; + int32_t num_bits = 0; + bool narrow_range = false; +}; -struct TensorT : public flatbuffers::NativeTable { - typedef Tensor TableType; - std::vector shape; - tflite::TensorType type; - uint32_t buffer; - std::string name; - std::unique_ptr quantization; - bool is_variable; - std::unique_ptr sparsity; - std::vector shape_signature; - TensorT() - : type(tflite::TensorType_FLOAT32), - buffer(0), - is_variable(false) { - } -}; - -struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorT NativeTableType; +struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FakeQuantOptionsT NativeTableType; + typedef FakeQuantOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SHAPE = 4, - VT_TYPE = 6, - VT_BUFFER = 8, - VT_NAME = 10, - VT_QUANTIZATION = 12, - VT_IS_VARIABLE = 14, - VT_SPARSITY = 16, - VT_SHAPE_SIGNATURE = 18 + VT_MIN = 4, + VT_MAX = 6, + VT_NUM_BITS = 8, + VT_NARROW_RANGE = 10 }; - const flatbuffers::Vector *shape() const { - return GetPointer *>(VT_SHAPE); - } - tflite::TensorType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - uint32_t buffer() const { - return GetField(VT_BUFFER, 0); - } - const flatbuffers::String *name() const { - return 
GetPointer(VT_NAME); - } - const tflite::QuantizationParameters *quantization() const { - return GetPointer(VT_QUANTIZATION); + float min() const { + return GetField(VT_MIN, 0.0f); } - bool is_variable() const { - return GetField(VT_IS_VARIABLE, 0) != 0; + float max() const { + return GetField(VT_MAX, 0.0f); } - const tflite::SparsityParameters *sparsity() const { - return GetPointer(VT_SPARSITY); + int32_t num_bits() const { + return GetField(VT_NUM_BITS, 0); } - const flatbuffers::Vector *shape_signature() const { - return GetPointer *>(VT_SHAPE_SIGNATURE); + bool narrow_range() const { + return GetField(VT_NARROW_RANGE, 0) != 0; } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SHAPE) && - verifier.VerifyVector(shape()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_BUFFER) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyOffset(verifier, VT_QUANTIZATION) && - verifier.VerifyTable(quantization()) && - VerifyField(verifier, VT_IS_VARIABLE) && - VerifyOffset(verifier, VT_SPARSITY) && - verifier.VerifyTable(sparsity()) && - VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && - verifier.VerifyVector(shape_signature()) && + VerifyField(verifier, VT_MIN, 4) && + VerifyField(verifier, VT_MAX, 4) && + VerifyField(verifier, VT_NUM_BITS, 4) && + VerifyField(verifier, VT_NARROW_RANGE, 1) && verifier.EndTable(); } - TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + FakeQuantOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FakeQuantOptionsT *_o, const 
::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct TensorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_shape(flatbuffers::Offset> shape) { - fbb_.AddOffset(Tensor::VT_SHAPE, shape); - } - void add_type(tflite::TensorType type) { - fbb_.AddElement(Tensor::VT_TYPE, static_cast(type), 0); - } - void add_buffer(uint32_t buffer) { - fbb_.AddElement(Tensor::VT_BUFFER, buffer, 0); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(Tensor::VT_NAME, name); - } - void add_quantization(flatbuffers::Offset quantization) { - fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); +struct FakeQuantOptionsBuilder { + typedef FakeQuantOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_min(float min) { + fbb_.AddElement(FakeQuantOptions::VT_MIN, min, 0.0f); } - void add_is_variable(bool is_variable) { - fbb_.AddElement(Tensor::VT_IS_VARIABLE, static_cast(is_variable), 0); + void add_max(float max) { + fbb_.AddElement(FakeQuantOptions::VT_MAX, max, 0.0f); } - void add_sparsity(flatbuffers::Offset sparsity) { - fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); + void add_num_bits(int32_t num_bits) { + fbb_.AddElement(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); } - void add_shape_signature(flatbuffers::Offset> shape_signature) { - fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); + void add_narrow_range(bool narrow_range) { + fbb_.AddElement(FakeQuantOptions::VT_NARROW_RANGE, static_cast(narrow_range), 0); } - explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit FakeQuantOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - TensorBuilder &operator=(const TensorBuilder &); - flatbuffers::Offset Finish() { + 
::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateTensor( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> shape = 0, - tflite::TensorType type = tflite::TensorType_FLOAT32, - uint32_t buffer = 0, - flatbuffers::Offset name = 0, - flatbuffers::Offset quantization = 0, - bool is_variable = false, - flatbuffers::Offset sparsity = 0, - flatbuffers::Offset> shape_signature = 0) { - TensorBuilder builder_(_fbb); - builder_.add_shape_signature(shape_signature); - builder_.add_sparsity(sparsity); - builder_.add_quantization(quantization); - builder_.add_name(name); - builder_.add_buffer(buffer); - builder_.add_shape(shape); - builder_.add_is_variable(is_variable); - builder_.add_type(type); +inline ::flatbuffers::Offset CreateFakeQuantOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + float min = 0.0f, + float max = 0.0f, + int32_t num_bits = 0, + bool narrow_range = false) { + FakeQuantOptionsBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_narrow_range(narrow_range); return builder_.Finish(); } -inline flatbuffers::Offset CreateTensorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *shape = nullptr, - tflite::TensorType type = tflite::TensorType_FLOAT32, - uint32_t buffer = 0, - const char *name = nullptr, - flatbuffers::Offset quantization = 0, - bool is_variable = false, - flatbuffers::Offset sparsity = 0, - const std::vector *shape_signature = nullptr) { - auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; - auto name__ = name ? _fbb.CreateString(name) : 0; - auto shape_signature__ = shape_signature ? 
_fbb.CreateVector(*shape_signature) : 0; - return tflite::CreateTensor( - _fbb, - shape__, - type, - buffer, - name__, - quantization, - is_variable, - sparsity, - shape_signature__); -} +::flatbuffers::Offset CreateFakeQuantOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct PackOptionsT : public ::flatbuffers::NativeTable { + typedef PackOptions TableType; + int32_t values_count = 0; + int32_t axis = 0; +}; -struct Conv2DOptionsT : public flatbuffers::NativeTable { - typedef Conv2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - tflite::ActivationFunctionType fused_activation_function; - int32_t dilation_w_factor; - int32_t dilation_h_factor; - Conv2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - dilation_w_factor(1), - dilation_h_factor(1) { - } -}; - -struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Conv2DOptionsT NativeTableType; +struct PackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef PackOptionsT NativeTableType; + typedef PackOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_FUSED_ACTIVATION_FUNCTION = 10, - VT_DILATION_W_FACTOR = 12, - VT_DILATION_H_FACTOR = 14 + VT_VALUES_COUNT = 4, + VT_AXIS = 6 }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return 
static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - int32_t dilation_w_factor() const { - return GetField(VT_DILATION_W_FACTOR, 1); + int32_t values_count() const { + return GetField(VT_VALUES_COUNT, 0); } - int32_t dilation_h_factor() const { - return GetField(VT_DILATION_H_FACTOR, 1); + int32_t axis() const { + return GetField(VT_AXIS, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_DILATION_W_FACTOR) && - VerifyField(verifier, VT_DILATION_H_FACTOR) && + VerifyField(verifier, VT_VALUES_COUNT, 4) && + VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } - Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + PackOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct Conv2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(Conv2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(Conv2DOptions::VT_STRIDE_W, stride_w, 0); +struct PackOptionsBuilder { + typedef PackOptions Table; + 
::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_values_count(int32_t values_count) { + fbb_.AddElement(PackOptions::VT_VALUES_COUNT, values_count, 0); } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(Conv2DOptions::VT_STRIDE_H, stride_h, 0); + void add_axis(int32_t axis) { + fbb_.AddElement(PackOptions::VT_AXIS, axis, 0); } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + explicit PackOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - void add_dilation_w_factor(int32_t dilation_w_factor) { - fbb_.AddElement(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_dilation_h_factor(int32_t dilation_h_factor) { - fbb_.AddElement(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); +}; + +inline ::flatbuffers::Offset CreatePackOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t values_count = 0, + int32_t axis = 0) { + PackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_values_count(values_count); + return builder_.Finish(); +} + +::flatbuffers::Offset CreatePackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalOrOptionsT : public ::flatbuffers::NativeTable { + typedef LogicalOrOptions TableType; +}; + +struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LogicalOrOptionsT NativeTableType; + typedef LogicalOrOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - explicit 
Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + LogicalOrOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalOrOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalOrOptionsBuilder { + typedef LogicalOrOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalOrOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateConv2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - int32_t dilation_w_factor = 1, - int32_t dilation_h_factor = 1) { - Conv2DOptionsBuilder builder_(_fbb); - builder_.add_dilation_h_factor(dilation_h_factor); - builder_.add_dilation_w_factor(dilation_w_factor); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); +inline ::flatbuffers::Offset CreateLogicalOrOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LogicalOrOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset 
CreateLogicalOrOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct Pool2DOptionsT : public flatbuffers::NativeTable { - typedef Pool2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - int32_t filter_width; - int32_t filter_height; - tflite::ActivationFunctionType fused_activation_function; - Pool2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0), - filter_width(0), - filter_height(0), - fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Pool2DOptionsT NativeTableType; +struct OneHotOptionsT : public ::flatbuffers::NativeTable { + typedef OneHotOptions TableType; + int32_t axis = 0; +}; + +struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef OneHotOptionsT NativeTableType; + typedef OneHotOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_FILTER_WIDTH = 10, - VT_FILTER_HEIGHT = 12, - VT_FUSED_ACTIVATION_FUNCTION = 14 + VT_AXIS = 4 }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - int32_t filter_width() const { - return GetField(VT_FILTER_WIDTH, 0); - } - int32_t filter_height() const { - return GetField(VT_FILTER_HEIGHT, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + int32_t axis() const { + return GetField(VT_AXIS, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, 
VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_FILTER_WIDTH) && - VerifyField(verifier, VT_FILTER_HEIGHT) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_AXIS, 4) && verifier.EndTable(); } - Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + OneHotOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OneHotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct Pool2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(Pool2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(Pool2DOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(Pool2DOptions::VT_STRIDE_H, stride_h, 0); - } - void add_filter_width(int32_t filter_width) { - fbb_.AddElement(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); - } - void add_filter_height(int32_t filter_height) { - fbb_.AddElement(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); +struct OneHotOptionsBuilder { + typedef OneHotOptions Table; + 
::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(OneHotOptions::VT_AXIS, axis, 0); } - explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit OneHotOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreatePool2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - int32_t filter_width = 0, - int32_t filter_height = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - Pool2DOptionsBuilder builder_(_fbb); - builder_.add_filter_height(filter_height); - builder_.add_filter_width(filter_width); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); +inline ::flatbuffers::Offset CreateOneHotOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) { + OneHotOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); return builder_.Finish(); } -flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateOneHotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable { - typedef DepthwiseConv2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - int32_t 
depth_multiplier; - tflite::ActivationFunctionType fused_activation_function; - int32_t dilation_w_factor; - int32_t dilation_h_factor; - DepthwiseConv2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0), - depth_multiplier(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - dilation_w_factor(1), - dilation_h_factor(1) { - } -}; - -struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthwiseConv2DOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_DEPTH_MULTIPLIER = 10, - VT_FUSED_ACTIVATION_FUNCTION = 12, - VT_DILATION_W_FACTOR = 14, - VT_DILATION_H_FACTOR = 16 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - int32_t depth_multiplier() const { - return GetField(VT_DEPTH_MULTIPLIER, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); +struct AbsOptionsT : public ::flatbuffers::NativeTable { + typedef AbsOptions TableType; +}; + +struct AbsOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef AbsOptionsT NativeTableType; + typedef AbsOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - int32_t dilation_w_factor() const { - return GetField(VT_DILATION_W_FACTOR, 1); + AbsOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AbsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const ::flatbuffers::rehasher_function_t 
*_rehasher = nullptr); +}; + +struct AbsOptionsBuilder { + typedef AbsOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit AbsOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - int32_t dilation_h_factor() const { - return GetField(VT_DILATION_H_FACTOR, 1); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { +}; + +inline ::flatbuffers::Offset CreateAbsOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + AbsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateAbsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HardSwishOptionsT : public ::flatbuffers::NativeTable { + typedef HardSwishOptions TableType; +}; + +struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef HardSwishOptionsT NativeTableType; + typedef HardSwishOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_DEPTH_MULTIPLIER) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_DILATION_W_FACTOR) && - VerifyField(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable(); } - DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + 
HardSwishOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HardSwishOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct DepthwiseConv2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); +struct HardSwishOptionsBuilder { + typedef HardSwishOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit HardSwishOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_depth_multiplier(int32_t depth_multiplier) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); +}; + +inline ::flatbuffers::Offset CreateHardSwishOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + HardSwishOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateHardSwishOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalAndOptionsT : public ::flatbuffers::NativeTable { + typedef LogicalAndOptions TableType; +}; + +struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LogicalAndOptionsT NativeTableType; + typedef 
LogicalAndOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + LogicalAndOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalAndOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalAndOptionsBuilder { + typedef LogicalAndOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalAndOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - void add_dilation_w_factor(int32_t dilation_w_factor) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_dilation_h_factor(int32_t dilation_h_factor) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); +}; + +inline ::flatbuffers::Offset CreateLogicalAndOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LogicalAndOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLogicalAndOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalNotOptionsT : public ::flatbuffers::NativeTable { + typedef LogicalNotOptions TableType; +}; + +struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table 
{ + typedef LogicalNotOptionsT NativeTableType; + typedef LogicalNotOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + LogicalNotOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalNotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalNotOptionsBuilder { + typedef LogicalNotOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit LogicalNotOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateDepthwiseConv2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - int32_t depth_multiplier = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - int32_t dilation_w_factor = 1, - int32_t dilation_h_factor = 1) { - DepthwiseConv2DOptionsBuilder builder_(_fbb); - builder_.add_dilation_h_factor(dilation_h_factor); - builder_.add_dilation_w_factor(dilation_w_factor); - builder_.add_depth_multiplier(depth_multiplier); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); 
+inline ::flatbuffers::Offset CreateLogicalNotOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + LogicalNotOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateLogicalNotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable { - typedef ConcatEmbeddingsOptions TableType; - int32_t num_channels; - std::vector num_columns_per_channel; - std::vector embedding_dim_per_channel; - ConcatEmbeddingsOptionsT() - : num_channels(0) { - } +struct UnpackOptionsT : public ::flatbuffers::NativeTable { + typedef UnpackOptions TableType; + int32_t num = 0; + int32_t axis = 0; }; -struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ConcatEmbeddingsOptionsT NativeTableType; +struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UnpackOptionsT NativeTableType; + typedef UnpackOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_CHANNELS = 4, - VT_NUM_COLUMNS_PER_CHANNEL = 6, - VT_EMBEDDING_DIM_PER_CHANNEL = 8 + VT_NUM = 4, + VT_AXIS = 6 }; - int32_t num_channels() const { - return GetField(VT_NUM_CHANNELS, 0); + int32_t num() const { + return GetField(VT_NUM, 0); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM, 4) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } + UnpackOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnpackOptionsT *_o, const ::flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnpackOptionsBuilder { + typedef UnpackOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_num(int32_t num) { + fbb_.AddElement(UnpackOptions::VT_NUM, num, 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(UnpackOptions::VT_AXIS, axis, 0); } - const flatbuffers::Vector *num_columns_per_channel() const { - return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); + explicit UnpackOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const flatbuffers::Vector *embedding_dim_per_channel() const { - return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { +}; + +inline ::flatbuffers::Offset CreateUnpackOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num = 0, + int32_t axis = 0) { + UnpackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_num(num); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateUnpackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FloorDivOptionsT : public ::flatbuffers::NativeTable { + typedef FloorDivOptions TableType; +}; + +struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FloorDivOptionsT NativeTableType; + typedef FloorDivOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_CHANNELS) && - VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) && - 
verifier.VerifyVector(num_columns_per_channel()) && - VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) && - verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable(); } - ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + FloorDivOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FloorDivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ConcatEmbeddingsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_channels(int32_t num_channels) { - fbb_.AddElement(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0); - } - void add_num_columns_per_channel(flatbuffers::Offset> num_columns_per_channel) { - fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel); - } - void add_embedding_dim_per_channel(flatbuffers::Offset> embedding_dim_per_channel) { - fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel); - } - explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct FloorDivOptionsBuilder { + typedef FloorDivOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit FloorDivOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &); - 
flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateConcatEmbeddingsOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_channels = 0, - flatbuffers::Offset> num_columns_per_channel = 0, - flatbuffers::Offset> embedding_dim_per_channel = 0) { - ConcatEmbeddingsOptionsBuilder builder_(_fbb); - builder_.add_embedding_dim_per_channel(embedding_dim_per_channel); - builder_.add_num_columns_per_channel(num_columns_per_channel); - builder_.add_num_channels(num_channels); +inline ::flatbuffers::Offset CreateFloorDivOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + FloorDivOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateConcatEmbeddingsOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_channels = 0, - const std::vector *num_columns_per_channel = nullptr, - const std::vector *embedding_dim_per_channel = nullptr) { - auto num_columns_per_channel__ = num_columns_per_channel ? _fbb.CreateVector(*num_columns_per_channel) : 0; - auto embedding_dim_per_channel__ = embedding_dim_per_channel ? 
_fbb.CreateVector(*embedding_dim_per_channel) : 0; - return tflite::CreateConcatEmbeddingsOptions( - _fbb, - num_channels, - num_columns_per_channel__, - embedding_dim_per_channel__); -} - -flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateFloorDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LSHProjectionOptionsT : public flatbuffers::NativeTable { - typedef LSHProjectionOptions TableType; - tflite::LSHProjectionType type; - LSHProjectionOptionsT() - : type(tflite::LSHProjectionType_UNKNOWN) { - } +struct SquareOptionsT : public ::flatbuffers::NativeTable { + typedef SquareOptions TableType; }; -struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSHProjectionOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4 - }; - tflite::LSHProjectionType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct SquareOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SquareOptionsT NativeTableType; + typedef SquareOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && verifier.EndTable(); } - LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SquareOptionsT *UnPack(const ::flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + void UnPackTo(SquareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct LSHProjectionOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(tflite::LSHProjectionType type) { - fbb_.AddElement(LSHProjectionOptions::VT_TYPE, static_cast(type), 0); - } - explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct SquareOptionsBuilder { + typedef SquareOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SquareOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLSHProjectionOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) { - LSHProjectionOptionsBuilder builder_(_fbb); - builder_.add_type(type); +inline ::flatbuffers::Offset CreateSquareOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SquareOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSquareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SVDFOptionsT : public flatbuffers::NativeTable { - typedef SVDFOptions TableType; 
- int32_t rank; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - SVDFOptionsT() - : rank(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } +struct ZerosLikeOptionsT : public ::flatbuffers::NativeTable { + typedef ZerosLikeOptions TableType; }; -struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SVDFOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RANK = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - int32_t rank() const { - return GetField(VT_RANK, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ZerosLikeOptionsT NativeTableType; + typedef ZerosLikeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_RANK) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ZerosLikeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ZerosLikeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static 
::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SVDFOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_rank(int32_t rank) { - fbb_.AddElement(SVDFOptions::VT_RANK, rank, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ZerosLikeOptionsBuilder { + typedef ZerosLikeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ZerosLikeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSVDFOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t rank = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - SVDFOptionsBuilder builder_(_fbb); - builder_.add_rank(rank); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateZerosLikeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ZerosLikeOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset 
CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateZerosLikeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct RNNOptionsT : public flatbuffers::NativeTable { - typedef RNNOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - RNNOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } +struct FillOptionsT : public ::flatbuffers::NativeTable { + typedef FillOptions TableType; }; -struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct FillOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FillOptionsT NativeTableType; + typedef FillOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); + FillOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FillOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct RNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct FillOptionsBuilder { + typedef FillOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit FillOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - RNNOptionsBuilder &operator=(const RNNOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - RNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateFillOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + FillOptionsBuilder builder_(_fbb); return 
builder_.Finish(); } -flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateFillOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SequenceRNNOptionsT : public flatbuffers::NativeTable { - typedef SequenceRNNOptions TableType; - bool time_major; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - SequenceRNNOptionsT() - : time_major(false), - fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } +struct FloorModOptionsT : public ::flatbuffers::NativeTable { + typedef FloorModOptions TableType; }; -struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SequenceRNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FloorModOptionsT NativeTableType; + typedef FloorModOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - SequenceRNNOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + FloorModOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FloorModOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SequenceRNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_time_major(bool time_major) { - fbb_.AddElement(SequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct FloorModOptionsBuilder { + typedef FloorModOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit FloorModOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset 
CreateSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - SequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); +inline ::flatbuffers::Offset CreateFloorModOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + FloorModOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateFloorModOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceRNNOptions TableType; - bool time_major; - tflite::ActivationFunctionType fused_activation_function; - bool merge_outputs; - bool asymmetric_quantize_inputs; - BidirectionalSequenceRNNOptionsT() - : time_major(false), - fused_activation_function(tflite::ActivationFunctionType_NONE), - merge_outputs(false), - asymmetric_quantize_inputs(false) { - } +struct RangeOptionsT : public ::flatbuffers::NativeTable { + typedef RangeOptions TableType; }; -struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceRNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_MERGE_OUTPUTS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 - }; - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - tflite::ActivationFunctionType 
fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct RangeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef RangeOptionsT NativeTableType; + typedef RangeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + RangeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RangeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct BidirectionalSequenceRNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_time_major(bool time_major) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - 
fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_merge_outputs(bool merge_outputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct RangeOptionsBuilder { + typedef RangeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit RangeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool merge_outputs = false, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); +inline ::flatbuffers::Offset CreateRangeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + RangeOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const 
BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateRangeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct FullyConnectedOptionsT : public flatbuffers::NativeTable { - typedef FullyConnectedOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - tflite::FullyConnectedOptionsWeightsFormat weights_format; - bool keep_num_dims; - bool asymmetric_quantize_inputs; - FullyConnectedOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - weights_format(tflite::FullyConnectedOptionsWeightsFormat_DEFAULT), - keep_num_dims(false), - asymmetric_quantize_inputs(false) { - } +struct LeakyReluOptionsT : public ::flatbuffers::NativeTable { + typedef LeakyReluOptions TableType; + float alpha = 0.0f; }; -struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FullyConnectedOptionsT NativeTableType; +struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef LeakyReluOptionsT NativeTableType; + typedef LeakyReluOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_WEIGHTS_FORMAT = 6, - VT_KEEP_NUM_DIMS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + VT_ALPHA = 4 }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - tflite::FullyConnectedOptionsWeightsFormat weights_format() const { - return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); - } - bool keep_num_dims() const { - return GetField(VT_KEEP_NUM_DIMS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + float alpha() const { + return GetField(VT_ALPHA, 0.0f); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_WEIGHTS_FORMAT) && - VerifyField(verifier, VT_KEEP_NUM_DIMS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && + VerifyField(verifier, VT_ALPHA, 4) && verifier.EndTable(); } - FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + LeakyReluOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LeakyReluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct FullyConnectedOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); +struct LeakyReluOptionsBuilder { + typedef LeakyReluOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); } - void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) { - fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); + explicit LeakyReluOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - void 
add_keep_num_dims(bool keep_num_dims) { - fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); +}; + +inline ::flatbuffers::Offset CreateLeakyReluOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0f) { + LeakyReluOptionsBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateLeakyReluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SquaredDifferenceOptionsT : public ::flatbuffers::NativeTable { + typedef SquaredDifferenceOptions TableType; +}; + +struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SquaredDifferenceOptionsT NativeTableType; + typedef SquaredDifferenceOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + SquaredDifferenceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SquaredDifferenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SquaredDifferenceOptionsBuilder { + typedef SquaredDifferenceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit 
SquaredDifferenceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateFullyConnectedOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT, - bool keep_num_dims = false, - bool asymmetric_quantize_inputs = false) { - FullyConnectedOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_keep_num_dims(keep_num_dims); - builder_.add_weights_format(weights_format); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateSquaredDifferenceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SquaredDifferenceOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSquaredDifferenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SoftmaxOptionsT : public flatbuffers::NativeTable { - typedef SoftmaxOptions TableType; - float beta; - SoftmaxOptionsT() - : beta(0.0f) { - } +struct MirrorPadOptionsT : public ::flatbuffers::NativeTable { + typedef MirrorPadOptions TableType; + tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT; }; -struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { - typedef SoftmaxOptionsT NativeTableType; +struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MirrorPadOptionsT NativeTableType; + typedef MirrorPadOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BETA = 4 + VT_MODE = 4 }; - float beta() const { - return GetField(VT_BETA, 0.0f); + tflite::MirrorPadMode mode() const { + return static_cast(GetField(VT_MODE, 0)); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BETA) && + VerifyField(verifier, VT_MODE, 1) && verifier.EndTable(); } - SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + MirrorPadOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MirrorPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_beta(float beta) { - fbb_.AddElement(SoftmaxOptions::VT_BETA, beta, 0.0f); +struct MirrorPadOptionsBuilder { + typedef MirrorPadOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_mode(tflite::MirrorPadMode mode) { + fbb_.AddElement(MirrorPadOptions::VT_MODE, static_cast(mode), 0); } - explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit 
MirrorPadOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float beta = 0.0f) { - SoftmaxOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); +inline ::flatbuffers::Offset CreateMirrorPadOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) { + MirrorPadOptionsBuilder builder_(_fbb); + builder_.add_mode(mode); return builder_.Finish(); } -flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateMirrorPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ConcatenationOptionsT : public flatbuffers::NativeTable { - typedef ConcatenationOptions TableType; - int32_t axis; - tflite::ActivationFunctionType fused_activation_function; - ConcatenationOptionsT() - : axis(0), - fused_activation_function(tflite::ActivationFunctionType_NONE) { - } +struct UniqueOptionsT : public ::flatbuffers::NativeTable { + typedef UniqueOptions TableType; + tflite::TensorType idx_out_type = tflite::TensorType_INT32; }; -struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ConcatenationOptionsT NativeTableType; +struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UniqueOptionsT NativeTableType; + typedef UniqueOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - 
VT_FUSED_ACTIVATION_FUNCTION = 6 + VT_IDX_OUT_TYPE = 4 }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + tflite::TensorType idx_out_type() const { + return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_IDX_OUT_TYPE, 1) && verifier.EndTable(); } - ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UniqueOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UniqueOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ConcatenationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(ConcatenationOptions::VT_AXIS, axis, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); +struct UniqueOptionsBuilder { + typedef UniqueOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void 
add_idx_out_type(tflite::TensorType idx_out_type) { + fbb_.AddElement(UniqueOptions::VT_IDX_OUT_TYPE, static_cast(idx_out_type), 2); } - explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit UniqueOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateConcatenationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - ConcatenationOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateUniqueOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType idx_out_type = tflite::TensorType_INT32) { + UniqueOptionsBuilder builder_(_fbb); + builder_.add_idx_out_type(idx_out_type); return builder_.Finish(); } -flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateUniqueOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct AddOptionsT : public flatbuffers::NativeTable { - typedef AddOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - AddOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { +struct ReverseV2OptionsT : public ::flatbuffers::NativeTable { + typedef ReverseV2Options TableType; +}; + +struct 
ReverseV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReverseV2OptionsT NativeTableType; + typedef ReverseV2OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } + ReverseV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AddOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); +struct ReverseV2OptionsBuilder { + typedef ReverseV2Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ReverseV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { +}; + +inline ::flatbuffers::Offset CreateReverseV2Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ReverseV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateReverseV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AddNOptionsT : public ::flatbuffers::NativeTable { + 
typedef AddNOptions TableType; +}; + +struct AddNOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef AddNOptionsT NativeTableType; + typedef AddNOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_POT_SCALE_INT16) && verifier.EndTable(); } - AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + AddNOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AddNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct AddOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(AddOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct AddNOptionsBuilder { + typedef AddNOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit AddNOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - AddOptionsBuilder &operator=(const AddOptionsBuilder &); - flatbuffers::Offset Finish() { + 
::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateAddOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - AddOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateAddNOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + AddNOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateAddNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct MulOptionsT : public flatbuffers::NativeTable { - typedef MulOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - MulOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } +struct GatherNdOptionsT : public ::flatbuffers::NativeTable { + typedef GatherNdOptions TableType; }; -struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef GatherNdOptionsT NativeTableType; + typedef GatherNdOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const 
{ return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); } - MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + GatherNdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct MulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct GatherNdOptionsBuilder { + typedef GatherNdOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit GatherNdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - MulOptionsBuilder &operator=(const MulOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - MulOptionsBuilder builder_(_fbb); - 
builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateGatherNdOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + GatherNdOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateGatherNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct L2NormOptionsT : public flatbuffers::NativeTable { - typedef L2NormOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - L2NormOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } +struct WhereOptionsT : public ::flatbuffers::NativeTable { + typedef WhereOptions TableType; }; -struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef L2NormOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct WhereOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef WhereOptionsT NativeTableType; + typedef WhereOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); } - L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); + WhereOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhereOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct L2NormOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct WhereOptionsBuilder { + typedef WhereOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit WhereOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateL2NormOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - L2NormOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateWhereOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + WhereOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateWhereOptions(::flatbuffers::FlatBufferBuilder &_fbb, 
const WhereOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable { - typedef LocalResponseNormalizationOptions TableType; - int32_t radius; - float bias; - float alpha; - float beta; - LocalResponseNormalizationOptionsT() - : radius(0), - bias(0.0f), - alpha(0.0f), - beta(0.0f) { - } +struct ReverseSequenceOptionsT : public ::flatbuffers::NativeTable { + typedef ReverseSequenceOptions TableType; + int32_t seq_dim = 0; + int32_t batch_dim = 0; }; -struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LocalResponseNormalizationOptionsT NativeTableType; +struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReverseSequenceOptionsT NativeTableType; + typedef ReverseSequenceOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RADIUS = 4, - VT_BIAS = 6, - VT_ALPHA = 8, - VT_BETA = 10 + VT_SEQ_DIM = 4, + VT_BATCH_DIM = 6 }; - int32_t radius() const { - return GetField(VT_RADIUS, 0); - } - float bias() const { - return GetField(VT_BIAS, 0.0f); - } - float alpha() const { - return GetField(VT_ALPHA, 0.0f); + int32_t seq_dim() const { + return GetField(VT_SEQ_DIM, 0); } - float beta() const { - return GetField(VT_BETA, 0.0f); + int32_t batch_dim() const { + return GetField(VT_BATCH_DIM, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_RADIUS) && - VerifyField(verifier, VT_BIAS) && - VerifyField(verifier, VT_ALPHA) && - VerifyField(verifier, VT_BETA) && + VerifyField(verifier, VT_SEQ_DIM, 4) && + VerifyField(verifier, VT_BATCH_DIM, 4) && verifier.EndTable(); } - LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void 
UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ReverseSequenceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseSequenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct LocalResponseNormalizationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_radius(int32_t radius) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); - } - void add_bias(float bias) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); - } - void add_alpha(float alpha) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); +struct ReverseSequenceOptionsBuilder { + typedef ReverseSequenceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_seq_dim(int32_t seq_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); } - void add_beta(float beta) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); + void add_batch_dim(int32_t batch_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); } - explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit ReverseSequenceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LocalResponseNormalizationOptionsBuilder &operator=(const LocalResponseNormalizationOptionsBuilder &); - flatbuffers::Offset Finish() { + 
::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t radius = 0, - float bias = 0.0f, - float alpha = 0.0f, - float beta = 0.0f) { - LocalResponseNormalizationOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - builder_.add_alpha(alpha); - builder_.add_bias(bias); - builder_.add_radius(radius); +inline ::flatbuffers::Offset CreateReverseSequenceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t seq_dim = 0, + int32_t batch_dim = 0) { + ReverseSequenceOptionsBuilder builder_(_fbb); + builder_.add_batch_dim(batch_dim); + builder_.add_seq_dim(seq_dim); return builder_.Finish(); } -flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateReverseSequenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LSTMOptionsT : public flatbuffers::NativeTable { - typedef LSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - tflite::LSTMKernelType kernel_type; - bool asymmetric_quantize_inputs; - LSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - kernel_type(tflite::LSTMKernelType_FULL), - asymmetric_quantize_inputs(false) { - } +struct MatrixDiagOptionsT : public ::flatbuffers::NativeTable { + typedef MatrixDiagOptions TableType; }; -struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - 
VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_KERNEL_TYPE = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); +struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MatrixDiagOptionsT NativeTableType; + typedef MatrixDiagOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - tflite::LSTMKernelType kernel_type() const { - return static_cast(GetField(VT_KERNEL_TYPE, 0)); + MatrixDiagOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatrixDiagOptionsBuilder { + typedef MatrixDiagOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit MatrixDiagOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { +}; + +inline ::flatbuffers::Offset CreateMatrixDiagOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + MatrixDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +::flatbuffers::Offset 
CreateMatrixDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizeOptionsT : public ::flatbuffers::NativeTable { + typedef QuantizeOptions TableType; +}; + +struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef QuantizeOptionsT NativeTableType; + typedef QuantizeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_KERNEL_TYPE) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + QuantizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct LSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - 
fbb_.AddElement(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_kernel_type(tflite::LSTMKernelType kernel_type) { - fbb_.AddElement(LSTMOptions::VT_KERNEL_TYPE, static_cast(kernel_type), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct QuantizeOptionsBuilder { + typedef QuantizeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit QuantizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, - bool asymmetric_quantize_inputs = false) { - LSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_kernel_type(kernel_type); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateQuantizeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + QuantizeOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset 
CreateQuantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef UnidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool time_major; - bool asymmetric_quantize_inputs; - UnidirectionalSequenceLSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - time_major(false), - asymmetric_quantize_inputs(false) { - } +struct MatrixSetDiagOptionsT : public ::flatbuffers::NativeTable { + typedef MatrixSetDiagOptions TableType; }; -struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_TIME_MAJOR = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MatrixSetDiagOptionsT NativeTableType; + typedef MatrixSetDiagOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, 
VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } - UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + MatrixSetDiagOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixSetDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct UnidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_time_major(bool time_major) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit 
UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct MatrixSetDiagOptionsBuilder { + typedef MatrixSetDiagOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit MatrixSetDiagOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - UnidirectionalSequenceLSTMOptionsBuilder &operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool time_major = false, - bool asymmetric_quantize_inputs = false) { - UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateMatrixSetDiagOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + MatrixSetDiagOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateMatrixSetDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceLSTMOptions 
TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool merge_outputs; - bool time_major; - bool asymmetric_quantize_inputs; - BidirectionalSequenceLSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - merge_outputs(false), - time_major(true), - asymmetric_quantize_inputs(false) { - } -}; - -struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceLSTMOptionsT NativeTableType; +struct IfOptionsT : public ::flatbuffers::NativeTable { + typedef IfOptions TableType; + int32_t then_subgraph_index = 0; + int32_t else_subgraph_index = 0; +}; + +struct IfOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef IfOptionsT NativeTableType; + typedef IfOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_MERGE_OUTPUTS = 10, - VT_TIME_MAJOR = 12, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 + VT_THEN_SUBGRAPH_INDEX = 4, + VT_ELSE_SUBGRAPH_INDEX = 6 }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 1) != 0; + int32_t then_subgraph_index() const { + return GetField(VT_THEN_SUBGRAPH_INDEX, 0); } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + int32_t else_subgraph_index() const { + return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { 
return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && + VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX, 4) && + VerifyField(verifier, VT_ELSE_SUBGRAPH_INDEX, 4) && verifier.EndTable(); } - BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + IfOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(IfOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct BidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_merge_outputs(bool merge_outputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void 
add_time_major(bool time_major) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); +struct IfOptionsBuilder { + typedef IfOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_then_subgraph_index(int32_t then_subgraph_index) { + fbb_.AddElement(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + void add_else_subgraph_index(int32_t else_subgraph_index) { + fbb_.AddElement(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); } - explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit IfOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - BidirectionalSequenceLSTMOptionsBuilder &operator=(const BidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool merge_outputs = false, - bool time_major = true, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset 
CreateIfOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t then_subgraph_index = 0, + int32_t else_subgraph_index = 0) { + IfOptionsBuilder builder_(_fbb); + builder_.add_else_subgraph_index(else_subgraph_index); + builder_.add_then_subgraph_index(then_subgraph_index); return builder_.Finish(); } -flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateIfOptions(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { - typedef ResizeBilinearOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeBilinearOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } +struct CallOnceOptionsT : public ::flatbuffers::NativeTable { + typedef CallOnceOptions TableType; + int32_t init_subgraph_index = 0; }; -struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeBilinearOptionsT NativeTableType; +struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef CallOnceOptionsT NativeTableType; + typedef CallOnceOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 8, - VT_HALF_PIXEL_CENTERS = 10 + VT_INIT_SUBGRAPH_INDEX = 4 }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; + int32_t init_subgraph_index() const { + return GetField(VT_INIT_SUBGRAPH_INDEX, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, 
VT_HALF_PIXEL_CENTERS) && + VerifyField(verifier, VT_INIT_SUBGRAPH_INDEX, 4) && verifier.EndTable(); } - ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + CallOnceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CallOnceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ResizeBilinearOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); +struct CallOnceOptionsBuilder { + typedef CallOnceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_init_subgraph_index(int32_t init_subgraph_index) { + fbb_.AddElement(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0); } - explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit CallOnceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = 
::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateResizeBilinearOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeBilinearOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); +inline ::flatbuffers::Offset CreateCallOnceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t init_subgraph_index = 0) { + CallOnceOptionsBuilder builder_(_fbb); + builder_.add_init_subgraph_index(init_subgraph_index); return builder_.Finish(); } -flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateCallOnceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable { - typedef ResizeNearestNeighborOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeNearestNeighborOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } +struct WhileOptionsT : public ::flatbuffers::NativeTable { + typedef WhileOptions TableType; + int32_t cond_subgraph_index = 0; + int32_t body_subgraph_index = 0; }; -struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeNearestNeighborOptionsT NativeTableType; +struct WhileOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef WhileOptionsT NativeTableType; + typedef WhileOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 4, - VT_HALF_PIXEL_CENTERS = 6 + VT_COND_SUBGRAPH_INDEX = 4, + VT_BODY_SUBGRAPH_INDEX = 6 }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; + int32_t 
cond_subgraph_index() const { + return GetField(VT_COND_SUBGRAPH_INDEX, 0); } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; + int32_t body_subgraph_index() const { + return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, VT_HALF_PIXEL_CENTERS) && + VerifyField(verifier, VT_COND_SUBGRAPH_INDEX, 4) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX, 4) && verifier.EndTable(); } - ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + WhileOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ResizeNearestNeighborOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); +struct WhileOptionsBuilder { + typedef WhileOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_cond_subgraph_index(int32_t cond_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); } - void add_half_pixel_centers(bool half_pixel_centers) { 
- fbb_.AddElement(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); } - explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit WhileOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateResizeNearestNeighborOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeNearestNeighborOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); +inline ::flatbuffers::Offset CreateWhileOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t cond_subgraph_index = 0, + int32_t body_subgraph_index = 0) { + WhileOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_cond_subgraph_index(cond_subgraph_index); return builder_.Finish(); } -flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct CallOptionsT : public flatbuffers::NativeTable { - typedef CallOptions TableType; - uint32_t subgraph; - CallOptionsT() - : subgraph(0) { - } +struct NonMaxSuppressionV4OptionsT : public 
::flatbuffers::NativeTable { + typedef NonMaxSuppressionV4Options TableType; }; -struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SUBGRAPH = 4 - }; - uint32_t subgraph() const { - return GetField(VT_SUBGRAPH, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef NonMaxSuppressionV4OptionsT NativeTableType; + typedef NonMaxSuppressionV4OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SUBGRAPH) && verifier.EndTable(); } - CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + NonMaxSuppressionV4OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct CallOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_subgraph(uint32_t subgraph) { - fbb_.AddElement(CallOptions::VT_SUBGRAPH, subgraph, 0); - } - explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct NonMaxSuppressionV4OptionsBuilder { + typedef NonMaxSuppressionV4Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit 
NonMaxSuppressionV4OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - CallOptionsBuilder &operator=(const CallOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateCallOptions( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t subgraph = 0) { - CallOptionsBuilder builder_(_fbb); - builder_.add_subgraph(subgraph); +inline ::flatbuffers::Offset CreateNonMaxSuppressionV4Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV4OptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateNonMaxSuppressionV4Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct PadOptionsT : public flatbuffers::NativeTable { - typedef PadOptions TableType; - PadOptionsT() { - } +struct NonMaxSuppressionV5OptionsT : public ::flatbuffers::NativeTable { + typedef NonMaxSuppressionV5Options TableType; }; -struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef NonMaxSuppressionV5OptionsT NativeTableType; + typedef NonMaxSuppressionV5OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + NonMaxSuppressionV5OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct PadOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct NonMaxSuppressionV5OptionsBuilder { + typedef NonMaxSuppressionV5Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV5OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - PadOptionsBuilder &operator=(const PadOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreatePadOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PadOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateNonMaxSuppressionV5Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV5OptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateNonMaxSuppressionV5Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct 
PadV2OptionsT : public flatbuffers::NativeTable { - typedef PadV2Options TableType; - PadV2OptionsT() { - } +struct ScatterNdOptionsT : public ::flatbuffers::NativeTable { + typedef ScatterNdOptions TableType; }; -struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ScatterNdOptionsT NativeTableType; + typedef ScatterNdOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ScatterNdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ScatterNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct PadV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ScatterNdOptionsBuilder { + typedef ScatterNdOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ScatterNdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = 
fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreatePadV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - PadV2OptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateScatterNdOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ScatterNdOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateScatterNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ReshapeOptionsT : public flatbuffers::NativeTable { - typedef ReshapeOptions TableType; - std::vector new_shape; - ReshapeOptionsT() { - } +struct SelectV2OptionsT : public ::flatbuffers::NativeTable { + typedef SelectV2Options TableType; }; -struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReshapeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NEW_SHAPE = 4 - }; - const flatbuffers::Vector *new_shape() const { - return GetPointer *>(VT_NEW_SHAPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SelectV2OptionsT NativeTableType; + typedef SelectV2OptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NEW_SHAPE) && - verifier.VerifyVector(new_shape()) && verifier.EndTable(); } - ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, 
const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SelectV2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SelectV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ReshapeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_new_shape(flatbuffers::Offset> new_shape) { - fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); - } - explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct SelectV2OptionsBuilder { + typedef SelectV2Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SelectV2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateReshapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> new_shape = 0) { - ReshapeOptionsBuilder builder_(_fbb); - builder_.add_new_shape(new_shape); +inline ::flatbuffers::Offset CreateSelectV2Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SelectV2OptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateReshapeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *new_shape = nullptr) { - auto new_shape__ = new_shape ? 
_fbb.CreateVector(*new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - new_shape__); -} - -flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSelectV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { - typedef SpaceToBatchNDOptions TableType; - SpaceToBatchNDOptionsT() { - } +struct DensifyOptionsT : public ::flatbuffers::NativeTable { + typedef DensifyOptions TableType; }; -struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToBatchNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DensifyOptionsT NativeTableType; + typedef DensifyOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + DensifyOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DensifyOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SpaceToBatchNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct DensifyOptionsBuilder { + typedef DensifyOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit DensifyOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSpaceToBatchNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SpaceToBatchNDOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateDensifyOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + DensifyOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateDensifyOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { - typedef BatchToSpaceNDOptions TableType; - BatchToSpaceNDOptionsT() { - } +struct SegmentSumOptionsT : public ::flatbuffers::NativeTable { + typedef SegmentSumOptions TableType; }; -struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchToSpaceNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SegmentSumOptionsT NativeTableType; + typedef SegmentSumOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return 
VerifyTableStart(verifier) && verifier.EndTable(); } - BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SegmentSumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct BatchToSpaceNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct SegmentSumOptionsBuilder { + typedef SegmentSumOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SegmentSumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateBatchToSpaceNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BatchToSpaceNDOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateSegmentSumOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + SegmentSumOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SkipGramOptionsT : public flatbuffers::NativeTable { - typedef SkipGramOptions TableType; - int32_t ngram_size; - int32_t max_skip_size; - bool include_all_ngrams; - SkipGramOptionsT() - : ngram_size(0), - max_skip_size(0), - include_all_ngrams(false) { - } +struct BatchMatMulOptionsT : public ::flatbuffers::NativeTable { + typedef BatchMatMulOptions TableType; + bool adj_x = false; + bool adj_y = false; + bool asymmetric_quantize_inputs = false; }; -struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SkipGramOptionsT NativeTableType; +struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BatchMatMulOptionsT NativeTableType; + typedef BatchMatMulOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NGRAM_SIZE = 4, - VT_MAX_SKIP_SIZE = 6, - VT_INCLUDE_ALL_NGRAMS = 8 + VT_ADJ_X = 4, + VT_ADJ_Y = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 }; - int32_t ngram_size() const { - return GetField(VT_NGRAM_SIZE, 0); + bool adj_x() const { + return GetField(VT_ADJ_X, 0) != 0; } - int32_t max_skip_size() const { - return GetField(VT_MAX_SKIP_SIZE, 0); + bool adj_y() const { + return GetField(VT_ADJ_Y, 0) != 0; } - bool include_all_ngrams() const { - return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NGRAM_SIZE) && - VerifyField(verifier, VT_MAX_SKIP_SIZE) && - VerifyField(verifier, VT_INCLUDE_ALL_NGRAMS) && + VerifyField(verifier, VT_ADJ_X, 1) && + 
VerifyField(verifier, VT_ADJ_Y, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && verifier.EndTable(); } - SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BatchMatMulOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchMatMulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SkipGramOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_ngram_size(int32_t ngram_size) { - fbb_.AddElement(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); +struct BatchMatMulOptionsBuilder { + typedef BatchMatMulOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_adj_x(bool adj_x) { + fbb_.AddElement(BatchMatMulOptions::VT_ADJ_X, static_cast(adj_x), 0); } - void add_max_skip_size(int32_t max_skip_size) { - fbb_.AddElement(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); + void add_adj_y(bool adj_y) { + fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); } - void add_include_all_ngrams(bool include_all_ngrams) { - fbb_.AddElement(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast(include_all_ngrams), 0); + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); } - explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit 
BatchMatMulOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSkipGramOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t ngram_size = 0, - int32_t max_skip_size = 0, - bool include_all_ngrams = false) { - SkipGramOptionsBuilder builder_(_fbb); - builder_.add_max_skip_size(max_skip_size); - builder_.add_ngram_size(ngram_size); - builder_.add_include_all_ngrams(include_all_ngrams); +inline ::flatbuffers::Offset CreateBatchMatMulOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool adj_x = false, + bool adj_y = false, + bool asymmetric_quantize_inputs = false) { + BatchMatMulOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_adj_y(adj_y); + builder_.add_adj_x(adj_x); return builder_.Finish(); } -flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateBatchMatMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { - typedef SpaceToDepthOptions TableType; - int32_t block_size; - SpaceToDepthOptionsT() - : block_size(0) { - } +struct CumsumOptionsT : public ::flatbuffers::NativeTable { + typedef CumsumOptions TableType; + bool exclusive = false; + bool reverse = false; }; -struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToDepthOptionsT NativeTableType; +struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private 
::flatbuffers::Table { + typedef CumsumOptionsT NativeTableType; + typedef CumsumOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 + VT_EXCLUSIVE = 4, + VT_REVERSE = 6 }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); + bool exclusive() const { + return GetField(VT_EXCLUSIVE, 0) != 0; + } + bool reverse() const { + return GetField(VT_REVERSE, 0) != 0; } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && + VerifyField(verifier, VT_EXCLUSIVE, 1) && + VerifyField(verifier, VT_REVERSE, 1) && verifier.EndTable(); } - SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + CumsumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CumsumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SpaceToDepthOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); +struct CumsumOptionsBuilder { + typedef CumsumOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_exclusive(bool exclusive) { + fbb_.AddElement(CumsumOptions::VT_EXCLUSIVE, static_cast(exclusive), 0); } - explicit 
SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + void add_reverse(bool reverse) { + fbb_.AddElement(CumsumOptions::VT_REVERSE, static_cast(reverse), 0); + } + explicit CumsumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSpaceToDepthOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - SpaceToDepthOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); +inline ::flatbuffers::Offset CreateCumsumOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool exclusive = false, + bool reverse = false) { + CumsumOptionsBuilder builder_(_fbb); + builder_.add_reverse(reverse); + builder_.add_exclusive(exclusive); return builder_.Finish(); } -flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateCumsumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { - typedef DepthToSpaceOptions TableType; - int32_t block_size; - DepthToSpaceOptionsT() - : block_size(0) { - } +struct BroadcastToOptionsT : public ::flatbuffers::NativeTable { + typedef BroadcastToOptions TableType; }; -struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthToSpaceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); - } - bool 
Verify(flatbuffers::Verifier &verifier) const { +struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BroadcastToOptionsT NativeTableType; + typedef BroadcastToOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && verifier.EndTable(); } - DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BroadcastToOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BroadcastToOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct DepthToSpaceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct BroadcastToOptionsBuilder { + typedef BroadcastToOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BroadcastToOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; 
-inline flatbuffers::Offset CreateDepthToSpaceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - DepthToSpaceOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); +inline ::flatbuffers::Offset CreateBroadcastToOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + BroadcastToOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateBroadcastToOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SubOptionsT : public flatbuffers::NativeTable { - typedef SubOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - SubOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { - } +struct Rfft2dOptionsT : public ::flatbuffers::NativeTable { + typedef Rfft2dOptions TableType; }; -struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef Rfft2dOptionsT NativeTableType; + typedef Rfft2dOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, 
VT_POT_SCALE_INT16) && verifier.EndTable(); } - SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + Rfft2dOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Rfft2dOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SubOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(SubOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct Rfft2dOptionsBuilder { + typedef Rfft2dOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit Rfft2dOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SubOptionsBuilder &operator=(const SubOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSubOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = 
true) { - SubOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); +inline ::flatbuffers::Offset CreateRfft2dOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + Rfft2dOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateRfft2dOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct DivOptionsT : public flatbuffers::NativeTable { - typedef DivOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - DivOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } +struct HashtableOptionsT : public ::flatbuffers::NativeTable { + typedef HashtableOptions TableType; + int32_t table_id = 0; + tflite::TensorType key_dtype = tflite::TensorType_FLOAT32; + tflite::TensorType value_dtype = tflite::TensorType_FLOAT32; }; -struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DivOptionsT NativeTableType; +struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef HashtableOptionsT NativeTableType; + typedef HashtableOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 + VT_TABLE_ID = 4, + VT_KEY_DTYPE = 6, + VT_VALUE_DTYPE = 8 }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + int32_t table_id() const { + return GetField(VT_TABLE_ID, 0); + } + tflite::TensorType key_dtype() const { + return static_cast(GetField(VT_KEY_DTYPE, 0)); + } + tflite::TensorType value_dtype() const { + return static_cast(GetField(VT_VALUE_DTYPE, 0)); } - bool 
Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_TABLE_ID, 4) && + VerifyField(verifier, VT_KEY_DTYPE, 1) && + VerifyField(verifier, VT_VALUE_DTYPE, 1) && verifier.EndTable(); } - DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + HashtableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct DivOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DivOptionsBuilder &operator=(const DivOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; +struct HashtableOptionsBuilder { + typedef HashtableOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_table_id(int32_t table_id) { + fbb_.AddElement(HashtableOptions::VT_TABLE_ID, table_id, 0); } -}; - -inline flatbuffers::Offset CreateDivOptions( - 
flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - DivOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TopKV2OptionsT : public flatbuffers::NativeTable { - typedef TopKV2Options TableType; - TopKV2OptionsT() { + void add_key_dtype(tflite::TensorType key_dtype) { + fbb_.AddElement(HashtableOptions::VT_KEY_DTYPE, static_cast(key_dtype), 0); } -}; - -struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TopKV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + void add_value_dtype(tflite::TensorType value_dtype) { + fbb_.AddElement(HashtableOptions::VT_VALUE_DTYPE, static_cast(value_dtype), 0); } - TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TopKV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit HashtableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline 
flatbuffers::Offset CreateTopKV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - TopKV2OptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateHashtableOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t table_id = 0, + tflite::TensorType key_dtype = tflite::TensorType_FLOAT32, + tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) { + HashtableOptionsBuilder builder_(_fbb); + builder_.add_table_id(table_id); + builder_.add_value_dtype(value_dtype); + builder_.add_key_dtype(key_dtype); return builder_.Finish(); } -flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateHashtableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { - typedef EmbeddingLookupSparseOptions TableType; - tflite::CombinerType combiner; - EmbeddingLookupSparseOptionsT() - : combiner(tflite::CombinerType_SUM) { - } +struct HashtableFindOptionsT : public ::flatbuffers::NativeTable { + typedef HashtableFindOptions TableType; }; -struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EmbeddingLookupSparseOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMBINER = 4 - }; - tflite::CombinerType combiner() const { - return static_cast(GetField(VT_COMBINER, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef HashtableFindOptionsT NativeTableType; + typedef HashtableFindOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_COMBINER) && verifier.EndTable(); } - 
EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + HashtableFindOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableFindOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct EmbeddingLookupSparseOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_combiner(tflite::CombinerType combiner) { - fbb_.AddElement(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast(combiner), 0); - } - explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct HashtableFindOptionsBuilder { + typedef HashtableFindOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit HashtableFindOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::CombinerType combiner = tflite::CombinerType_SUM) { - EmbeddingLookupSparseOptionsBuilder builder_(_fbb); - builder_.add_combiner(combiner); +inline ::flatbuffers::Offset 
CreateHashtableFindOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + HashtableFindOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateHashtableFindOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct GatherOptionsT : public flatbuffers::NativeTable { - typedef GatherOptions TableType; - int32_t axis; - GatherOptionsT() - : axis(0) { - } +struct HashtableImportOptionsT : public ::flatbuffers::NativeTable { + typedef HashtableImportOptions TableType; }; -struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef HashtableImportOptionsT NativeTableType; + typedef HashtableImportOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && verifier.EndTable(); } - GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + HashtableImportOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableImportOptionsT *_o, const 
::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct GatherOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); - } - explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct HashtableImportOptionsBuilder { + typedef HashtableImportOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit HashtableImportOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - GatherOptionsBuilder &operator=(const GatherOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateGatherOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { - GatherOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); +inline ::flatbuffers::Offset CreateHashtableImportOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + HashtableImportOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateHashtableImportOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct TransposeOptionsT : public flatbuffers::NativeTable { - typedef TransposeOptions TableType; - TransposeOptionsT() { - } +struct HashtableSizeOptionsT : public ::flatbuffers::NativeTable { + typedef HashtableSizeOptions 
TableType; }; -struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef HashtableSizeOptionsT NativeTableType; + typedef HashtableSizeOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + HashtableSizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableSizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct TransposeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct HashtableSizeOptionsBuilder { + typedef HashtableSizeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit HashtableSizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline 
flatbuffers::Offset CreateTransposeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TransposeOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateHashtableSizeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + HashtableSizeOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateHashtableSizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ExpOptionsT : public flatbuffers::NativeTable { - typedef ExpOptions TableType; - ExpOptionsT() { - } +struct VarHandleOptionsT : public ::flatbuffers::NativeTable { + typedef VarHandleOptions TableType; + std::string container{}; + std::string shared_name{}; }; -struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef VarHandleOptionsT NativeTableType; + typedef VarHandleOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CONTAINER = 4, + VT_SHARED_NAME = 6 + }; + const ::flatbuffers::String *container() const { + return GetPointer(VT_CONTAINER); + } + const ::flatbuffers::String *shared_name() const { + return GetPointer(VT_SHARED_NAME); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_CONTAINER) && + verifier.VerifyString(container()) && + VerifyOffset(verifier, VT_SHARED_NAME) && + verifier.VerifyString(shared_name()) && verifier.EndTable(); } - ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + VarHandleOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(VarHandleOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ExpOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct VarHandleOptionsBuilder { + typedef VarHandleOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_container(::flatbuffers::Offset<::flatbuffers::String> container) { + fbb_.AddOffset(VarHandleOptions::VT_CONTAINER, container); + } + void add_shared_name(::flatbuffers::Offset<::flatbuffers::String> shared_name) { + fbb_.AddOffset(VarHandleOptions::VT_SHARED_NAME, shared_name); + } + explicit VarHandleOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ExpOptionsBuilder &operator=(const ExpOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateExpOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateVarHandleOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> container = 0, + ::flatbuffers::Offset<::flatbuffers::String> shared_name = 0) { + VarHandleOptionsBuilder builder_(_fbb); + 
builder_.add_shared_name(shared_name); + builder_.add_container(container); return builder_.Finish(); } -flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset CreateVarHandleOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *container = nullptr, + const char *shared_name = nullptr) { + auto container__ = container ? _fbb.CreateString(container) : 0; + auto shared_name__ = shared_name ? _fbb.CreateString(shared_name) : 0; + return tflite::CreateVarHandleOptions( + _fbb, + container__, + shared_name__); +} -struct CosOptionsT : public flatbuffers::NativeTable { - typedef CosOptions TableType; - CosOptionsT() { - } +::flatbuffers::Offset CreateVarHandleOptions(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReadVariableOptionsT : public ::flatbuffers::NativeTable { + typedef ReadVariableOptions TableType; }; -struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CosOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReadVariableOptionsT NativeTableType; + typedef ReadVariableOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ReadVariableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void 
UnPackTo(ReadVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct CosOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ReadVariableOptionsBuilder { + typedef ReadVariableOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ReadVariableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - CosOptionsBuilder &operator=(const CosOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateCosOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - CosOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateReadVariableOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ReadVariableOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateReadVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ReducerOptionsT : public flatbuffers::NativeTable { - typedef ReducerOptions TableType; - bool keep_dims; - ReducerOptionsT() - : keep_dims(false) { - } +struct AssignVariableOptionsT : public ::flatbuffers::NativeTable { + typedef AssignVariableOptions TableType; }; -struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef 
ReducerOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KEEP_DIMS = 4 - }; - bool keep_dims() const { - return GetField(VT_KEEP_DIMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef AssignVariableOptionsT NativeTableType; + typedef AssignVariableOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KEEP_DIMS) && verifier.EndTable(); } - ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + AssignVariableOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AssignVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ReducerOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_keep_dims(bool keep_dims) { - fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); - } - explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct AssignVariableOptionsBuilder { + typedef AssignVariableOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit AssignVariableOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &); - 
flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateReducerOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool keep_dims = false) { - ReducerOptionsBuilder builder_(_fbb); - builder_.add_keep_dims(keep_dims); +inline ::flatbuffers::Offset CreateAssignVariableOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + AssignVariableOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateAssignVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SqueezeOptionsT : public flatbuffers::NativeTable { - typedef SqueezeOptions TableType; - std::vector squeeze_dims; - SqueezeOptionsT() { - } +struct RandomOptionsT : public ::flatbuffers::NativeTable { + typedef RandomOptions TableType; + int64_t seed = 0; + int64_t seed2 = 0; }; -struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SqueezeOptionsT NativeTableType; +struct RandomOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef RandomOptionsT NativeTableType; + typedef RandomOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZE_DIMS = 4 + VT_SEED = 4, + VT_SEED2 = 6 }; - const flatbuffers::Vector *squeeze_dims() const { - return GetPointer *>(VT_SQUEEZE_DIMS); + int64_t seed() const { + return GetField(VT_SEED, 0); } - bool Verify(flatbuffers::Verifier &verifier) const { + int64_t seed2() const { + return GetField(VT_SEED2, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - 
VerifyOffset(verifier, VT_SQUEEZE_DIMS) && - verifier.VerifyVector(squeeze_dims()) && + VerifyField(verifier, VT_SEED, 8) && + VerifyField(verifier, VT_SEED2, 8) && verifier.EndTable(); } - SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + RandomOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RandomOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SqueezeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { - fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); +struct RandomOptionsBuilder { + typedef RandomOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_seed(int64_t seed) { + fbb_.AddElement(RandomOptions::VT_SEED, seed, 0); + } + void add_seed2(int64_t seed2) { + fbb_.AddElement(RandomOptions::VT_SEED2, seed2, 0); } - explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit RandomOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSqueezeOptions( - 
flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeeze_dims = 0) { - SqueezeOptionsBuilder builder_(_fbb); - builder_.add_squeeze_dims(squeeze_dims); +inline ::flatbuffers::Offset CreateRandomOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + int64_t seed = 0, + int64_t seed2 = 0) { + RandomOptionsBuilder builder_(_fbb); + builder_.add_seed2(seed2); + builder_.add_seed(seed); return builder_.Finish(); } -inline flatbuffers::Offset CreateSqueezeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeeze_dims = nullptr) { - auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector(*squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - squeeze_dims__); -} - -flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateRandomOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SplitOptionsT : public flatbuffers::NativeTable { - typedef SplitOptions TableType; - int32_t num_splits; - SplitOptionsT() - : num_splits(0) { - } +struct BucketizeOptionsT : public ::flatbuffers::NativeTable { + typedef BucketizeOptions TableType; + std::vector boundaries{}; }; -struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitOptionsT NativeTableType; +struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BucketizeOptionsT NativeTableType; + typedef BucketizeOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 + VT_BOUNDARIES = 4 }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); + const ::flatbuffers::Vector *boundaries() const { + return GetPointer *>(VT_BOUNDARIES); } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier 
&verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && + VerifyOffset(verifier, VT_BOUNDARIES) && + verifier.VerifyVector(boundaries()) && verifier.EndTable(); } - SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BucketizeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BucketizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SplitOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); +struct BucketizeOptionsBuilder { + typedef BucketizeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_boundaries(::flatbuffers::Offset<::flatbuffers::Vector> boundaries) { + fbb_.AddOffset(BucketizeOptions::VT_BOUNDARIES, boundaries); } - explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit BucketizeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SplitOptionsBuilder &operator=(const SplitOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSplitOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits 
= 0) { - SplitOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); +inline ::flatbuffers::Offset CreateBucketizeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> boundaries = 0) { + BucketizeOptionsBuilder builder_(_fbb); + builder_.add_boundaries(boundaries); return builder_.Finish(); } -flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset CreateBucketizeOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *boundaries = nullptr) { + auto boundaries__ = boundaries ? _fbb.CreateVector(*boundaries) : 0; + return tflite::CreateBucketizeOptions( + _fbb, + boundaries__); +} + +::flatbuffers::Offset CreateBucketizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SplitVOptionsT : public flatbuffers::NativeTable { - typedef SplitVOptions TableType; - int32_t num_splits; - SplitVOptionsT() - : num_splits(0) { - } +struct GeluOptionsT : public ::flatbuffers::NativeTable { + typedef GeluOptions TableType; + bool approximate = false; }; -struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitVOptionsT NativeTableType; +struct GeluOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef GeluOptionsT NativeTableType; + typedef GeluOptionsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 + VT_APPROXIMATE = 4 }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); + bool approximate() const { + return GetField(VT_APPROXIMATE, 0) != 0; } - bool Verify(flatbuffers::Verifier &verifier) const { + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && + 
VerifyField(verifier, VT_APPROXIMATE, 1) && verifier.EndTable(); } - SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + GeluOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GeluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SplitVOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); +struct GeluOptionsBuilder { + typedef GeluOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_approximate(bool approximate) { + fbb_.AddElement(GeluOptions::VT_APPROXIMATE, static_cast(approximate), 0); } - explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit GeluOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSplitVOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitVOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); +inline ::flatbuffers::Offset CreateGeluOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + bool 
approximate = false) { + GeluOptionsBuilder builder_(_fbb); + builder_.add_approximate(approximate); return builder_.Finish(); } -flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateGeluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct StridedSliceOptionsT : public flatbuffers::NativeTable { - typedef StridedSliceOptions TableType; - int32_t begin_mask; - int32_t end_mask; - int32_t ellipsis_mask; - int32_t new_axis_mask; - int32_t shrink_axis_mask; - StridedSliceOptionsT() - : begin_mask(0), - end_mask(0), - ellipsis_mask(0), - new_axis_mask(0), - shrink_axis_mask(0) { - } +struct DynamicUpdateSliceOptionsT : public ::flatbuffers::NativeTable { + typedef DynamicUpdateSliceOptions TableType; }; -struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BEGIN_MASK = 4, - VT_END_MASK = 6, - VT_ELLIPSIS_MASK = 8, - VT_NEW_AXIS_MASK = 10, - VT_SHRINK_AXIS_MASK = 12 - }; - int32_t begin_mask() const { - return GetField(VT_BEGIN_MASK, 0); - } - int32_t end_mask() const { - return GetField(VT_END_MASK, 0); - } - int32_t ellipsis_mask() const { - return GetField(VT_ELLIPSIS_MASK, 0); - } - int32_t new_axis_mask() const { - return GetField(VT_NEW_AXIS_MASK, 0); - } - int32_t shrink_axis_mask() const { - return GetField(VT_SHRINK_AXIS_MASK, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct DynamicUpdateSliceOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DynamicUpdateSliceOptionsT NativeTableType; + typedef DynamicUpdateSliceOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - 
VerifyField(verifier, VT_BEGIN_MASK) && - VerifyField(verifier, VT_END_MASK) && - VerifyField(verifier, VT_ELLIPSIS_MASK) && - VerifyField(verifier, VT_NEW_AXIS_MASK) && - VerifyField(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable(); } - StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + DynamicUpdateSliceOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct StridedSliceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_begin_mask(int32_t begin_mask) { - fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); - } - void add_end_mask(int32_t end_mask) { - fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); - } - void add_ellipsis_mask(int32_t ellipsis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); - } - void add_new_axis_mask(int32_t new_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); - } - void add_shrink_axis_mask(int32_t shrink_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); - } - explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct DynamicUpdateSliceOptionsBuilder { + typedef DynamicUpdateSliceOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t 
start_; + explicit DynamicUpdateSliceOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateStridedSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t begin_mask = 0, - int32_t end_mask = 0, - int32_t ellipsis_mask = 0, - int32_t new_axis_mask = 0, - int32_t shrink_axis_mask = 0) { - StridedSliceOptionsBuilder builder_(_fbb); - builder_.add_shrink_axis_mask(shrink_axis_mask); - builder_.add_new_axis_mask(new_axis_mask); - builder_.add_ellipsis_mask(ellipsis_mask); - builder_.add_end_mask(end_mask); - builder_.add_begin_mask(begin_mask); +inline ::flatbuffers::Offset CreateDynamicUpdateSliceOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + DynamicUpdateSliceOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateDynamicUpdateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { - typedef LogSoftmaxOptions TableType; - LogSoftmaxOptionsT() { - } +struct UnsortedSegmentProdOptionsT : public ::flatbuffers::NativeTable { + typedef UnsortedSegmentProdOptions TableType; }; -struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogSoftmaxOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct UnsortedSegmentProdOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table 
{ + typedef UnsortedSegmentProdOptionsT NativeTableType; + typedef UnsortedSegmentProdOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UnsortedSegmentProdOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct LogSoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct UnsortedSegmentProdOptionsBuilder { + typedef UnsortedSegmentProdOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit UnsortedSegmentProdOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLogSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogSoftmaxOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateUnsortedSegmentProdOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { 
+ UnsortedSegmentProdOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateUnsortedSegmentProdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct CastOptionsT : public flatbuffers::NativeTable { - typedef CastOptions TableType; - tflite::TensorType in_data_type; - tflite::TensorType out_data_type; - CastOptionsT() - : in_data_type(tflite::TensorType_FLOAT32), - out_data_type(tflite::TensorType_FLOAT32) { - } +struct UnsortedSegmentMaxOptionsT : public ::flatbuffers::NativeTable { + typedef UnsortedSegmentMaxOptions TableType; }; -struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CastOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IN_DATA_TYPE = 4, - VT_OUT_DATA_TYPE = 6 - }; - tflite::TensorType in_data_type() const { - return static_cast(GetField(VT_IN_DATA_TYPE, 0)); - } - tflite::TensorType out_data_type() const { - return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct UnsortedSegmentMaxOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UnsortedSegmentMaxOptionsT NativeTableType; + typedef UnsortedSegmentMaxOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_IN_DATA_TYPE) && - VerifyField(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable(); } - CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UnsortedSegmentMaxOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct CastOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_in_data_type(tflite::TensorType in_data_type) { - fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); - } - void add_out_data_type(tflite::TensorType out_data_type) { - fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); - } - explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct UnsortedSegmentMaxOptionsBuilder { + typedef UnsortedSegmentMaxOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit UnsortedSegmentMaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - CastOptionsBuilder &operator=(const CastOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateCastOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, - tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { - CastOptionsBuilder builder_(_fbb); - builder_.add_out_data_type(out_data_type); - builder_.add_in_data_type(in_data_type); +inline ::flatbuffers::Offset CreateUnsortedSegmentMaxOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + 
UnsortedSegmentMaxOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateUnsortedSegmentMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct DequantizeOptionsT : public flatbuffers::NativeTable { - typedef DequantizeOptions TableType; - DequantizeOptionsT() { - } +struct UnsortedSegmentSumOptionsT : public ::flatbuffers::NativeTable { + typedef UnsortedSegmentSumOptions TableType; }; -struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DequantizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct UnsortedSegmentSumOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UnsortedSegmentSumOptionsT NativeTableType; + typedef UnsortedSegmentSumOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UnsortedSegmentSumOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct DequantizeOptionsBuilder { - 
flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct UnsortedSegmentSumOptionsBuilder { + typedef UnsortedSegmentSumOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit UnsortedSegmentSumOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateDequantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DequantizeOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateUnsortedSegmentSumOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentSumOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateUnsortedSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { - typedef MaximumMinimumOptions TableType; - MaximumMinimumOptionsT() { - } +struct ATan2OptionsT : public ::flatbuffers::NativeTable { + typedef ATan2Options TableType; }; -struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MaximumMinimumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct ATan2Options FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ATan2OptionsT NativeTableType; + typedef ATan2OptionsBuilder Builder; + bool 
Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - MaximumMinimumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ATan2OptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ATan2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct MaximumMinimumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ATan2OptionsBuilder { + typedef ATan2Options Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit ATan2OptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateMaximumMinimumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MaximumMinimumOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateATan2Options( + ::flatbuffers::FlatBufferBuilder &_fbb) { + ATan2OptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const 
MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateATan2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct TileOptionsT : public flatbuffers::NativeTable { - typedef TileOptions TableType; - TileOptionsT() { - } +struct UnsortedSegmentMinOptionsT : public ::flatbuffers::NativeTable { + typedef UnsortedSegmentMinOptions TableType; }; -struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TileOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct UnsortedSegmentMinOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef UnsortedSegmentMinOptionsT NativeTableType; + typedef UnsortedSegmentMinOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UnsortedSegmentMinOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct TileOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct UnsortedSegmentMinOptionsBuilder { + typedef UnsortedSegmentMinOptions Table; + 
::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit UnsortedSegmentMinOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - TileOptionsBuilder &operator=(const TileOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateTileOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TileOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateUnsortedSegmentMinOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentMinOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateUnsortedSegmentMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ArgMaxOptionsT : public flatbuffers::NativeTable { - typedef ArgMaxOptions TableType; - tflite::TensorType output_type; - ArgMaxOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } +struct SignOptionsT : public ::flatbuffers::NativeTable { + typedef SignOptions TableType; }; -struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMaxOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct SignOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SignOptionsT NativeTableType; + typedef SignOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) 
const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE) && verifier.EndTable(); } - ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SignOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SignOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ArgMaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct SignOptionsBuilder { + typedef SignOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit SignOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateArgMaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMaxOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); +inline ::flatbuffers::Offset CreateSignOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + 
SignOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateSignOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ArgMinOptionsT : public flatbuffers::NativeTable { - typedef ArgMinOptions TableType; - tflite::TensorType output_type; - ArgMinOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } +struct BitcastOptionsT : public ::flatbuffers::NativeTable { + typedef BitcastOptions TableType; }; -struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMinOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { +struct BitcastOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BitcastOptionsT NativeTableType; + typedef BitcastOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE) && verifier.EndTable(); } - ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BitcastOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BitcastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset 
Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ArgMinOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct BitcastOptionsBuilder { + typedef BitcastOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BitcastOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateArgMinOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMinOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); +inline ::flatbuffers::Offset CreateBitcastOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + BitcastOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateBitcastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct GreaterOptionsT : public flatbuffers::NativeTable { - typedef GreaterOptions TableType; - GreaterOptionsT() { - } +struct BitwiseXorOptionsT : public ::flatbuffers::NativeTable { + typedef BitwiseXorOptions TableType; }; -struct GreaterOptions FLATBUFFERS_FINAL_CLASS : 
private flatbuffers::Table { - typedef GreaterOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct BitwiseXorOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BitwiseXorOptionsT NativeTableType; + typedef BitwiseXorOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BitwiseXorOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BitwiseXorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct GreaterOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct BitwiseXorOptionsBuilder { + typedef BitwiseXorOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit BitwiseXorOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateGreaterOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterOptionsBuilder 
builder_(_fbb); +inline ::flatbuffers::Offset CreateBitwiseXorOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + BitwiseXorOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateBitwiseXorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct GreaterEqualOptionsT : public flatbuffers::NativeTable { - typedef GreaterEqualOptions TableType; - GreaterEqualOptionsT() { - } +struct RightShiftOptionsT : public ::flatbuffers::NativeTable { + typedef RightShiftOptions TableType; }; -struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct RightShiftOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef RightShiftOptionsT NativeTableType; + typedef RightShiftOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + RightShiftOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RightShiftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = 
nullptr); }; -struct GreaterEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct RightShiftOptionsBuilder { + typedef RightShiftOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit RightShiftOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateGreaterEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterEqualOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateRightShiftOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + RightShiftOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateRightShiftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LessOptionsT : public flatbuffers::NativeTable { - typedef LessOptions TableType; - LessOptionsT() { - } +struct DilateOptionsT : public ::flatbuffers::NativeTable { + typedef DilateOptions TableType; }; -struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct DilateOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DilateOptionsT NativeTableType; + typedef DilateOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier 
&verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + DilateOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DilateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct LessOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct DilateOptionsBuilder { + typedef DilateOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit DilateOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LessOptionsBuilder &operator=(const LessOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLessOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateDilateOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + DilateOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset 
CreateDilateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct LessEqualOptionsT : public flatbuffers::NativeTable { - typedef LessEqualOptions TableType; - LessEqualOptionsT() { - } +struct ReduceWindowOptionsT : public ::flatbuffers::NativeTable { + typedef ReduceWindowOptions TableType; + tflite::ReduceWindowFunction reduce_function = tflite::ReduceWindowFunction_UNSUPPORTED; }; -struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct ReduceWindowOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ReduceWindowOptionsT NativeTableType; + typedef ReduceWindowOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_REDUCE_FUNCTION = 4 + }; + tflite::ReduceWindowFunction reduce_function() const { + return static_cast(GetField(VT_REDUCE_FUNCTION, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && + VerifyField(verifier, VT_REDUCE_FUNCTION, 4) && verifier.EndTable(); } - LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + ReduceWindowOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct 
LessEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct ReduceWindowOptionsBuilder { + typedef ReduceWindowOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_reduce_function(tflite::ReduceWindowFunction reduce_function) { + fbb_.AddElement(ReduceWindowOptions::VT_REDUCE_FUNCTION, static_cast(reduce_function), 0); + } + explicit ReduceWindowOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateLessEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessEqualOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateReduceWindowOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + tflite::ReduceWindowFunction reduce_function = tflite::ReduceWindowFunction_UNSUPPORTED) { + ReduceWindowOptionsBuilder builder_(_fbb); + builder_.add_reduce_function(reduce_function); return builder_.Finish(); } -flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +::flatbuffers::Offset CreateReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct NegOptionsT : public flatbuffers::NativeTable { - typedef NegOptions TableType; - NegOptionsT() { - } +struct OperatorCodeT : public ::flatbuffers::NativeTable { + typedef OperatorCode TableType; + int8_t deprecated_builtin_code = 0; + std::string custom_code{}; + int32_t version = 1; + 
tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD; }; -struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NegOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct OperatorCode FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef OperatorCodeT NativeTableType; + typedef OperatorCodeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEPRECATED_BUILTIN_CODE = 4, + VT_CUSTOM_CODE = 6, + VT_VERSION = 8, + VT_BUILTIN_CODE = 10 + }; + int8_t deprecated_builtin_code() const { + return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); + } + const ::flatbuffers::String *custom_code() const { + return GetPointer(VT_CUSTOM_CODE); + } + int32_t version() const { + return GetField(VT_VERSION, 1); + } + tflite::BuiltinOperator builtin_code() const { + return static_cast(GetField(VT_BUILTIN_CODE, 0)); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE, 1) && + VerifyOffset(verifier, VT_CUSTOM_CODE) && + verifier.VerifyString(custom_code()) && + VerifyField(verifier, VT_VERSION, 4) && + VerifyField(verifier, VT_BUILTIN_CODE, 4) && verifier.EndTable(); } - NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + OperatorCodeT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; 
-struct NegOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct OperatorCodeBuilder { + typedef OperatorCode Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { + fbb_.AddElement(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); + } + void add_custom_code(::flatbuffers::Offset<::flatbuffers::String> custom_code) { + fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); + } + void add_version(int32_t version) { + fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); + } + void add_builtin_code(tflite::BuiltinOperator builtin_code) { + fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); + } + explicit OperatorCodeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - NegOptionsBuilder &operator=(const NegOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateNegOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NegOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateOperatorCode( + ::flatbuffers::FlatBufferBuilder &_fbb, + int8_t deprecated_builtin_code = 0, + ::flatbuffers::Offset<::flatbuffers::String> custom_code = 0, + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { + OperatorCodeBuilder builder_(_fbb); + builder_.add_builtin_code(builtin_code); + builder_.add_version(version); + builder_.add_custom_code(custom_code); + builder_.add_deprecated_builtin_code(deprecated_builtin_code); return builder_.Finish(); } -flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, 
const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset CreateOperatorCodeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + int8_t deprecated_builtin_code = 0, + const char *custom_code = nullptr, + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { + auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0; + return tflite::CreateOperatorCode( + _fbb, + deprecated_builtin_code, + custom_code__, + version, + builtin_code); +} + +::flatbuffers::Offset CreateOperatorCode(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SelectOptionsT : public flatbuffers::NativeTable { - typedef SelectOptions TableType; - SelectOptionsT() { - } +struct StableHLOCompositeOptionsT : public ::flatbuffers::NativeTable { + typedef StableHLOCompositeOptions TableType; + std::string name{}; + int32_t decomposition_subgraph_index = 0; + std::vector composite_attributes{}; + tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS; + int32_t version = 0; }; -struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct StableHLOCompositeOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StableHLOCompositeOptionsT NativeTableType; + typedef StableHLOCompositeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_DECOMPOSITION_SUBGRAPH_INDEX = 6, + VT_COMPOSITE_ATTRIBUTES = 8, + VT_COMPOSITE_ATTRIBUTES_FORMAT = 10, + VT_VERSION = 12 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + int32_t decomposition_subgraph_index() const { + return GetField(VT_DECOMPOSITION_SUBGRAPH_INDEX, 0); + } + const ::flatbuffers::Vector *composite_attributes() const 
{ + return GetPointer *>(VT_COMPOSITE_ATTRIBUTES); + } + tflite::CustomOptionsFormat composite_attributes_format() const { + return static_cast(GetField(VT_COMPOSITE_ATTRIBUTES_FORMAT, 0)); + } + int32_t version() const { + return GetField(VT_VERSION, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_DECOMPOSITION_SUBGRAPH_INDEX, 4) && + VerifyOffset(verifier, VT_COMPOSITE_ATTRIBUTES) && + verifier.VerifyVector(composite_attributes()) && + VerifyField(verifier, VT_COMPOSITE_ATTRIBUTES_FORMAT, 1) && + VerifyField(verifier, VT_VERSION, 4) && verifier.EndTable(); } - SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + StableHLOCompositeOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StableHLOCompositeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SelectOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct StableHLOCompositeOptionsBuilder { + typedef StableHLOCompositeOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(StableHLOCompositeOptions::VT_NAME, name); + } + void 
add_decomposition_subgraph_index(int32_t decomposition_subgraph_index) { + fbb_.AddElement(StableHLOCompositeOptions::VT_DECOMPOSITION_SUBGRAPH_INDEX, decomposition_subgraph_index, 0); + } + void add_composite_attributes(::flatbuffers::Offset<::flatbuffers::Vector> composite_attributes) { + fbb_.AddOffset(StableHLOCompositeOptions::VT_COMPOSITE_ATTRIBUTES, composite_attributes); + } + void add_composite_attributes_format(tflite::CustomOptionsFormat composite_attributes_format) { + fbb_.AddElement(StableHLOCompositeOptions::VT_COMPOSITE_ATTRIBUTES_FORMAT, static_cast(composite_attributes_format), 0); + } + void add_version(int32_t version) { + fbb_.AddElement(StableHLOCompositeOptions::VT_VERSION, version, 0); + } + explicit StableHLOCompositeOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SelectOptionsBuilder &operator=(const SelectOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSelectOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateStableHLOCompositeOptions( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + int32_t decomposition_subgraph_index = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> composite_attributes = 0, + tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + int32_t version = 0) { + StableHLOCompositeOptionsBuilder builder_(_fbb); + builder_.add_version(version); + builder_.add_composite_attributes(composite_attributes); + builder_.add_decomposition_subgraph_index(decomposition_subgraph_index); + builder_.add_name(name); + builder_.add_composite_attributes_format(composite_attributes_format); return builder_.Finish(); } 
-flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset CreateStableHLOCompositeOptionsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + int32_t decomposition_subgraph_index = 0, + const std::vector *composite_attributes = nullptr, + tflite::CustomOptionsFormat composite_attributes_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + int32_t version = 0) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto composite_attributes__ = composite_attributes ? _fbb.CreateVector(*composite_attributes) : 0; + return tflite::CreateStableHLOCompositeOptions( + _fbb, + name__, + decomposition_subgraph_index, + composite_attributes__, + composite_attributes_format, + version); +} + +::flatbuffers::Offset CreateStableHLOCompositeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct SliceOptionsT : public flatbuffers::NativeTable { - typedef SliceOptions TableType; - SliceOptionsT() { - } +struct StablehloShiftLeftOptionsT : public ::flatbuffers::NativeTable { + typedef StablehloShiftLeftOptions TableType; }; -struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SliceOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { +struct StablehloShiftLeftOptions FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StablehloShiftLeftOptionsT NativeTableType; + typedef StablehloShiftLeftOptionsBuilder Builder; + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); } - SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static 
flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + StablehloShiftLeftOptionsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StablehloShiftLeftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloShiftLeftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct SliceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) +struct StablehloShiftLeftOptionsBuilder { + typedef StablehloShiftLeftOptions Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + explicit StablehloShiftLeftOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - SliceOptionsBuilder &operator=(const SliceOptionsBuilder &); - flatbuffers::Offset Finish() { + ::flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = ::flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SliceOptionsBuilder builder_(_fbb); +inline ::flatbuffers::Offset CreateStablehloShiftLeftOptions( + ::flatbuffers::FlatBufferBuilder &_fbb) { + StablehloShiftLeftOptionsBuilder builder_(_fbb); return builder_.Finish(); } -flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeConvOptionsT : public flatbuffers::NativeTable { - typedef TransposeConvOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - TransposeConvOptionsT() - : padding(tflite::Padding_SAME), 
- stride_w(0), - stride_h(0) { - } -}; +::flatbuffers::Offset CreateStablehloShiftLeftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloShiftLeftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeConvOptionsT NativeTableType; +struct OperatorT : public ::flatbuffers::NativeTable { + typedef Operator TableType; + uint32_t opcode_index = 0; + std::vector inputs{}; + std::vector outputs{}; + tflite::BuiltinOptionsUnion builtin_options{}; + std::vector custom_options{}; + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS; + std::vector mutating_variable_inputs{}; + std::vector intermediates{}; + uint64_t large_custom_options_offset = 0; + uint64_t large_custom_options_size = 0; + tflite::BuiltinOptions2Union builtin_options_2{}; + int32_t debug_metadata_index = -1; +}; + +struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef OperatorT NativeTableType; + typedef OperatorBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8 + VT_OPCODE_INDEX = 4, + VT_INPUTS = 6, + VT_OUTPUTS = 8, + VT_BUILTIN_OPTIONS_TYPE = 10, + VT_BUILTIN_OPTIONS = 12, + VT_CUSTOM_OPTIONS = 14, + VT_CUSTOM_OPTIONS_FORMAT = 16, + VT_MUTATING_VARIABLE_INPUTS = 18, + VT_INTERMEDIATES = 20, + VT_LARGE_CUSTOM_OPTIONS_OFFSET = 22, + VT_LARGE_CUSTOM_OPTIONS_SIZE = 24, + VT_BUILTIN_OPTIONS_2_TYPE = 26, + VT_BUILTIN_OPTIONS_2 = 28, + VT_DEBUG_METADATA_INDEX = 30 }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); + uint32_t opcode_index() const { + return GetField(VT_OPCODE_INDEX, 0); } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); + const ::flatbuffers::Vector *inputs() const { + return GetPointer *>(VT_INPUTS); } - int32_t stride_h() const { - return 
GetField(VT_STRIDE_H, 0); + const ::flatbuffers::Vector *outputs() const { + return GetPointer *>(VT_OUTPUTS); } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - verifier.EndTable(); + tflite::BuiltinOptions builtin_options_type() const { + return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); } - TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeConvOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(TransposeConvOptions::VT_PADDING, static_cast(padding), 0); + const void *builtin_options() const { + return GetPointer(VT_BUILTIN_OPTIONS); + } + template const T *builtin_options_as() const; + const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::RNNOptions *builtin_options_as_RNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::AddOptions *builtin_options_as_AddOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CallOptions *builtin_options_as_CallOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::MulOptions *builtin_options_as_MulOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::PadOptions *builtin_options_as_PadOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GatherOptions *builtin_options_as_GatherOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SubOptions *builtin_options_as_SubOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DivOptions *builtin_options_as_DivOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ExpOptions *builtin_options_as_ExpOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::SplitOptions *builtin_options_as_SplitOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CastOptions *builtin_options_as_CastOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LessOptions *builtin_options_as_LessOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::NegOptions *builtin_options_as_NegOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::PadV2Options *builtin_options_as_PadV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SelectOptions *builtin_options_as_SelectOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SliceOptions *builtin_options_as_SliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); + const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? 
static_cast(builtin_options()) : nullptr; } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); + const tflite::TileOptions *builtin_options_as_TileOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? static_cast(builtin_options()) : nullptr; } - explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; } - TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::EqualOptions *builtin_options_as_EqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateTransposeConvOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0) { - TransposeConvOptionsBuilder builder_(_fbb); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpandDimsOptionsT : public flatbuffers::NativeTable { - typedef ExpandDimsOptions TableType; - ExpandDimsOptionsT() { + const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpandDimsOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? static_cast(builtin_options()) : nullptr; } - ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpandDimsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::PowOptions *builtin_options_as_PowOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; } - ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateExpandDimsOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpandDimsOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SparseToDenseOptionsT : public flatbuffers::NativeTable { - typedef SparseToDenseOptions TableType; - bool validate_indices; - SparseToDenseOptionsT() - : validate_indices(false) { + const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SparseToDenseOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALIDATE_INDICES = 4 - }; - bool validate_indices() const { - return GetField(VT_VALIDATE_INDICES, 0) != 0; + const tflite::PackOptions *builtin_options_as_PackOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALIDATE_INDICES) && - verifier.EndTable(); + const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? 
static_cast(builtin_options()) : nullptr; } - SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SparseToDenseOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_validate_indices(bool validate_indices) { - fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); + const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; } - explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? static_cast(builtin_options()) : nullptr; } - SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateSparseToDenseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool validate_indices = false) { - SparseToDenseOptionsBuilder builder_(_fbb); - builder_.add_validate_indices(validate_indices); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EqualOptionsT : public flatbuffers::NativeTable { - typedef EqualOptions TableType; - EqualOptionsT() { + const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; } - EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::SquareOptions *builtin_options_as_SquareOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? 
static_cast(builtin_options()) : nullptr; } - EqualOptionsBuilder &operator=(const EqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - EqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NotEqualOptionsT : public flatbuffers::NativeTable { - typedef NotEqualOptions TableType; - NotEqualOptionsT() { + const tflite::FillOptions *builtin_options_as_FillOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NotEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? 
static_cast(builtin_options()) : nullptr; } - NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NotEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; } - NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateNotEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NotEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ShapeOptionsT : public flatbuffers::NativeTable { - typedef ShapeOptions TableType; - tflite::TensorType out_type; - ShapeOptionsT() - : out_type(tflite::TensorType_FLOAT32) { + const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ShapeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUT_TYPE = 4 - }; - tflite::TensorType out_type() const { - return static_cast(GetField(VT_OUT_TYPE, 0)); + const tflite::RangeOptions *builtin_options_as_RangeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUT_TYPE) && - verifier.EndTable(); + const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? 
static_cast(builtin_options()) : nullptr; } - ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ShapeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_out_type(tflite::TensorType out_type) { - fbb_.AddElement(ShapeOptions::VT_OUT_TYPE, static_cast(out_type), 0); + const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? static_cast(builtin_options()) : nullptr; } - explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast(builtin_options()) : nullptr; } - ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateShapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType out_type = tflite::TensorType_FLOAT32) { - ShapeOptionsBuilder builder_(_fbb); - builder_.add_out_type(out_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RankOptionsT : public flatbuffers::NativeTable { - typedef RankOptions TableType; - RankOptionsT() { + const tflite::AbsOptions *builtin_options_as_AbsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RankOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? static_cast(builtin_options()) : nullptr; } - RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RankOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? 
static_cast(builtin_options()) : nullptr; } - RankOptionsBuilder &operator=(const RankOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateRankOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - RankOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PowOptionsT : public flatbuffers::NativeTable { - typedef PowOptions TableType; - PowOptionsT() { + const tflite::AddNOptions *builtin_options_as_AddNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PowOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? 
static_cast(builtin_options()) : nullptr; } - PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PowOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::CosOptions *builtin_options_as_CosOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? static_cast(builtin_options()) : nullptr; } - PowOptionsBuilder &operator=(const PowOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::WhereOptions *builtin_options_as_WhereOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreatePowOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PowOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FakeQuantOptionsT : public flatbuffers::NativeTable { - typedef FakeQuantOptions TableType; - float min; - float max; - int32_t num_bits; - bool narrow_range; - FakeQuantOptionsT() - : min(0.0f), - max(0.0f), - num_bits(0), - narrow_range(false) { + const tflite::RankOptions *builtin_options_as_RankOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FakeQuantOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MIN = 4, - VT_MAX = 6, - VT_NUM_BITS = 8, - VT_NARROW_RANGE = 10 - }; - float min() const { - return GetField(VT_MIN, 0.0f); + const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; } - float max() const { - return GetField(VT_MAX, 0.0f); + const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? static_cast(builtin_options()) : nullptr; } - int32_t num_bits() const { - return GetField(VT_NUM_BITS, 0); + const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; } - bool narrow_range() const { - return GetField(VT_NARROW_RANGE, 0) != 0; + const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? 
static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MIN) && - VerifyField(verifier, VT_MAX) && - VerifyField(verifier, VT_NUM_BITS) && - VerifyField(verifier, VT_NARROW_RANGE) && - verifier.EndTable(); + const tflite::IfOptions *builtin_options_as_IfOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? static_cast(builtin_options()) : nullptr; } - FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FakeQuantOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_min(float min) { - fbb_.AddElement(FakeQuantOptions::VT_MIN, min, 0.0f); + const tflite::WhileOptions *builtin_options_as_WhileOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; } - void add_max(float max) { - fbb_.AddElement(FakeQuantOptions::VT_MAX, max, 0.0f); + const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? static_cast(builtin_options()) : nullptr; } - void add_num_bits(int32_t num_bits) { - fbb_.AddElement(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); + const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { + return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? 
static_cast(builtin_options()) : nullptr; } - void add_narrow_range(bool narrow_range) { - fbb_.AddElement(FakeQuantOptions::VT_NARROW_RANGE, static_cast(narrow_range), 0); + const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { + return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; } - explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast(builtin_options()) : nullptr; } - FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateFakeQuantOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float min = 0.0f, - float max = 0.0f, - int32_t num_bits = 0, - bool narrow_range = false) { - FakeQuantOptionsBuilder builder_(_fbb); - builder_.add_num_bits(num_bits); - builder_.add_max(max); - builder_.add_min(min); - builder_.add_narrow_range(narrow_range); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PackOptionsT : public flatbuffers::NativeTable { - typedef PackOptions TableType; - int32_t values_count; - int32_t axis; - PackOptionsT() - : values_count(0), - axis(0) { + const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PackOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES_COUNT = 4, - VT_AXIS = 6 - }; - int32_t values_count() const { - return GetField(VT_VALUES_COUNT, 0); + const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? static_cast(builtin_options()) : nullptr; } - int32_t axis() const { - return GetField(VT_AXIS, 0); + const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? 
static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALUES_COUNT) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); + const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast(builtin_options()) : nullptr; } - PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PackOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values_count(int32_t values_count) { - fbb_.AddElement(PackOptions::VT_VALUES_COUNT, values_count, 0); + const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast(builtin_options()) : nullptr; } - void add_axis(int32_t axis) { - fbb_.AddElement(PackOptions::VT_AXIS, axis, 0); + const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; } - explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions ? 
static_cast(builtin_options()) : nullptr; } - PackOptionsBuilder &operator=(const PackOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::Conv3DOptions *builtin_options_as_Conv3DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Conv3DOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreatePackOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t values_count = 0, - int32_t axis = 0) { - PackOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_values_count(values_count); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalOrOptionsT : public flatbuffers::NativeTable { - typedef LogicalOrOptions TableType; - LogicalOrOptionsT() { + const tflite::HashtableOptions *builtin_options_as_HashtableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalOrOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableFindOptions ? 
static_cast(builtin_options()) : nullptr; } - LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalOrOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableImportOptions ? static_cast(builtin_options()) : nullptr; } - LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableSizeOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateLogicalOrOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalOrOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OneHotOptionsT : public flatbuffers::NativeTable { - typedef OneHotOptions TableType; - int32_t axis; - OneHotOptionsT() - : axis(0) { + const tflite::VarHandleOptions *builtin_options_as_VarHandleOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_VarHandleOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OneHotOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); + const tflite::ReadVariableOptions *builtin_options_as_ReadVariableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReadVariableOptions ? static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); + const tflite::AssignVariableOptions *builtin_options_as_AssignVariableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AssignVariableOptions ? static_cast(builtin_options()) : nullptr; } - OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct OneHotOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(OneHotOptions::VT_AXIS, axis, 0); + const tflite::RandomOptions *builtin_options_as_RandomOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RandomOptions ? static_cast(builtin_options()) : nullptr; } - explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::BucketizeOptions *builtin_options_as_BucketizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BucketizeOptions ? 
static_cast(builtin_options()) : nullptr; } - OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::GeluOptions *builtin_options_as_GeluOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GeluOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateOneHotOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { - OneHotOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AbsOptionsT : public flatbuffers::NativeTable { - typedef AbsOptions TableType; - AbsOptionsT() { + const tflite::DynamicUpdateSliceOptions *builtin_options_as_DynamicUpdateSliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DynamicUpdateSliceOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AbsOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::UnsortedSegmentProdOptions *builtin_options_as_UnsortedSegmentProdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentProdOptions ? 
static_cast(builtin_options()) : nullptr; } - AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AbsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::UnsortedSegmentMaxOptions *builtin_options_as_UnsortedSegmentMaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMaxOptions ? static_cast(builtin_options()) : nullptr; } - AbsOptionsBuilder &operator=(const AbsOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::UnsortedSegmentMinOptions *builtin_options_as_UnsortedSegmentMinOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMinOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateAbsOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - AbsOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HardSwishOptionsT : public flatbuffers::NativeTable { - typedef HardSwishOptions TableType; - HardSwishOptionsT() { + const tflite::UnsortedSegmentSumOptions *builtin_options_as_UnsortedSegmentSumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentSumOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HardSwishOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::ATan2Options *builtin_options_as_ATan2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_ATan2Options ? static_cast(builtin_options()) : nullptr; } - HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HardSwishOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::SignOptions *builtin_options_as_SignOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SignOptions ? static_cast(builtin_options()) : nullptr; } - HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::BitcastOptions *builtin_options_as_BitcastOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BitcastOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateHardSwishOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - HardSwishOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalAndOptionsT : public flatbuffers::NativeTable { - typedef LogicalAndOptions TableType; - LogicalAndOptionsT() { + const tflite::BitwiseXorOptions *builtin_options_as_BitwiseXorOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BitwiseXorOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalAndOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::RightShiftOptions *builtin_options_as_RightShiftOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RightShiftOptions ? 
static_cast(builtin_options()) : nullptr; } - LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalAndOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const ::flatbuffers::Vector *custom_options() const { + return GetPointer *>(VT_CUSTOM_OPTIONS); } - LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + tflite::CustomOptionsFormat custom_options_format() const { + return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); } -}; - -inline flatbuffers::Offset CreateLogicalAndOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalAndOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalNotOptionsT : public flatbuffers::NativeTable { - typedef LogicalNotOptions TableType; - LogicalNotOptionsT() { + const ::flatbuffers::Vector *mutating_variable_inputs() const { + return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); } -}; - -struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalNotOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const ::flatbuffers::Vector *intermediates() const { + return GetPointer 
*>(VT_INTERMEDIATES); } - LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalNotOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + uint64_t large_custom_options_offset() const { + return GetField(VT_LARGE_CUSTOM_OPTIONS_OFFSET, 0); } - LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + uint64_t large_custom_options_size() const { + return GetField(VT_LARGE_CUSTOM_OPTIONS_SIZE, 0); } -}; - -inline flatbuffers::Offset CreateLogicalNotOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalNotOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UnpackOptionsT : public flatbuffers::NativeTable { - typedef UnpackOptions TableType; - int32_t num; - int32_t axis; - UnpackOptionsT() - : num(0), - axis(0) { + tflite::BuiltinOptions2 builtin_options_2_type() const { + return static_cast(GetField(VT_BUILTIN_OPTIONS_2_TYPE, 0)); + } + const void *builtin_options_2() const { + return GetPointer(VT_BUILTIN_OPTIONS_2); + } + template const T *builtin_options_2_as() const; + const tflite::StablehloConcatenateOptions *builtin_options_2_as_StablehloConcatenateOptions() const { + return builtin_options_2_type() == 
tflite::BuiltinOptions2_StablehloConcatenateOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnpackOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM = 4, - VT_AXIS = 6 - }; - int32_t num() const { - return GetField(VT_NUM, 0); + const tflite::StablehloBroadcastInDimOptions *builtin_options_2_as_StablehloBroadcastInDimOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloBroadcastInDimOptions ? static_cast(builtin_options_2()) : nullptr; } - int32_t axis() const { - return GetField(VT_AXIS, 0); + const tflite::StablehloSliceOptions *builtin_options_2_as_StablehloSliceOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloSliceOptions ? static_cast(builtin_options_2()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); + const tflite::StablehloConvolutionOptions *builtin_options_2_as_StablehloConvolutionOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloConvolutionOptions ? 
static_cast(builtin_options_2()) : nullptr; } - UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UnpackOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num(int32_t num) { - fbb_.AddElement(UnpackOptions::VT_NUM, num, 0); + const tflite::StablehloCustomCallOptions *builtin_options_2_as_StablehloCustomCallOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloCustomCallOptions ? static_cast(builtin_options_2()) : nullptr; } - void add_axis(int32_t axis) { - fbb_.AddElement(UnpackOptions::VT_AXIS, axis, 0); + const tflite::StablehloReduceOptions *builtin_options_2_as_StablehloReduceOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloReduceOptions ? static_cast(builtin_options_2()) : nullptr; } - explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::StablehloScatterOptions *builtin_options_2_as_StablehloScatterOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloScatterOptions ? static_cast(builtin_options_2()) : nullptr; } - UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::StablehloCompareOptions *builtin_options_2_as_StablehloCompareOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloCompareOptions ? 
static_cast(builtin_options_2()) : nullptr; } -}; - -inline flatbuffers::Offset CreateUnpackOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num = 0, - int32_t axis = 0) { - UnpackOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_num(num); - return builder_.Finish(); -} - -flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FloorDivOptionsT : public flatbuffers::NativeTable { - typedef FloorDivOptions TableType; - FloorDivOptionsT() { + const tflite::StablehloDynamicSliceOptions *builtin_options_2_as_StablehloDynamicSliceOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloDynamicSliceOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FloorDivOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::StablehloPadOptions *builtin_options_2_as_StablehloPadOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloPadOptions ? 
static_cast(builtin_options_2()) : nullptr; } - FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FloorDivOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::StablehloIotaOptions *builtin_options_2_as_StablehloIotaOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloIotaOptions ? static_cast(builtin_options_2()) : nullptr; } - FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::StablehloDotGeneralOptions *builtin_options_2_as_StablehloDotGeneralOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloDotGeneralOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -inline flatbuffers::Offset CreateFloorDivOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FloorDivOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SquareOptionsT : public flatbuffers::NativeTable { - typedef SquareOptions TableType; - SquareOptionsT() { + const tflite::StablehloReduceWindowOptions *builtin_options_2_as_StablehloReduceWindowOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloReduceWindowOptions ? 
static_cast(builtin_options_2()) : nullptr; } -}; - -struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SquareOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::StablehloSortOptions *builtin_options_2_as_StablehloSortOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloSortOptions ? static_cast(builtin_options_2()) : nullptr; } - SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SquareOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::StablehloWhileOptions *builtin_options_2_as_StablehloWhileOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloWhileOptions ? static_cast(builtin_options_2()) : nullptr; } - SquareOptionsBuilder &operator=(const SquareOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::StablehloGatherOptions *builtin_options_2_as_StablehloGatherOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloGatherOptions ? 
static_cast(builtin_options_2()) : nullptr; } -}; - -inline flatbuffers::Offset CreateSquareOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SquareOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ZerosLikeOptionsT : public flatbuffers::NativeTable { - typedef ZerosLikeOptions TableType; - ZerosLikeOptionsT() { + const tflite::StablehloTransposeOptions *builtin_options_2_as_StablehloTransposeOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloTransposeOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ZerosLikeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); + const tflite::DilateOptions *builtin_options_2_as_DilateOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_DilateOptions ? static_cast(builtin_options_2()) : nullptr; } - ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ZerosLikeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const tflite::StablehloRngBitGeneratorOptions *builtin_options_2_as_StablehloRngBitGeneratorOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloRngBitGeneratorOptions ? 
static_cast(builtin_options_2()) : nullptr; } - ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const tflite::ReduceWindowOptions *builtin_options_2_as_ReduceWindowOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_ReduceWindowOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -inline flatbuffers::Offset CreateZerosLikeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ZerosLikeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FillOptionsT : public flatbuffers::NativeTable { - typedef FillOptions TableType; - FillOptionsT() { + const tflite::StableHLOCompositeOptions *builtin_options_2_as_StableHLOCompositeOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StableHLOCompositeOptions ? static_cast(builtin_options_2()) : nullptr; } -}; - -struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FillOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { + const tflite::StablehloShiftLeftOptions *builtin_options_2_as_StablehloShiftLeftOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloShiftLeftOptions ? static_cast(builtin_options_2()) : nullptr; + } + const tflite::StablehloCaseOptions *builtin_options_2_as_StablehloCaseOptions() const { + return builtin_options_2_type() == tflite::BuiltinOptions2_StablehloCaseOptions ? 
static_cast(builtin_options_2()) : nullptr; + } + int32_t debug_metadata_index() const { + return GetField(VT_DEBUG_METADATA_INDEX, -1); + } + bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPCODE_INDEX, 4) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE, 1) && + VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && + VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && + VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && + verifier.VerifyVector(custom_options()) && + VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT, 1) && + VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && + verifier.VerifyVector(mutating_variable_inputs()) && + VerifyOffset(verifier, VT_INTERMEDIATES) && + verifier.VerifyVector(intermediates()) && + VerifyField(verifier, VT_LARGE_CUSTOM_OPTIONS_OFFSET, 8) && + VerifyField(verifier, VT_LARGE_CUSTOM_OPTIONS_SIZE, 8) && + VerifyField(verifier, VT_BUILTIN_OPTIONS_2_TYPE, 1) && + VerifyOffset(verifier, VT_BUILTIN_OPTIONS_2) && + VerifyBuiltinOptions2(verifier, builtin_options_2(), builtin_options_2_type()) && + VerifyField(verifier, VT_DEBUG_METADATA_INDEX, 4) && verifier.EndTable(); } - FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + OperatorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, 
const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct FillOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FillOptionsBuilder &operator=(const FillOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv2DOptions(); +} -inline flatbuffers::Offset CreateFillOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FillOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthwiseConv2DOptions(); } -flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatEmbeddingsOptions(); +} -struct FloorModOptionsT : public flatbuffers::NativeTable { - typedef FloorModOptions TableType; - FloorModOptionsT() { - } -}; +template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSHProjectionOptions(); +} -struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FloorModOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Pool2DOptions(); +} -struct FloorModOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { + return builtin_options_as_SVDFOptions(); +} -inline flatbuffers::Offset CreateFloorModOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FloorModOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_RNNOptions(); } -flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { + return builtin_options_as_FullyConnectedOptions(); +} -struct RangeOptionsT : public flatbuffers::NativeTable { - typedef RangeOptions TableType; - RangeOptionsT() { - } -}; +template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_SoftmaxOptions(); +} -struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RangeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RangeOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatenationOptions(); +} -struct RangeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RangeOptionsBuilder &operator=(const RangeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddOptions(); +} -inline flatbuffers::Offset CreateRangeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - RangeOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { + return builtin_options_as_L2NormOptions(); } -flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { + return builtin_options_as_LocalResponseNormalizationOptions(); +} -struct LeakyReluOptionsT : public flatbuffers::NativeTable { - typedef LeakyReluOptions TableType; - float alpha; - LeakyReluOptionsT() - : alpha(0.0f) { - } -}; +template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSTMOptions(); +} -struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : 
private flatbuffers::Table { - typedef LeakyReluOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALPHA = 4 - }; - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALPHA) && - verifier.EndTable(); - } - LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeBilinearOptions(); +} -struct LeakyReluOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_alpha(float alpha) { - fbb_.AddElement(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); - } - explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { + return builtin_options_as_CallOptions(); +} -inline flatbuffers::Offset CreateLeakyReluOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float alpha = 0.0f) { - LeakyReluOptionsBuilder builder_(_fbb); - builder_.add_alpha(alpha); - return builder_.Finish(); +template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReshapeOptions(); } -flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, 
const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { + return builtin_options_as_SkipGramOptions(); +} -struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable { - typedef SquaredDifferenceOptions TableType; - SquaredDifferenceOptionsT() { - } -}; +template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToDepthOptions(); +} -struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SquaredDifferenceOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { + return builtin_options_as_EmbeddingLookupSparseOptions(); +} + +template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { + return builtin_options_as_MulOptions(); +} -struct SquaredDifferenceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::PadOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_PadOptions(); +} -inline flatbuffers::Offset CreateSquaredDifferenceOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SquaredDifferenceOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherOptions(); } -flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_BatchToSpaceNDOptions(); +} -struct MirrorPadOptionsT : public flatbuffers::NativeTable { - typedef MirrorPadOptions TableType; - tflite::MirrorPadMode mode; - MirrorPadOptionsT() - : mode(tflite::MirrorPadMode_REFLECT) { - } -}; +template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToBatchNDOptions(); +} -struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MirrorPadOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MODE = 4 - }; - tflite::MirrorPadMode mode() const { - return static_cast(GetField(VT_MODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MODE) && - verifier.EndTable(); - } - MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() 
const { + return builtin_options_as_TransposeOptions(); +} -struct MirrorPadOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_mode(tflite::MirrorPadMode mode) { - fbb_.AddElement(MirrorPadOptions::VT_MODE, static_cast(mode), 0); - } - explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReducerOptions(); +} -inline flatbuffers::Offset CreateMirrorPadOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) { - MirrorPadOptionsBuilder builder_(_fbb); - builder_.add_mode(mode); - return builder_.Finish(); +template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { + return builtin_options_as_SubOptions(); } -flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { + return builtin_options_as_DivOptions(); +} -struct UniqueOptionsT : public flatbuffers::NativeTable { - typedef UniqueOptions TableType; - tflite::TensorType idx_out_type; - UniqueOptionsT() - : idx_out_type(tflite::TensorType_INT32) { - } -}; +template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { + return builtin_options_as_SqueezeOptions(); +} -struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UniqueOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IDX_OUT_TYPE = 4 - }; - 
tflite::TensorType idx_out_type() const { - return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_IDX_OUT_TYPE) && - verifier.EndTable(); - } - UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_SequenceRNNOptions(); +} -struct UniqueOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_idx_out_type(tflite::TensorType idx_out_type) { - fbb_.AddElement(UniqueOptions::VT_IDX_OUT_TYPE, static_cast(idx_out_type), 2); - } - explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_StridedSliceOptions(); +} -inline flatbuffers::Offset CreateUniqueOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType idx_out_type = tflite::TensorType_INT32) { - UniqueOptionsBuilder builder_(_fbb); - builder_.add_idx_out_type(idx_out_type); - return builder_.Finish(); +template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpOptions(); } -flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { + return builtin_options_as_TopKV2Options(); +} -struct ReverseV2OptionsT : public flatbuffers::NativeTable { - typedef ReverseV2Options TableType; - ReverseV2OptionsT() { - } -}; +template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitOptions(); +} -struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReverseV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogSoftmaxOptions(); +} -struct ReverseV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { + return builtin_options_as_CastOptions(); +} -inline flatbuffers::Offset CreateReverseV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - ReverseV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::DequantizeOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_DequantizeOptions(); } -flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::MaximumMinimumOptions *Operator::builtin_options_as() const { + return builtin_options_as_MaximumMinimumOptions(); +} -struct AddNOptionsT : public flatbuffers::NativeTable { - typedef AddNOptions TableType; - AddNOptionsT() { - } -}; +template<> inline const tflite::ArgMaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMaxOptions(); +} -struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AddNOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::LessOptions *Operator::builtin_options_as() const { + return builtin_options_as_LessOptions(); +} -struct AddNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AddNOptionsBuilder &operator=(const AddNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::NegOptions *Operator::builtin_options_as() const { + return builtin_options_as_NegOptions(); +} -inline flatbuffers::Offset CreateAddNOptions( - 
flatbuffers::FlatBufferBuilder &_fbb) { - AddNOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::PadV2Options *Operator::builtin_options_as() const { + return builtin_options_as_PadV2Options(); } -flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::GreaterOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterOptions(); +} -struct GatherNdOptionsT : public flatbuffers::NativeTable { - typedef GatherNdOptions TableType; - GatherNdOptionsT() { - } -}; +template<> inline const tflite::GreaterEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterEqualOptions(); +} -struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherNdOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::LessEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_LessEqualOptions(); +} -struct GatherNdOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; 
+template<> inline const tflite::SelectOptions *Operator::builtin_options_as() const { + return builtin_options_as_SelectOptions(); +} -inline flatbuffers::Offset CreateGatherNdOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GatherNdOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::SliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SliceOptions(); } -flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::TransposeConvOptions *Operator::builtin_options_as() const { + return builtin_options_as_TransposeConvOptions(); +} -struct WhereOptionsT : public flatbuffers::NativeTable { - typedef WhereOptions TableType; - WhereOptionsT() { - } -}; +template<> inline const tflite::SparseToDenseOptions *Operator::builtin_options_as() const { + return builtin_options_as_SparseToDenseOptions(); +} -struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef WhereOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::TileOptions *Operator::builtin_options_as() const { + return builtin_options_as_TileOptions(); +} -struct WhereOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - WhereOptionsBuilder 
&operator=(const WhereOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::ExpandDimsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpandDimsOptions(); +} -inline flatbuffers::Offset CreateWhereOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - WhereOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::EqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_EqualOptions(); } -flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::NotEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_NotEqualOptions(); +} -struct ReverseSequenceOptionsT : public flatbuffers::NativeTable { - typedef ReverseSequenceOptions TableType; - int32_t seq_dim; - int32_t batch_dim; - ReverseSequenceOptionsT() - : seq_dim(0), - batch_dim(0) { - } -}; +template<> inline const tflite::ShapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ShapeOptions(); +} -struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReverseSequenceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SEQ_DIM = 4, - VT_BATCH_DIM = 6 - }; - int32_t seq_dim() const { - return GetField(VT_SEQ_DIM, 0); - } - int32_t batch_dim() const { - return GetField(VT_BATCH_DIM, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SEQ_DIM) && - VerifyField(verifier, VT_BATCH_DIM) && - verifier.EndTable(); - } - ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReverseSequenceOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::PowOptions *Operator::builtin_options_as() const { + return builtin_options_as_PowOptions(); +} -struct ReverseSequenceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_seq_dim(int32_t seq_dim) { - fbb_.AddElement(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); - } - void add_batch_dim(int32_t batch_dim) { - fbb_.AddElement(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); - } - explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::ArgMinOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMinOptions(); +} -inline flatbuffers::Offset CreateReverseSequenceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t seq_dim = 0, - int32_t batch_dim = 0) { - ReverseSequenceOptionsBuilder builder_(_fbb); - builder_.add_batch_dim(batch_dim); - builder_.add_seq_dim(seq_dim); - return builder_.Finish(); +template<> inline const tflite::FakeQuantOptions *Operator::builtin_options_as() const { + return builtin_options_as_FakeQuantOptions(); } -flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::PackOptions *Operator::builtin_options_as() const { + return builtin_options_as_PackOptions(); +} -struct MatrixDiagOptionsT : public flatbuffers::NativeTable { - 
typedef MatrixDiagOptions TableType; - MatrixDiagOptionsT() { - } -}; +template<> inline const tflite::LogicalOrOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalOrOptions(); +} -struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MatrixDiagOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::OneHotOptions *Operator::builtin_options_as() const { + return builtin_options_as_OneHotOptions(); +} -struct MatrixDiagOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::LogicalAndOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalAndOptions(); +} -inline flatbuffers::Offset CreateMatrixDiagOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MatrixDiagOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::LogicalNotOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalNotOptions(); } -flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::UnpackOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnpackOptions(); +} -struct QuantizeOptionsT : public flatbuffers::NativeTable { - typedef QuantizeOptions TableType; - QuantizeOptionsT() { - } -}; +template<> inline const tflite::FloorDivOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorDivOptions(); +} -struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::SquareOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquareOptions(); +} -struct QuantizeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::ZerosLikeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ZerosLikeOptions(); +} -inline flatbuffers::Offset CreateQuantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - QuantizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::FillOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_FillOptions(); } -flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceLSTMOptions(); +} -struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable { - typedef MatrixSetDiagOptions TableType; - MatrixSetDiagOptionsT() { - } -}; +template<> inline const tflite::BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceRNNOptions(); +} -struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MatrixSetDiagOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnidirectionalSequenceLSTMOptions(); +} -struct MatrixSetDiagOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - 
} -}; +template<> inline const tflite::FloorModOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorModOptions(); +} -inline flatbuffers::Offset CreateMatrixSetDiagOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MatrixSetDiagOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::RangeOptions *Operator::builtin_options_as() const { + return builtin_options_as_RangeOptions(); } -flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::ResizeNearestNeighborOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeNearestNeighborOptions(); +} -struct IfOptionsT : public flatbuffers::NativeTable { - typedef IfOptions TableType; - int32_t then_subgraph_index; - int32_t else_subgraph_index; - IfOptionsT() - : then_subgraph_index(0), - else_subgraph_index(0) { - } -}; +template<> inline const tflite::LeakyReluOptions *Operator::builtin_options_as() const { + return builtin_options_as_LeakyReluOptions(); +} -struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef IfOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_THEN_SUBGRAPH_INDEX = 4, - VT_ELSE_SUBGRAPH_INDEX = 6 - }; - int32_t then_subgraph_index() const { - return GetField(VT_THEN_SUBGRAPH_INDEX, 0); - } - int32_t else_subgraph_index() const { - return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX) && - VerifyField(verifier, VT_ELSE_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - 
static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::SquaredDifferenceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquaredDifferenceOptions(); +} -struct IfOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_then_subgraph_index(int32_t then_subgraph_index) { - fbb_.AddElement(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); - } - void add_else_subgraph_index(int32_t else_subgraph_index) { - fbb_.AddElement(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); - } - explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - IfOptionsBuilder &operator=(const IfOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::MirrorPadOptions *Operator::builtin_options_as() const { + return builtin_options_as_MirrorPadOptions(); +} -inline flatbuffers::Offset CreateIfOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t then_subgraph_index = 0, - int32_t else_subgraph_index = 0) { - IfOptionsBuilder builder_(_fbb); - builder_.add_else_subgraph_index(else_subgraph_index); - builder_.add_then_subgraph_index(then_subgraph_index); - return builder_.Finish(); +template<> inline const tflite::AbsOptions *Operator::builtin_options_as() const { + return builtin_options_as_AbsOptions(); } -flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::SplitVOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitVOptions(); +} -struct CallOnceOptionsT : public flatbuffers::NativeTable { - typedef CallOnceOptions 
TableType; - int32_t init_subgraph_index; - CallOnceOptionsT() - : init_subgraph_index(0) { - } -}; +template<> inline const tflite::UniqueOptions *Operator::builtin_options_as() const { + return builtin_options_as_UniqueOptions(); +} -struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOnceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INIT_SUBGRAPH_INDEX = 4 - }; - int32_t init_subgraph_index() const { - return GetField(VT_INIT_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_INIT_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::ReverseV2Options *Operator::builtin_options_as() const { + return builtin_options_as_ReverseV2Options(); +} -struct CallOnceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_init_subgraph_index(int32_t init_subgraph_index) { - fbb_.AddElement(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0); - } - explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CallOnceOptionsBuilder &operator=(const CallOnceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::AddNOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddNOptions(); +} -inline flatbuffers::Offset CreateCallOnceOptions( - 
flatbuffers::FlatBufferBuilder &_fbb, - int32_t init_subgraph_index = 0) { - CallOnceOptionsBuilder builder_(_fbb); - builder_.add_init_subgraph_index(init_subgraph_index); - return builder_.Finish(); +template<> inline const tflite::GatherNdOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherNdOptions(); } -flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::CosOptions *Operator::builtin_options_as() const { + return builtin_options_as_CosOptions(); +} -struct WhileOptionsT : public flatbuffers::NativeTable { - typedef WhileOptions TableType; - int32_t cond_subgraph_index; - int32_t body_subgraph_index; - WhileOptionsT() - : cond_subgraph_index(0), - body_subgraph_index(0) { - } -}; +template<> inline const tflite::WhereOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhereOptions(); +} -struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef WhileOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COND_SUBGRAPH_INDEX = 4, - VT_BODY_SUBGRAPH_INDEX = 6 - }; - int32_t cond_subgraph_index() const { - return GetField(VT_COND_SUBGRAPH_INDEX, 0); - } - int32_t body_subgraph_index() const { - return GetField(VT_BODY_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_COND_SUBGRAPH_INDEX) && - VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); -}; +template<> inline const tflite::RankOptions *Operator::builtin_options_as() const { + return builtin_options_as_RankOptions(); +} + +template<> inline const tflite::ReverseSequenceOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReverseSequenceOptions(); +} + +template<> inline const tflite::MatrixDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixDiagOptions(); +} + +template<> inline const tflite::QuantizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_QuantizeOptions(); +} -struct WhileOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_cond_subgraph_index(int32_t cond_subgraph_index) { - fbb_.AddElement(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); - } - void add_body_subgraph_index(int32_t body_subgraph_index) { - fbb_.AddElement(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); - } - explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - WhileOptionsBuilder &operator=(const WhileOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::MatrixSetDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixSetDiagOptions(); +} -inline flatbuffers::Offset CreateWhileOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t cond_subgraph_index = 0, - int32_t body_subgraph_index = 0) { - WhileOptionsBuilder builder_(_fbb); - builder_.add_body_subgraph_index(body_subgraph_index); - builder_.add_cond_subgraph_index(cond_subgraph_index); - return builder_.Finish(); +template<> inline const tflite::HardSwishOptions *Operator::builtin_options_as() const { + return builtin_options_as_HardSwishOptions(); } -flatbuffers::Offset 
CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::IfOptions *Operator::builtin_options_as() const { + return builtin_options_as_IfOptions(); +} -struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable { - typedef NonMaxSuppressionV4Options TableType; - NonMaxSuppressionV4OptionsT() { - } -}; +template<> inline const tflite::WhileOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhileOptions(); +} -struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NonMaxSuppressionV4OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::DepthToSpaceOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthToSpaceOptions(); +} -struct NonMaxSuppressionV4OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::NonMaxSuppressionV4Options *Operator::builtin_options_as() const { + return 
builtin_options_as_NonMaxSuppressionV4Options(); +} -inline flatbuffers::Offset CreateNonMaxSuppressionV4Options( - flatbuffers::FlatBufferBuilder &_fbb) { - NonMaxSuppressionV4OptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::NonMaxSuppressionV5Options *Operator::builtin_options_as() const { + return builtin_options_as_NonMaxSuppressionV5Options(); } -flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::ScatterNdOptions *Operator::builtin_options_as() const { + return builtin_options_as_ScatterNdOptions(); +} -struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable { - typedef NonMaxSuppressionV5Options TableType; - NonMaxSuppressionV5OptionsT() { - } -}; +template<> inline const tflite::SelectV2Options *Operator::builtin_options_as() const { + return builtin_options_as_SelectV2Options(); +} -struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NonMaxSuppressionV5OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::DensifyOptions *Operator::builtin_options_as() const { + return builtin_options_as_DensifyOptions(); +} -struct NonMaxSuppressionV5OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit 
NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::SegmentSumOptions *Operator::builtin_options_as() const { + return builtin_options_as_SegmentSumOptions(); +} -inline flatbuffers::Offset CreateNonMaxSuppressionV5Options( - flatbuffers::FlatBufferBuilder &_fbb) { - NonMaxSuppressionV5OptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as() const { + return builtin_options_as_BatchMatMulOptions(); } -flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::CumsumOptions *Operator::builtin_options_as() const { + return builtin_options_as_CumsumOptions(); +} -struct ScatterNdOptionsT : public flatbuffers::NativeTable { - typedef ScatterNdOptions TableType; - ScatterNdOptionsT() { - } -}; +template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as() const { + return builtin_options_as_CallOnceOptions(); +} -struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ScatterNdOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as() const { + return builtin_options_as_BroadcastToOptions(); +} -struct ScatterNdOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::Rfft2dOptions *Operator::builtin_options_as() const { + return builtin_options_as_Rfft2dOptions(); +} -inline flatbuffers::Offset CreateScatterNdOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ScatterNdOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::Conv3DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv3DOptions(); } -flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::HashtableOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableOptions(); +} -struct SelectV2OptionsT : public flatbuffers::NativeTable { - typedef SelectV2Options TableType; - SelectV2OptionsT() { - } -}; +template<> inline const tflite::HashtableFindOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableFindOptions(); +} -struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) 
const; - void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::HashtableImportOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableImportOptions(); +} -struct SelectV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::HashtableSizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableSizeOptions(); +} -inline flatbuffers::Offset CreateSelectV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::VarHandleOptions *Operator::builtin_options_as() const { + return builtin_options_as_VarHandleOptions(); } -flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::ReadVariableOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReadVariableOptions(); +} -struct DensifyOptionsT : public flatbuffers::NativeTable { - typedef DensifyOptions TableType; - DensifyOptionsT() { - } -}; +template<> inline const tflite::AssignVariableOptions *Operator::builtin_options_as() const { + return builtin_options_as_AssignVariableOptions(); +} -struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef 
DensifyOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::RandomOptions *Operator::builtin_options_as() const { + return builtin_options_as_RandomOptions(); +} -struct DensifyOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::BucketizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_BucketizeOptions(); +} -inline flatbuffers::Offset CreateDensifyOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DensifyOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::GeluOptions *Operator::builtin_options_as() const { + return builtin_options_as_GeluOptions(); } -flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::DynamicUpdateSliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_DynamicUpdateSliceOptions(); +} -struct SegmentSumOptionsT : public flatbuffers::NativeTable { - typedef SegmentSumOptions TableType; - SegmentSumOptionsT() { - } -}; +template<> inline const 
tflite::UnsortedSegmentProdOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentProdOptions(); +} -struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SegmentSumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::UnsortedSegmentMaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentMaxOptions(); +} -struct SegmentSumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::UnsortedSegmentMinOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentMinOptions(); +} -inline flatbuffers::Offset CreateSegmentSumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SegmentSumOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::UnsortedSegmentSumOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentSumOptions(); } -flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher 
= nullptr); +template<> inline const tflite::ATan2Options *Operator::builtin_options_as() const { + return builtin_options_as_ATan2Options(); +} -struct BatchMatMulOptionsT : public flatbuffers::NativeTable { - typedef BatchMatMulOptions TableType; - bool adj_x; - bool adj_y; - bool asymmetric_quantize_inputs; - BatchMatMulOptionsT() - : adj_x(false), - adj_y(false), - asymmetric_quantize_inputs(false) { - } -}; +template<> inline const tflite::SignOptions *Operator::builtin_options_as() const { + return builtin_options_as_SignOptions(); +} -struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchMatMulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ADJ_X = 4, - VT_ADJ_Y = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - bool adj_x() const { - return GetField(VT_ADJ_X, 0) != 0; - } - bool adj_y() const { - return GetField(VT_ADJ_Y, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ADJ_X) && - VerifyField(verifier, VT_ADJ_Y) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::BitcastOptions *Operator::builtin_options_as() const { + return builtin_options_as_BitcastOptions(); +} -struct BatchMatMulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_adj_x(bool adj_x) { - 
fbb_.AddElement(BatchMatMulOptions::VT_ADJ_X, static_cast(adj_x), 0); - } - void add_adj_y(bool adj_y) { - fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::BitwiseXorOptions *Operator::builtin_options_as() const { + return builtin_options_as_BitwiseXorOptions(); +} -inline flatbuffers::Offset CreateBatchMatMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool adj_x = false, - bool adj_y = false, - bool asymmetric_quantize_inputs = false) { - BatchMatMulOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_adj_y(adj_y); - builder_.add_adj_x(adj_x); - return builder_.Finish(); +template<> inline const tflite::RightShiftOptions *Operator::builtin_options_as() const { + return builtin_options_as_RightShiftOptions(); } -flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::StablehloConcatenateOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloConcatenateOptions(); +} -struct CumsumOptionsT : public flatbuffers::NativeTable { - typedef CumsumOptions TableType; - bool exclusive; - bool reverse; - CumsumOptionsT() - : exclusive(false), - reverse(false) { - } -}; +template<> inline const tflite::StablehloBroadcastInDimOptions 
*Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloBroadcastInDimOptions(); +} -struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CumsumOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_EXCLUSIVE = 4, - VT_REVERSE = 6 - }; - bool exclusive() const { - return GetField(VT_EXCLUSIVE, 0) != 0; - } - bool reverse() const { - return GetField(VT_REVERSE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_EXCLUSIVE) && - VerifyField(verifier, VT_REVERSE) && - verifier.EndTable(); - } - CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::StablehloSliceOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloSliceOptions(); +} -struct CumsumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_exclusive(bool exclusive) { - fbb_.AddElement(CumsumOptions::VT_EXCLUSIVE, static_cast(exclusive), 0); - } - void add_reverse(bool reverse) { - fbb_.AddElement(CumsumOptions::VT_REVERSE, static_cast(reverse), 0); - } - explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CumsumOptionsBuilder &operator=(const CumsumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::StablehloConvolutionOptions *Operator::builtin_options_2_as() const { + return 
builtin_options_2_as_StablehloConvolutionOptions(); +} -inline flatbuffers::Offset CreateCumsumOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool exclusive = false, - bool reverse = false) { - CumsumOptionsBuilder builder_(_fbb); - builder_.add_reverse(reverse); - builder_.add_exclusive(exclusive); - return builder_.Finish(); +template<> inline const tflite::StablehloCustomCallOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloCustomCallOptions(); } -flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::StablehloReduceOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloReduceOptions(); +} -struct BroadcastToOptionsT : public flatbuffers::NativeTable { - typedef BroadcastToOptions TableType; - BroadcastToOptionsT() { - } -}; +template<> inline const tflite::StablehloScatterOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloScatterOptions(); +} -struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BroadcastToOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::StablehloCompareOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloCompareOptions(); +} -struct BroadcastToOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BroadcastToOptionsBuilder &operator=(const BroadcastToOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::StablehloDynamicSliceOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloDynamicSliceOptions(); +} -inline flatbuffers::Offset CreateBroadcastToOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BroadcastToOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::StablehloPadOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloPadOptions(); } -flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::StablehloIotaOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloIotaOptions(); +} -struct Rfft2dOptionsT : public flatbuffers::NativeTable { - typedef Rfft2dOptions TableType; - Rfft2dOptionsT() {} -}; +template<> inline const tflite::StablehloDotGeneralOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloDotGeneralOptions(); +} -struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Rfft2dOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && verifier.EndTable(); - } - Rfft2dOptionsT *UnPack( - const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo( - Rfft2dOptionsT *_o, - const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack( - flatbuffers::FlatBufferBuilder &_fbb, const 
Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::StablehloReduceWindowOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloReduceWindowOptions(); +} -struct Rfft2dOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Rfft2dOptionsBuilder &operator=(const Rfft2dOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::StablehloSortOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloSortOptions(); +} -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - Rfft2dOptionsBuilder builder_(_fbb); - return builder_.Finish(); +template<> inline const tflite::StablehloWhileOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloWhileOptions(); } -flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::StablehloGatherOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloGatherOptions(); +} -struct OperatorCodeT : public flatbuffers::NativeTable { - typedef OperatorCode TableType; - int8_t deprecated_builtin_code; - std::string custom_code; - int32_t version; - tflite::BuiltinOperator builtin_code; - OperatorCodeT() - : deprecated_builtin_code(0), - version(1), - builtin_code(tflite::BuiltinOperator_ADD) { - } -}; +template<> inline const tflite::StablehloTransposeOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloTransposeOptions(); +} -struct OperatorCode 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorCodeT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DEPRECATED_BUILTIN_CODE = 4, - VT_CUSTOM_CODE = 6, - VT_VERSION = 8, - VT_BUILTIN_CODE = 10 - }; - int8_t deprecated_builtin_code() const { - return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); - } - const flatbuffers::String *custom_code() const { - return GetPointer(VT_CUSTOM_CODE); - } - int32_t version() const { - return GetField(VT_VERSION, 1); - } - tflite::BuiltinOperator builtin_code() const { - return static_cast(GetField(VT_BUILTIN_CODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE) && - VerifyOffset(verifier, VT_CUSTOM_CODE) && - verifier.VerifyString(custom_code()) && - VerifyField(verifier, VT_VERSION) && - VerifyField(verifier, VT_BUILTIN_CODE) && - verifier.EndTable(); - } - OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +template<> inline const tflite::DilateOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_DilateOptions(); +} -struct OperatorCodeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { - fbb_.AddElement(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); - } - void add_custom_code(flatbuffers::Offset custom_code) { - fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); - } - void add_version(int32_t version) { - fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); - } - void 
add_builtin_code(tflite::BuiltinOperator builtin_code) { - fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); - } - explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorCodeBuilder &operator=(const OperatorCodeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +template<> inline const tflite::StablehloRngBitGeneratorOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloRngBitGeneratorOptions(); +} -inline flatbuffers::Offset CreateOperatorCode( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - flatbuffers::Offset custom_code = 0, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - OperatorCodeBuilder builder_(_fbb); - builder_.add_builtin_code(builtin_code); - builder_.add_version(version); - builder_.add_custom_code(custom_code); - builder_.add_deprecated_builtin_code(deprecated_builtin_code); - return builder_.Finish(); +template<> inline const tflite::ReduceWindowOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_ReduceWindowOptions(); } -inline flatbuffers::Offset CreateOperatorCodeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - const char *custom_code = nullptr, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - auto custom_code__ = custom_code ? 
_fbb.CreateString(custom_code) : 0; - return tflite::CreateOperatorCode( - _fbb, - deprecated_builtin_code, - custom_code__, - version, - builtin_code); +template<> inline const tflite::StableHLOCompositeOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StableHLOCompositeOptions(); } -flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const tflite::StablehloShiftLeftOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloShiftLeftOptions(); +} -struct OperatorT : public flatbuffers::NativeTable { - typedef Operator TableType; - uint32_t opcode_index; - std::vector inputs; - std::vector outputs; - tflite::BuiltinOptionsUnion builtin_options; - std::vector custom_options; - tflite::CustomOptionsFormat custom_options_format; - std::vector mutating_variable_inputs; - std::vector intermediates; - OperatorT() - : opcode_index(0), - custom_options_format(tflite::CustomOptionsFormat_FLEXBUFFERS) { - } -}; +template<> inline const tflite::StablehloCaseOptions *Operator::builtin_options_2_as() const { + return builtin_options_2_as_StablehloCaseOptions(); +} -struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OPCODE_INDEX = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_BUILTIN_OPTIONS_TYPE = 10, - VT_BUILTIN_OPTIONS = 12, - VT_CUSTOM_OPTIONS = 14, - VT_CUSTOM_OPTIONS_FORMAT = 16, - VT_MUTATING_VARIABLE_INPUTS = 18, - VT_INTERMEDIATES = 20 - }; - uint32_t opcode_index() const { - return GetField(VT_OPCODE_INDEX, 0); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); - } - tflite::BuiltinOptions builtin_options_type() const { - return 
static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); - } - const void *builtin_options() const { - return GetPointer(VT_BUILTIN_OPTIONS); - } - template const T *builtin_options_as() const; - const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RNNOptions *builtin_options_as_RNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::AddOptions *builtin_options_as_AddOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CallOptions *builtin_options_as_CallOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MulOptions *builtin_options_as_MulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PadOptions *builtin_options_as_PadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GatherOptions *builtin_options_as_GatherOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? 
static_cast(builtin_options()) : nullptr; +struct OperatorBuilder { + typedef Operator Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_opcode_index(uint32_t opcode_index) { + fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); } - const tflite::SubOptions *builtin_options_as_SubOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; + void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector> inputs) { + fbb_.AddOffset(Operator::VT_INPUTS, inputs); } - const tflite::DivOptions *builtin_options_as_DivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; + void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector> outputs) { + fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); } - const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; + void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) { + fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); } - const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + void add_builtin_options(::flatbuffers::Offset builtin_options) { + fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); } - const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? 
static_cast(builtin_options()) : nullptr; + void add_custom_options(::flatbuffers::Offset<::flatbuffers::Vector> custom_options) { + fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); } - const tflite::ExpOptions *builtin_options_as_ExpOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; + void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) { + fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); } - const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; + void add_mutating_variable_inputs(::flatbuffers::Offset<::flatbuffers::Vector> mutating_variable_inputs) { + fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); } - const tflite::SplitOptions *builtin_options_as_SplitOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; + void add_intermediates(::flatbuffers::Offset<::flatbuffers::Vector> intermediates) { + fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); } - const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; + void add_large_custom_options_offset(uint64_t large_custom_options_offset) { + fbb_.AddElement(Operator::VT_LARGE_CUSTOM_OPTIONS_OFFSET, large_custom_options_offset, 0); } - const tflite::CastOptions *builtin_options_as_CastOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? 
static_cast(builtin_options()) : nullptr; + void add_large_custom_options_size(uint64_t large_custom_options_size) { + fbb_.AddElement(Operator::VT_LARGE_CUSTOM_OPTIONS_SIZE, large_custom_options_size, 0); } - const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? static_cast(builtin_options()) : nullptr; + void add_builtin_options_2_type(tflite::BuiltinOptions2 builtin_options_2_type) { + fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_2_TYPE, static_cast(builtin_options_2_type), 0); } - const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; + void add_builtin_options_2(::flatbuffers::Offset builtin_options_2) { + fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS_2, builtin_options_2); } - const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; + void add_debug_metadata_index(int32_t debug_metadata_index) { + fbb_.AddElement(Operator::VT_DEBUG_METADATA_INDEX, debug_metadata_index, -1); } - const tflite::LessOptions *builtin_options_as_LessOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; + explicit OperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::NegOptions *builtin_options_as_NegOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? 
static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::PadV2Options *builtin_options_as_PadV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateOperator( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> inputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> outputs = 0, + tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, + ::flatbuffers::Offset builtin_options = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> custom_options = 0, + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + ::flatbuffers::Offset<::flatbuffers::Vector> mutating_variable_inputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> intermediates = 0, + uint64_t large_custom_options_offset = 0, + uint64_t large_custom_options_size = 0, + tflite::BuiltinOptions2 builtin_options_2_type = tflite::BuiltinOptions2_NONE, + ::flatbuffers::Offset builtin_options_2 = 0, + int32_t debug_metadata_index = -1) { + OperatorBuilder builder_(_fbb); + builder_.add_large_custom_options_size(large_custom_options_size); + builder_.add_large_custom_options_offset(large_custom_options_offset); + builder_.add_debug_metadata_index(debug_metadata_index); + builder_.add_builtin_options_2(builtin_options_2); + builder_.add_intermediates(intermediates); + builder_.add_mutating_variable_inputs(mutating_variable_inputs); + builder_.add_custom_options(custom_options); + builder_.add_builtin_options(builtin_options); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_opcode_index(opcode_index); + builder_.add_builtin_options_2_type(builtin_options_2_type); + 
builder_.add_custom_options_format(custom_options_format); + builder_.add_builtin_options_type(builtin_options_type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateOperatorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, + ::flatbuffers::Offset builtin_options = 0, + const std::vector *custom_options = nullptr, + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + const std::vector *mutating_variable_inputs = nullptr, + const std::vector *intermediates = nullptr, + uint64_t large_custom_options_offset = 0, + uint64_t large_custom_options_size = 0, + tflite::BuiltinOptions2 builtin_options_2_type = tflite::BuiltinOptions2_NONE, + ::flatbuffers::Offset builtin_options_2 = 0, + int32_t debug_metadata_index = -1) { + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; + auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; + auto intermediates__ = intermediates ? 
_fbb.CreateVector(*intermediates) : 0; + return tflite::CreateOperator( + _fbb, + opcode_index, + inputs__, + outputs__, + builtin_options_type, + builtin_options, + custom_options__, + custom_options_format, + mutating_variable_inputs__, + intermediates__, + large_custom_options_offset, + large_custom_options_size, + builtin_options_2_type, + builtin_options_2, + debug_metadata_index); +} + +::flatbuffers::Offset CreateOperator(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubGraphT : public ::flatbuffers::NativeTable { + typedef SubGraph TableType; + std::vector> tensors{}; + std::vector inputs{}; + std::vector outputs{}; + std::vector> operators{}; + std::string name{}; + int32_t debug_metadata_index = -1; + SubGraphT() = default; + SubGraphT(const SubGraphT &o); + SubGraphT(SubGraphT&&) FLATBUFFERS_NOEXCEPT = default; + SubGraphT &operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SubGraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SubGraphT NativeTableType; + typedef SubGraphBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TENSORS = 4, + VT_INPUTS = 6, + VT_OUTPUTS = 8, + VT_OPERATORS = 10, + VT_NAME = 12, + VT_DEBUG_METADATA_INDEX = 14 + }; + const ::flatbuffers::Vector<::flatbuffers::Offset> *tensors() const { + return GetPointer> *>(VT_TENSORS); } - const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector *inputs() const { + return GetPointer *>(VT_INPUTS); } - const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? 
static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector *outputs() const { + return GetPointer *>(VT_OUTPUTS); } - const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *operators() const { + return GetPointer> *>(VT_OPERATORS); } - const tflite::SelectOptions *builtin_options_as_SelectOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); } - const tflite::SliceOptions *builtin_options_as_SliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; + int32_t debug_metadata_index() const { + return GetField(VT_DEBUG_METADATA_INDEX, -1); } - const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? 
static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TENSORS) && + verifier.VerifyVector(tensors()) && + verifier.VerifyVectorOfTables(tensors()) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyOffset(verifier, VT_OPERATORS) && + verifier.VerifyVector(operators()) && + verifier.VerifyVectorOfTables(operators()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_DEBUG_METADATA_INDEX, 4) && + verifier.EndTable(); } - const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; + SubGraphT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubGraphT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SubGraphBuilder { + typedef SubGraph Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> tensors) { + fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); } - const tflite::TileOptions *builtin_options_as_TileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? 
static_cast(builtin_options()) : nullptr; + void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector> inputs) { + fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); } - const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; + void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector> outputs) { + fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); } - const tflite::EqualOptions *builtin_options_as_EqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; + void add_operators(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators) { + fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); } - const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(SubGraph::VT_NAME, name); } - const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? static_cast(builtin_options()) : nullptr; + void add_debug_metadata_index(int32_t debug_metadata_index) { + fbb_.AddElement(SubGraph::VT_DEBUG_METADATA_INDEX, debug_metadata_index, -1); } - const tflite::PowOptions *builtin_options_as_PowOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; + explicit SubGraphBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? 
static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSubGraph( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> tensors = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> inputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> outputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators = 0, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + int32_t debug_metadata_index = -1) { + SubGraphBuilder builder_(_fbb); + builder_.add_debug_metadata_index(debug_metadata_index); + builder_.add_name(name); + builder_.add_operators(operators); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_tensors(tensors); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSubGraphDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *tensors = nullptr, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + const std::vector<::flatbuffers::Offset> *operators = nullptr, + const char *name = nullptr, + int32_t debug_metadata_index = -1) { + auto tensors__ = tensors ? _fbb.CreateVector<::flatbuffers::Offset>(*tensors) : 0; + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset>(*operators) : 0; + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return tflite::CreateSubGraph( + _fbb, + tensors__, + inputs__, + outputs__, + operators__, + name__, + debug_metadata_index); +} + +::flatbuffers::Offset CreateSubGraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BufferT : public ::flatbuffers::NativeTable { + typedef Buffer TableType; + std::vector data{}; + uint64_t offset = 0; + uint64_t size = 0; +}; + +struct Buffer FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BufferT NativeTableType; + typedef BufferBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4, + VT_OFFSET = 6, + VT_SIZE = 8 + }; + const ::flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); } - const tflite::PackOptions *builtin_options_as_PackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; + uint64_t offset() const { + return GetField(VT_OFFSET, 0); } - const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; + uint64_t size() const { + return GetField(VT_SIZE, 0); } - const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + VerifyField(verifier, VT_OFFSET, 8) && + VerifyField(verifier, VT_SIZE, 8) && + verifier.EndTable(); } - const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? 
static_cast(builtin_options()) : nullptr; + BufferT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BufferBuilder { + typedef Buffer Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_data(::flatbuffers::Offset<::flatbuffers::Vector> data) { + fbb_.AddOffset(Buffer::VT_DATA, data); } - const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; + void add_offset(uint64_t offset) { + fbb_.AddElement(Buffer::VT_OFFSET, offset, 0); } - const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; + void add_size(uint64_t size) { + fbb_.AddElement(Buffer::VT_SIZE, size, 0); } - const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; + explicit BufferBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::SquareOptions *builtin_options_as_SquareOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateBuffer( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> data = 0, + uint64_t offset = 0, + uint64_t size = 0) { + BufferBuilder builder_(_fbb); + builder_.add_size(size); + builder_.add_offset(offset); + builder_.add_data(data); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateBufferDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *data = nullptr, + uint64_t offset = 0, + uint64_t size = 0) { + if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } + auto data__ = data ? _fbb.CreateVector(*data) : 0; + return tflite::CreateBuffer( + _fbb, + data__, + offset, + size); +} + +::flatbuffers::Offset CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MetadataT : public ::flatbuffers::NativeTable { + typedef Metadata TableType; + std::string name{}; + uint32_t buffer = 0; +}; + +struct Metadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef MetadataT NativeTableType; + typedef MetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_BUFFER = 6 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); } - const tflite::FillOptions *builtin_options_as_FillOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; + uint32_t buffer() const { + return GetField(VT_BUFFER, 0); } - const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? 
static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_BUFFER, 4) && + verifier.EndTable(); } - const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + MetadataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MetadataBuilder { + typedef Metadata Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Metadata::VT_NAME, name); } - const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; + void add_buffer(uint32_t buffer) { + fbb_.AddElement(Metadata::VT_BUFFER, buffer, 0); } - const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? static_cast(builtin_options()) : nullptr; + explicit MetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::RangeOptions *builtin_options_as_RangeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? 
static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateMetadata( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + uint32_t buffer = 0) { + MetadataBuilder builder_(_fbb); + builder_.add_buffer(buffer); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateMetadataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + uint32_t buffer = 0) { + auto name__ = name ? _fbb.CreateString(name) : 0; + return tflite::CreateMetadata( + _fbb, + name__, + buffer); +} + +::flatbuffers::Offset CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TensorMapT : public ::flatbuffers::NativeTable { + typedef TensorMap TableType; + std::string name{}; + uint32_t tensor_index = 0; +}; + +struct TensorMap FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TensorMapT NativeTableType; + typedef TensorMapBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_TENSOR_INDEX = 6 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); } - const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? 
static_cast(builtin_options()) : nullptr; + uint32_t tensor_index() const { + return GetField(VT_TENSOR_INDEX, 0); } - const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_TENSOR_INDEX, 4) && + verifier.EndTable(); } - const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? static_cast(builtin_options()) : nullptr; + TensorMapT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TensorMapT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TensorMapBuilder { + typedef TensorMap Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(TensorMap::VT_NAME, name); } - const tflite::AbsOptions *builtin_options_as_AbsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; + void add_tensor_index(uint32_t tensor_index) { + fbb_.AddElement(TensorMap::VT_TENSOR_INDEX, tensor_index, 0); } - const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? 
static_cast(builtin_options()) : nullptr; + explicit TensorMapBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateTensorMap( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + uint32_t tensor_index = 0) { + TensorMapBuilder builder_(_fbb); + builder_.add_tensor_index(tensor_index); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTensorMapDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + uint32_t tensor_index = 0) { + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return tflite::CreateTensorMap( + _fbb, + name__, + tensor_index); +} + +::flatbuffers::Offset CreateTensorMap(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SignatureDefT : public ::flatbuffers::NativeTable { + typedef SignatureDef TableType; + std::vector> inputs{}; + std::vector> outputs{}; + std::string signature_key{}; + uint32_t subgraph_index = 0; + SignatureDefT() = default; + SignatureDefT(const SignatureDefT &o); + SignatureDefT(SignatureDefT&&) FLATBUFFERS_NOEXCEPT = default; + SignatureDefT &operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SignatureDef FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SignatureDefT NativeTableType; + typedef SignatureDefBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUTS = 4, + VT_OUTPUTS = 6, + VT_SIGNATURE_KEY = 8, + VT_SUBGRAPH_INDEX = 12 + }; + const ::flatbuffers::Vector<::flatbuffers::Offset> *inputs() const { + return GetPointer> *>(VT_INPUTS); } - const tflite::AddNOptions *builtin_options_as_AddNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *outputs() const { + return GetPointer> *>(VT_OUTPUTS); } - const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::String *signature_key() const { + return GetPointer(VT_SIGNATURE_KEY); } - const tflite::CosOptions *builtin_options_as_CosOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? 
static_cast(builtin_options()) : nullptr; + uint32_t subgraph_index() const { + return GetField(VT_SUBGRAPH_INDEX, 0); } - const tflite::WhereOptions *builtin_options_as_WhereOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + verifier.VerifyVectorOfTables(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + verifier.VerifyVectorOfTables(outputs()) && + VerifyOffset(verifier, VT_SIGNATURE_KEY) && + verifier.VerifyString(signature_key()) && + VerifyField(verifier, VT_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); } - const tflite::RankOptions *builtin_options_as_RankOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? static_cast(builtin_options()) : nullptr; + SignatureDefT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SignatureDefT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SignatureDefBuilder { + typedef SignatureDef Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> inputs) { + fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs); } - const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? 
static_cast(builtin_options()) : nullptr; + void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> outputs) { + fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs); } - const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; + void add_signature_key(::flatbuffers::Offset<::flatbuffers::String> signature_key) { + fbb_.AddOffset(SignatureDef::VT_SIGNATURE_KEY, signature_key); } - const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? static_cast(builtin_options()) : nullptr; + void add_subgraph_index(uint32_t subgraph_index) { + fbb_.AddElement(SignatureDef::VT_SUBGRAPH_INDEX, subgraph_index, 0); } - const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; + explicit SignatureDefBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? static_cast(builtin_options()) : nullptr; + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - const tflite::IfOptions *builtin_options_as_IfOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline ::flatbuffers::Offset CreateSignatureDef( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> inputs = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> outputs = 0, + ::flatbuffers::Offset<::flatbuffers::String> signature_key = 0, + uint32_t subgraph_index = 0) { + SignatureDefBuilder builder_(_fbb); + builder_.add_subgraph_index(subgraph_index); + builder_.add_signature_key(signature_key); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSignatureDefDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *inputs = nullptr, + const std::vector<::flatbuffers::Offset> *outputs = nullptr, + const char *signature_key = nullptr, + uint32_t subgraph_index = 0) { + auto inputs__ = inputs ? _fbb.CreateVector<::flatbuffers::Offset>(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset>(*outputs) : 0; + auto signature_key__ = signature_key ? 
_fbb.CreateString(signature_key) : 0; + return tflite::CreateSignatureDef( + _fbb, + inputs__, + outputs__, + signature_key__, + subgraph_index); +} + +::flatbuffers::Offset CreateSignatureDef(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ModelT : public ::flatbuffers::NativeTable { + typedef Model TableType; + uint32_t version = 0; + std::vector> operator_codes{}; + std::vector> subgraphs{}; + std::string description{}; + std::vector> buffers{}; + std::vector metadata_buffer{}; + std::vector> metadata{}; + std::vector> signature_defs{}; + ModelT() = default; + ModelT(const ModelT &o); + ModelT(ModelT&&) FLATBUFFERS_NOEXCEPT = default; + ModelT &operator=(ModelT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Model FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ModelT NativeTableType; + typedef ModelBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VERSION = 4, + VT_OPERATOR_CODES = 6, + VT_SUBGRAPHS = 8, + VT_DESCRIPTION = 10, + VT_BUFFERS = 12, + VT_METADATA_BUFFER = 14, + VT_METADATA = 16, + VT_SIGNATURE_DEFS = 18 + }; + uint32_t version() const { + return GetField(VT_VERSION, 0); } - const tflite::WhileOptions *builtin_options_as_WhileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *operator_codes() const { + return GetPointer> *>(VT_OPERATOR_CODES); } - const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? 
static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *subgraphs() const { + return GetPointer> *>(VT_SUBGRAPHS); } - const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::String *description() const { + return GetPointer(VT_DESCRIPTION); } - const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *buffers() const { + return GetPointer> *>(VT_BUFFERS); } - const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector *metadata_buffer() const { + return GetPointer *>(VT_METADATA_BUFFER); } - const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *metadata() const { + return GetPointer> *>(VT_METADATA); } - const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast(builtin_options()) : nullptr; + const ::flatbuffers::Vector<::flatbuffers::Offset> *signature_defs() const { + return GetPointer> *>(VT_SIGNATURE_DEFS); } - const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? 
static_cast(builtin_options()) : nullptr; + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VERSION, 4) && + VerifyOffset(verifier, VT_OPERATOR_CODES) && + verifier.VerifyVector(operator_codes()) && + verifier.VerifyVectorOfTables(operator_codes()) && + VerifyOffset(verifier, VT_SUBGRAPHS) && + verifier.VerifyVector(subgraphs()) && + verifier.VerifyVectorOfTables(subgraphs()) && + VerifyOffset(verifier, VT_DESCRIPTION) && + verifier.VerifyString(description()) && + VerifyOffset(verifier, VT_BUFFERS) && + verifier.VerifyVector(buffers()) && + verifier.VerifyVectorOfTables(buffers()) && + VerifyOffset(verifier, VT_METADATA_BUFFER) && + verifier.VerifyVector(metadata_buffer()) && + VerifyOffset(verifier, VT_METADATA) && + verifier.VerifyVector(metadata()) && + verifier.VerifyVectorOfTables(metadata()) && + VerifyOffset(verifier, VT_SIGNATURE_DEFS) && + verifier.VerifyVector(signature_defs()) && + verifier.VerifyVectorOfTables(signature_defs()) && + verifier.EndTable(); } - const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast(builtin_options()) : nullptr; + ModelT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ModelT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ModelBuilder { + typedef Model Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_version(uint32_t version) { + fbb_.AddElement(Model::VT_VERSION, version, 0); } - const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? 
static_cast(builtin_options()) : nullptr; + void add_operator_codes(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operator_codes) { + fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); } - const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast(builtin_options()) : nullptr; + void add_subgraphs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> subgraphs) { + fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); } - const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; + void add_description(::flatbuffers::Offset<::flatbuffers::String> description) { + fbb_.AddOffset(Model::VT_DESCRIPTION, description); } - const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions - ? 
static_cast(builtin_options()) - : nullptr; + void add_buffers(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> buffers) { + fbb_.AddOffset(Model::VT_BUFFERS, buffers); } - const flatbuffers::Vector *custom_options() const { - return GetPointer *>(VT_CUSTOM_OPTIONS); + void add_metadata_buffer(::flatbuffers::Offset<::flatbuffers::Vector> metadata_buffer) { + fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); } - tflite::CustomOptionsFormat custom_options_format() const { - return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); + void add_metadata(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> metadata) { + fbb_.AddOffset(Model::VT_METADATA, metadata); } - const flatbuffers::Vector *mutating_variable_inputs() const { - return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); + void add_signature_defs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> signature_defs) { + fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs); } - const flatbuffers::Vector *intermediates() const { - return GetPointer *>(VT_INTERMEDIATES); + explicit ModelBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPCODE_INDEX) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE) && - VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && - VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && - VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && - verifier.VerifyVector(custom_options()) && - VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT) && - VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && - verifier.VerifyVector(mutating_variable_inputs()) && - VerifyOffset(verifier, VT_INTERMEDIATES) && - 
verifier.VerifyVector(intermediates()) && - verifier.EndTable(); + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; } - OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv2DOptions(); +inline ::flatbuffers::Offset CreateModel( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operator_codes = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> subgraphs = 0, + ::flatbuffers::Offset<::flatbuffers::String> description = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> buffers = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> metadata_buffer = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> metadata = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> signature_defs = 0) { + ModelBuilder builder_(_fbb); + builder_.add_signature_defs(signature_defs); + builder_.add_metadata(metadata); + builder_.add_metadata_buffer(metadata_buffer); + builder_.add_buffers(buffers); + builder_.add_description(description); + builder_.add_subgraphs(subgraphs); + builder_.add_operator_codes(operator_codes); + builder_.add_version(version); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateModelDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + const std::vector<::flatbuffers::Offset> *operator_codes = nullptr, + const std::vector<::flatbuffers::Offset> *subgraphs = nullptr, + const char 
*description = nullptr, + const std::vector<::flatbuffers::Offset> *buffers = nullptr, + const std::vector *metadata_buffer = nullptr, + const std::vector<::flatbuffers::Offset> *metadata = nullptr, + const std::vector<::flatbuffers::Offset> *signature_defs = nullptr) { + auto operator_codes__ = operator_codes ? _fbb.CreateVector<::flatbuffers::Offset>(*operator_codes) : 0; + auto subgraphs__ = subgraphs ? _fbb.CreateVector<::flatbuffers::Offset>(*subgraphs) : 0; + auto description__ = description ? _fbb.CreateString(description) : 0; + auto buffers__ = buffers ? _fbb.CreateVector<::flatbuffers::Offset>(*buffers) : 0; + auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector(*metadata_buffer) : 0; + auto metadata__ = metadata ? _fbb.CreateVector<::flatbuffers::Offset>(*metadata) : 0; + auto signature_defs__ = signature_defs ? _fbb.CreateVector<::flatbuffers::Offset>(*signature_defs) : 0; + return tflite::CreateModel( + _fbb, + version, + operator_codes__, + subgraphs__, + description__, + buffers__, + metadata_buffer__, + metadata__, + signature_defs__); +} + +::flatbuffers::Offset CreateModel(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline CustomQuantizationT *CustomQuantization::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CustomQuantizationT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthwiseConv2DOptions(); +inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } } } -template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() 
const { - return builtin_options_as_ConcatEmbeddingsOptions(); +inline ::flatbuffers::Offset CustomQuantization::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateCustomQuantization(_fbb, _o, _rehasher); } -template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSHProjectionOptions(); +inline ::flatbuffers::Offset CreateCustomQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16); + auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0; + return tflite::CreateCustomQuantization( + _fbb, + _custom); } -template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Pool2DOptions(); +inline BlockwiseQuantizationT *BlockwiseQuantization::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BlockwiseQuantizationT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { - return builtin_options_as_SVDFOptions(); +inline void BlockwiseQuantization::UnPackTo(BlockwiseQuantizationT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = scales(); _o->scales = _e; } + { auto _e = zero_points(); _o->zero_points = _e; } + { auto _e = block_size(); _o->block_size = _e; } } -template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_RNNOptions(); 
+inline ::flatbuffers::Offset BlockwiseQuantization::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BlockwiseQuantizationT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBlockwiseQuantization(_fbb, _o, _rehasher); } -template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { - return builtin_options_as_FullyConnectedOptions(); +inline ::flatbuffers::Offset CreateBlockwiseQuantization(::flatbuffers::FlatBufferBuilder &_fbb, const BlockwiseQuantizationT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BlockwiseQuantizationT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _scales = _o->scales; + auto _zero_points = _o->zero_points; + auto _block_size = _o->block_size; + return tflite::CreateBlockwiseQuantization( + _fbb, + _scales, + _zero_points, + _block_size); } -template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_SoftmaxOptions(); +inline QuantizationParametersT *QuantizationParameters::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new QuantizationParametersT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatenationOptions(); +inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } else { _o->min.resize(0); } } + { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < 
_e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } else { _o->max.resize(0); } } + { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } else { _o->scale.resize(0); } } + { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } else { _o->zero_point.resize(0); } } + { auto _e = details_type(); _o->details.type = _e; } + { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); } + { auto _e = quantized_dimension(); _o->quantized_dimension = _e; } } -template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddOptions(); +inline ::flatbuffers::Offset QuantizationParameters::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizationParameters(_fbb, _o, _rehasher); } -template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { - return builtin_options_as_L2NormOptions(); +inline ::flatbuffers::Offset CreateQuantizationParameters(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; + auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; + auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; + auto _zero_point = _o->zero_point.size() ? 
_fbb.CreateVector(_o->zero_point) : 0; + auto _details_type = _o->details.type; + auto _details = _o->details.Pack(_fbb); + auto _quantized_dimension = _o->quantized_dimension; + return tflite::CreateQuantizationParameters( + _fbb, + _min, + _max, + _scale, + _zero_point, + _details_type, + _details, + _quantized_dimension); } -template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { - return builtin_options_as_LocalResponseNormalizationOptions(); +inline Int32VectorT *Int32Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Int32VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSTMOptions(); +inline void Int32Vector::UnPackTo(Int32VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } else { _o->values.resize(0); } } } -template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeBilinearOptions(); +inline ::flatbuffers::Offset Int32Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateInt32Vector(_fbb, _o, _rehasher); } -template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOptions(); +inline ::flatbuffers::Offset CreateInt32Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; + return tflite::CreateInt32Vector( + _fbb, + _values); } -template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReshapeOptions(); +inline Uint16VectorT *Uint16Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Uint16VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { - return builtin_options_as_SkipGramOptions(); +inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } else { _o->values.resize(0); } } } -template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToDepthOptions(); +inline ::flatbuffers::Offset Uint16Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUint16Vector(_fbb, _o, _rehasher); } -template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { - return builtin_options_as_EmbeddingLookupSparseOptions(); +inline ::flatbuffers::Offset CreateUint16Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + 
_fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4); + auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; + return tflite::CreateUint16Vector( + _fbb, + _values); } -template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { - return builtin_options_as_MulOptions(); +inline Uint8VectorT *Uint8Vector::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Uint8VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::PadOptions *Operator::builtin_options_as() const { - return builtin_options_as_PadOptions(); +inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } } } -template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherOptions(); +inline ::flatbuffers::Offset Uint8Vector::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUint8Vector(_fbb, _o, _rehasher); } -template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchToSpaceNDOptions(); +inline ::flatbuffers::Offset CreateUint8Vector(::flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4); + auto _values = _o->values.size() ? 
_fbb.CreateVector(_o->values) : 0; + return tflite::CreateUint8Vector( + _fbb, + _values); +} + +inline DimensionMetadataT *DimensionMetadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DimensionMetadataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = format(); _o->format = _e; } + { auto _e = dense_size(); _o->dense_size = _e; } + { auto _e = array_segments_type(); _o->array_segments.type = _e; } + { auto _e = array_segments(); if (_e) _o->array_segments.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver); } + { auto _e = array_indices_type(); _o->array_indices.type = _e; } + { auto _e = array_indices(); if (_e) _o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); } +} + +inline ::flatbuffers::Offset DimensionMetadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDimensionMetadata(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateDimensionMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _format = _o->format; + auto _dense_size = _o->dense_size; + auto _array_segments_type = _o->array_segments.type; + auto _array_segments = _o->array_segments.Pack(_fbb); + auto _array_indices_type = _o->array_indices.type; + auto _array_indices = _o->array_indices.Pack(_fbb); + return tflite::CreateDimensionMetadata( + _fbb, + _format, + 
_dense_size, + _array_segments_type, + _array_segments, + _array_indices_type, + _array_indices); +} + +inline SparsityParametersT::SparsityParametersT(const SparsityParametersT &o) + : traversal_order(o.traversal_order), + block_map(o.block_map) { + dim_metadata.reserve(o.dim_metadata.size()); + for (const auto &dim_metadata_ : o.dim_metadata) { dim_metadata.emplace_back((dim_metadata_) ? new tflite::DimensionMetadataT(*dim_metadata_) : nullptr); } +} + +inline SparsityParametersT &SparsityParametersT::operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT { + std::swap(traversal_order, o.traversal_order); + std::swap(block_map, o.block_map); + std::swap(dim_metadata, o.dim_metadata); + return *this; +} + +inline SparsityParametersT *SparsityParameters::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SparsityParametersT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } else { _o->traversal_order.resize(0); } } + { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } else { _o->block_map.resize(0); } } + { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->dim_metadata[_i]) { _e->Get(_i)->UnPackTo(_o->dim_metadata[_i].get(), _resolver); } else { _o->dim_metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->dim_metadata.resize(0); } } +} + +inline ::flatbuffers::Offset SparsityParameters::Pack(::flatbuffers::FlatBufferBuilder 
&_fbb, const SparsityParametersT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSparsityParameters(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateSparsityParameters(::flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0; + auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0; + auto _dim_metadata = _o->dim_metadata.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateSparsityParameters( + _fbb, + _traversal_order, + _block_map, + _dim_metadata); +} + +inline VariantSubTypeT *VariantSubType::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new VariantSubTypeT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void VariantSubType::UnPackTo(VariantSubTypeT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } } + { auto _e = type(); _o->type = _e; } + { auto _e = has_rank(); _o->has_rank = _e; } +} + +inline ::flatbuffers::Offset VariantSubType::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateVariantSubType(_fbb, _o, _rehasher); +} + 
+inline ::flatbuffers::Offset CreateVariantSubType(::flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const VariantSubTypeT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _type = _o->type; + auto _has_rank = _o->has_rank; + return tflite::CreateVariantSubType( + _fbb, + _shape, + _type, + _has_rank); +} + +inline TensorT::TensorT(const TensorT &o) + : shape(o.shape), + type(o.type), + buffer(o.buffer), + name(o.name), + quantization((o.quantization) ? new tflite::QuantizationParametersT(*o.quantization) : nullptr), + is_variable(o.is_variable), + sparsity((o.sparsity) ? new tflite::SparsityParametersT(*o.sparsity) : nullptr), + shape_signature(o.shape_signature), + has_rank(o.has_rank) { + variant_tensors.reserve(o.variant_tensors.size()); + for (const auto &variant_tensors_ : o.variant_tensors) { variant_tensors.emplace_back((variant_tensors_) ? 
new tflite::VariantSubTypeT(*variant_tensors_) : nullptr); } +} + +inline TensorT &TensorT::operator=(TensorT o) FLATBUFFERS_NOEXCEPT { + std::swap(shape, o.shape); + std::swap(type, o.type); + std::swap(buffer, o.buffer); + std::swap(name, o.name); + std::swap(quantization, o.quantization); + std::swap(is_variable, o.is_variable); + std::swap(sparsity, o.sparsity); + std::swap(shape_signature, o.shape_signature); + std::swap(has_rank, o.has_rank); + std::swap(variant_tensors, o.variant_tensors); + return *this; +} + +inline TensorT *Tensor::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TensorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Tensor::UnPackTo(TensorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } } + { auto _e = type(); _o->type = _e; } + { auto _e = buffer(); _o->buffer = _e; } + { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = quantization(); if (_e) { if(_o->quantization) { _e->UnPackTo(_o->quantization.get(), _resolver); } else { _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); } } else if (_o->quantization) { _o->quantization.reset(); } } + { auto _e = is_variable(); _o->is_variable = _e; } + { auto _e = sparsity(); if (_e) { if(_o->sparsity) { _e->UnPackTo(_o->sparsity.get(), _resolver); } else { _o->sparsity = std::unique_ptr(_e->UnPack(_resolver)); } } else if (_o->sparsity) { _o->sparsity.reset(); } } + { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } else { _o->shape_signature.resize(0); } } + { auto _e = has_rank(); _o->has_rank = _e; } + { auto _e = 
variant_tensors(); if (_e) { _o->variant_tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->variant_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->variant_tensors[_i].get(), _resolver); } else { _o->variant_tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->variant_tensors.resize(0); } } +} + +inline ::flatbuffers::Offset Tensor::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensor(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateTensor(::flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _type = _o->type; + auto _buffer = _o->buffer; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; + auto _is_variable = _o->is_variable; + auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0; + auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0; + auto _has_rank = _o->has_rank; + auto _variant_tensors = _o->variant_tensors.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->variant_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateVariantSubType(*__va->__fbb, __va->__o->variant_tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateTensor( + _fbb, + _shape, + _type, + _buffer, + _name, + _quantization, + _is_variable, + _sparsity, + _shape_signature, + _has_rank, + _variant_tensors); +} + +inline StablehloGatherOptionsT *StablehloGatherOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloGatherOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StablehloGatherOptions::UnPackTo(StablehloGatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = offset_dims(); if (_e) { _o->offset_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->offset_dims[_i] = _e->Get(_i); } } else { _o->offset_dims.resize(0); } } + { auto _e = collapsed_slice_dims(); if (_e) { _o->collapsed_slice_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->collapsed_slice_dims[_i] = _e->Get(_i); } } else { _o->collapsed_slice_dims.resize(0); } } + { auto _e = start_index_map(); if (_e) { _o->start_index_map.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->start_index_map[_i] = _e->Get(_i); } } else { _o->start_index_map.resize(0); } } + { auto _e = index_vector_dim(); _o->index_vector_dim = _e; } + { auto _e = slice_sizes(); if (_e) { _o->slice_sizes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slice_sizes[_i] = _e->Get(_i); } } else { _o->slice_sizes.resize(0); } } + { auto _e = indices_are_sorted(); _o->indices_are_sorted = _e; } +} + +inline ::flatbuffers::Offset StablehloGatherOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloGatherOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateStablehloGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloGatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloGatherOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _offset_dims = _o->offset_dims.size() ? _fbb.CreateVector(_o->offset_dims) : 0; + auto _collapsed_slice_dims = _o->collapsed_slice_dims.size() ? _fbb.CreateVector(_o->collapsed_slice_dims) : 0; + auto _start_index_map = _o->start_index_map.size() ? _fbb.CreateVector(_o->start_index_map) : 0; + auto _index_vector_dim = _o->index_vector_dim; + auto _slice_sizes = _o->slice_sizes.size() ? _fbb.CreateVector(_o->slice_sizes) : 0; + auto _indices_are_sorted = _o->indices_are_sorted; + return tflite::CreateStablehloGatherOptions( + _fbb, + _offset_dims, + _collapsed_slice_dims, + _start_index_map, + _index_vector_dim, + _slice_sizes, + _indices_are_sorted); +} + +inline StablehloTransposeOptionsT *StablehloTransposeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloTransposeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StablehloTransposeOptions::UnPackTo(StablehloTransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = permutation(); if (_e) { _o->permutation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->permutation[_i] = _e->Get(_i); } } else { _o->permutation.resize(0); } } +} + +inline ::flatbuffers::Offset StablehloTransposeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloTransposeOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateStablehloTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloTransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloTransposeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _permutation = _o->permutation.size() ? _fbb.CreateVector(_o->permutation) : 0; + return tflite::CreateStablehloTransposeOptions( + _fbb, + _permutation); +} + +inline StablehloDotGeneralOptionsT *StablehloDotGeneralOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloDotGeneralOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToBatchNDOptions(); +inline void StablehloDotGeneralOptions::UnPackTo(StablehloDotGeneralOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = lhs_batching_dimensions(); if (_e) { _o->lhs_batching_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->lhs_batching_dimensions[_i] = _e->Get(_i); } } else { _o->lhs_batching_dimensions.resize(0); } } + { auto _e = rhs_batching_dimensions(); if (_e) { _o->rhs_batching_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_batching_dimensions[_i] = _e->Get(_i); } } else { _o->rhs_batching_dimensions.resize(0); } } + { auto _e = lhs_contracting_dimensions(); if (_e) { _o->lhs_contracting_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { 
_o->lhs_contracting_dimensions[_i] = _e->Get(_i); } } else { _o->lhs_contracting_dimensions.resize(0); } } + { auto _e = rhs_contracting_dimensions(); if (_e) { _o->rhs_contracting_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_contracting_dimensions[_i] = _e->Get(_i); } } else { _o->rhs_contracting_dimensions.resize(0); } } + { auto _e = precision_config(); if (_e) { _o->precision_config.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->precision_config[_i] = static_cast(_e->Get(_i)); } } else { _o->precision_config.resize(0); } } } -template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeOptions(); +inline ::flatbuffers::Offset StablehloDotGeneralOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloDotGeneralOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReducerOptions(); +inline ::flatbuffers::Offset CreateStablehloDotGeneralOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDotGeneralOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloDotGeneralOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _lhs_batching_dimensions = _o->lhs_batching_dimensions.size() ? _fbb.CreateVector(_o->lhs_batching_dimensions) : 0; + auto _rhs_batching_dimensions = _o->rhs_batching_dimensions.size() ? _fbb.CreateVector(_o->rhs_batching_dimensions) : 0; + auto _lhs_contracting_dimensions = _o->lhs_contracting_dimensions.size() ? 
_fbb.CreateVector(_o->lhs_contracting_dimensions) : 0; + auto _rhs_contracting_dimensions = _o->rhs_contracting_dimensions.size() ? _fbb.CreateVector(_o->rhs_contracting_dimensions) : 0; + auto _precision_config = _o->precision_config.size() ? _fbb.CreateVectorScalarCast(::flatbuffers::data(_o->precision_config), _o->precision_config.size()) : 0; + return tflite::CreateStablehloDotGeneralOptions( + _fbb, + _lhs_batching_dimensions, + _rhs_batching_dimensions, + _lhs_contracting_dimensions, + _rhs_contracting_dimensions, + _precision_config); } -template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { - return builtin_options_as_SubOptions(); +inline StablehloReduceWindowOptionsT *StablehloReduceWindowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloReduceWindowOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { - return builtin_options_as_DivOptions(); +inline void StablehloReduceWindowOptions::UnPackTo(StablehloReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = window_dimensions(); if (_e) { _o->window_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_dimensions[_i] = _e->Get(_i); } } else { _o->window_dimensions.resize(0); } } + { auto _e = window_strides(); if (_e) { _o->window_strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_strides[_i] = _e->Get(_i); } } else { _o->window_strides.resize(0); } } + { auto _e = base_dilations(); if (_e) { _o->base_dilations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->base_dilations[_i] = _e->Get(_i); } } else { _o->base_dilations.resize(0); } } + { auto _e = window_dilations(); if (_e) { 
_o->window_dilations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_dilations[_i] = _e->Get(_i); } } else { _o->window_dilations.resize(0); } } + { auto _e = padding(); if (_e) { _o->padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->padding[_i] = _e->Get(_i); } } else { _o->padding.resize(0); } } + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } } -template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { - return builtin_options_as_SqueezeOptions(); +inline ::flatbuffers::Offset StablehloReduceWindowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloReduceWindowOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_SequenceRNNOptions(); +inline ::flatbuffers::Offset CreateStablehloReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloReduceWindowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _window_dimensions = _o->window_dimensions.size() ? _fbb.CreateVector(_o->window_dimensions) : 0; + auto _window_strides = _o->window_strides.size() ? _fbb.CreateVector(_o->window_strides) : 0; + auto _base_dilations = _o->base_dilations.size() ? _fbb.CreateVector(_o->base_dilations) : 0; + auto _window_dilations = _o->window_dilations.size() ? _fbb.CreateVector(_o->window_dilations) : 0; + auto _padding = _o->padding.size() ? 
_fbb.CreateVector(_o->padding) : 0; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateStablehloReduceWindowOptions( + _fbb, + _window_dimensions, + _window_strides, + _base_dilations, + _window_dilations, + _padding, + _body_subgraph_index); } -template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_StridedSliceOptions(); +inline StablehloWhileOptionsT *StablehloWhileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloWhileOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpOptions(); +inline void StablehloWhileOptions::UnPackTo(StablehloWhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; } + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } } -template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { - return builtin_options_as_TopKV2Options(); +inline ::flatbuffers::Offset StablehloWhileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloWhileOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitOptions(); +inline ::flatbuffers::Offset CreateStablehloWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloWhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloWhileOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + auto _cond_subgraph_index = _o->cond_subgraph_index; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateStablehloWhileOptions( + _fbb, + _cond_subgraph_index, + _body_subgraph_index); } -template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogSoftmaxOptions(); +inline StablehloSortOptionsT *StablehloSortOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloSortOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { - return builtin_options_as_CastOptions(); +inline void StablehloSortOptions::UnPackTo(StablehloSortOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dimension(); _o->dimension = _e; } + { auto _e = is_stable(); _o->is_stable = _e; } + { auto _e = comparator_subgraph_index(); _o->comparator_subgraph_index = _e; } } -template<> inline const tflite::DequantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_DequantizeOptions(); +inline ::flatbuffers::Offset StablehloSortOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloSortOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::MaximumMinimumOptions *Operator::builtin_options_as() const { - return builtin_options_as_MaximumMinimumOptions(); +inline ::flatbuffers::Offset CreateStablehloSortOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSortOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloSortOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; + auto _dimension = _o->dimension; + auto _is_stable = _o->is_stable; + auto _comparator_subgraph_index = _o->comparator_subgraph_index; + return tflite::CreateStablehloSortOptions( + _fbb, + _dimension, + _is_stable, + _comparator_subgraph_index); } -template<> inline const tflite::ArgMaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMaxOptions(); +inline StablehloConcatenateOptionsT *StablehloConcatenateOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloConcatenateOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::LessOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessOptions(); +inline void StablehloConcatenateOptions::UnPackTo(StablehloConcatenateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dimension(); _o->dimension = _e; } } -template<> inline const tflite::NegOptions *Operator::builtin_options_as() const { - return builtin_options_as_NegOptions(); +inline ::flatbuffers::Offset StablehloConcatenateOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloConcatenateOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::PadV2Options *Operator::builtin_options_as() const { - return builtin_options_as_PadV2Options(); +inline ::flatbuffers::Offset CreateStablehloConcatenateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConcatenateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloConcatenateOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto 
_dimension = _o->dimension; + return tflite::CreateStablehloConcatenateOptions( + _fbb, + _dimension); } -template<> inline const tflite::GreaterOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterOptions(); +inline StablehloBroadcastInDimOptionsT *StablehloBroadcastInDimOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloBroadcastInDimOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::GreaterEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterEqualOptions(); +inline void StablehloBroadcastInDimOptions::UnPackTo(StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = broadcast_dimensions(); if (_e) { _o->broadcast_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->broadcast_dimensions[_i] = _e->Get(_i); } } else { _o->broadcast_dimensions.resize(0); } } } -template<> inline const tflite::LessEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessEqualOptions(); +inline ::flatbuffers::Offset StablehloBroadcastInDimOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloBroadcastInDimOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::SelectOptions *Operator::builtin_options_as() const { - return builtin_options_as_SelectOptions(); +inline ::flatbuffers::Offset CreateStablehloBroadcastInDimOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloBroadcastInDimOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloBroadcastInDimOptionsT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _broadcast_dimensions = _o->broadcast_dimensions.size() ? _fbb.CreateVector(_o->broadcast_dimensions) : 0; + return tflite::CreateStablehloBroadcastInDimOptions( + _fbb, + _broadcast_dimensions); } -template<> inline const tflite::SliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SliceOptions(); +inline StablehloCompareOptionsT *StablehloCompareOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloCompareOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::TransposeConvOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeConvOptions(); +inline void StablehloCompareOptions::UnPackTo(StablehloCompareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = comparison_direction(); _o->comparison_direction = _e; } + { auto _e = compare_type(); _o->compare_type = _e; } } -template<> inline const tflite::SparseToDenseOptions *Operator::builtin_options_as() const { - return builtin_options_as_SparseToDenseOptions(); +inline ::flatbuffers::Offset StablehloCompareOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloCompareOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::TileOptions *Operator::builtin_options_as() const { - return builtin_options_as_TileOptions(); +inline ::flatbuffers::Offset CreateStablehloCompareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCompareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloCompareOptionsT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _comparison_direction = _o->comparison_direction; + auto _compare_type = _o->compare_type; + return tflite::CreateStablehloCompareOptions( + _fbb, + _comparison_direction, + _compare_type); } -template<> inline const tflite::ExpandDimsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpandDimsOptions(); +inline StablehloDynamicSliceOptionsT *StablehloDynamicSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloDynamicSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::EqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_EqualOptions(); +inline void StablehloDynamicSliceOptions::UnPackTo(StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = slice_sizes(); if (_e) { _o->slice_sizes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slice_sizes[_i] = _e->Get(_i); } } else { _o->slice_sizes.resize(0); } } } -template<> inline const tflite::NotEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_NotEqualOptions(); +inline ::flatbuffers::Offset StablehloDynamicSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloDynamicSliceOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::ShapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ShapeOptions(); +inline ::flatbuffers::Offset CreateStablehloDynamicSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloDynamicSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct 
_VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloDynamicSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _slice_sizes = _o->slice_sizes.size() ? _fbb.CreateVector(_o->slice_sizes) : 0; + return tflite::CreateStablehloDynamicSliceOptions( + _fbb, + _slice_sizes); } -template<> inline const tflite::PowOptions *Operator::builtin_options_as() const { - return builtin_options_as_PowOptions(); +inline StablehloPadOptionsT *StablehloPadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloPadOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::ArgMinOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMinOptions(); +inline void StablehloPadOptions::UnPackTo(StablehloPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = edge_padding_low(); if (_e) { _o->edge_padding_low.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->edge_padding_low[_i] = _e->Get(_i); } } else { _o->edge_padding_low.resize(0); } } + { auto _e = edge_padding_high(); if (_e) { _o->edge_padding_high.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->edge_padding_high[_i] = _e->Get(_i); } } else { _o->edge_padding_high.resize(0); } } + { auto _e = interior_padding(); if (_e) { _o->interior_padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->interior_padding[_i] = _e->Get(_i); } } else { _o->interior_padding.resize(0); } } } -template<> inline const tflite::FakeQuantOptions *Operator::builtin_options_as() const { - return builtin_options_as_FakeQuantOptions(); +inline ::flatbuffers::Offset StablehloPadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloPadOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::PackOptions *Operator::builtin_options_as() const { - return builtin_options_as_PackOptions(); +inline ::flatbuffers::Offset CreateStablehloPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloPadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _edge_padding_low = _o->edge_padding_low.size() ? _fbb.CreateVector(_o->edge_padding_low) : 0; + auto _edge_padding_high = _o->edge_padding_high.size() ? _fbb.CreateVector(_o->edge_padding_high) : 0; + auto _interior_padding = _o->interior_padding.size() ? _fbb.CreateVector(_o->interior_padding) : 0; + return tflite::CreateStablehloPadOptions( + _fbb, + _edge_padding_low, + _edge_padding_high, + _interior_padding); } -template<> inline const tflite::LogicalOrOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalOrOptions(); +inline StablehloIotaOptionsT *StablehloIotaOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloIotaOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::OneHotOptions *Operator::builtin_options_as() const { - return builtin_options_as_OneHotOptions(); +inline void StablehloIotaOptions::UnPackTo(StablehloIotaOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = iota_dimension(); _o->iota_dimension = _e; } } -template<> inline const tflite::LogicalAndOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalAndOptions(); +inline ::flatbuffers::Offset 
StablehloIotaOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloIotaOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::LogicalNotOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalNotOptions(); +inline ::flatbuffers::Offset CreateStablehloIotaOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloIotaOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloIotaOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _iota_dimension = _o->iota_dimension; + return tflite::CreateStablehloIotaOptions( + _fbb, + _iota_dimension); } -template<> inline const tflite::UnpackOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnpackOptions(); +inline StablehloCustomCallOptionsT *StablehloCustomCallOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloCustomCallOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::FloorDivOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorDivOptions(); +inline void StablehloCustomCallOptions::UnPackTo(StablehloCustomCallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = call_target_name(); if (_e) _o->call_target_name = _e->str(); } + { auto _e = has_side_effect(); _o->has_side_effect = _e; } + { auto _e = backend_config(); if (_e) _o->backend_config = _e->str(); } + { auto _e = api_version(); _o->api_version = _e; } + { auto _e = called_computations(); if (_e) { _o->called_computations.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); 
_i++) { _o->called_computations[_i] = _e->Get(_i); } } else { _o->called_computations.resize(0); } } + { auto _e = custom_attributes(); if (_e) { _o->custom_attributes.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_attributes.begin()); } } } -template<> inline const tflite::SquareOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquareOptions(); +inline ::flatbuffers::Offset StablehloCustomCallOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloCustomCallOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::ZerosLikeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ZerosLikeOptions(); +inline ::flatbuffers::Offset CreateStablehloCustomCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCustomCallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloCustomCallOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _call_target_name = _o->call_target_name.empty() ? 0 : _fbb.CreateString(_o->call_target_name); + auto _has_side_effect = _o->has_side_effect; + auto _backend_config = _o->backend_config.empty() ? 0 : _fbb.CreateString(_o->backend_config); + auto _api_version = _o->api_version; + auto _called_computations = _o->called_computations.size() ? _fbb.CreateVector(_o->called_computations) : 0; + auto _custom_attributes = _o->custom_attributes.size() ? 
_fbb.CreateVector(_o->custom_attributes) : 0; + return tflite::CreateStablehloCustomCallOptions( + _fbb, + _call_target_name, + _has_side_effect, + _backend_config, + _api_version, + _called_computations, + _custom_attributes); } -template<> inline const tflite::FillOptions *Operator::builtin_options_as() const { - return builtin_options_as_FillOptions(); +inline StablehloReduceOptionsT *StablehloReduceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloReduceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceLSTMOptions(); +inline void StablehloReduceOptions::UnPackTo(StablehloReduceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dimensions(); if (_e) { _o->dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dimensions[_i] = _e->Get(_i); } } else { _o->dimensions.resize(0); } } + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } } -template<> inline const tflite::BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceRNNOptions(); +inline ::flatbuffers::Offset StablehloReduceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloReduceOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnidirectionalSequenceLSTMOptions(); +inline ::flatbuffers::Offset CreateStablehloReduceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloReduceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { 
+ (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloReduceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dimensions = _o->dimensions.size() ? _fbb.CreateVector(_o->dimensions) : 0; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateStablehloReduceOptions( + _fbb, + _dimensions, + _body_subgraph_index); } -template<> inline const tflite::FloorModOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorModOptions(); +inline StablehloSliceOptionsT *StablehloSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::RangeOptions *Operator::builtin_options_as() const { - return builtin_options_as_RangeOptions(); +inline void StablehloSliceOptions::UnPackTo(StablehloSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = start_indices(); if (_e) { _o->start_indices.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->start_indices[_i] = _e->Get(_i); } } else { _o->start_indices.resize(0); } } + { auto _e = limit_indices(); if (_e) { _o->limit_indices.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->limit_indices[_i] = _e->Get(_i); } } else { _o->limit_indices.resize(0); } } + { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } else { _o->strides.resize(0); } } } -template<> inline const tflite::ResizeNearestNeighborOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeNearestNeighborOptions(); +inline ::flatbuffers::Offset 
StablehloSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloSliceOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::LeakyReluOptions *Operator::builtin_options_as() const { - return builtin_options_as_LeakyReluOptions(); +inline ::flatbuffers::Offset CreateStablehloSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _start_indices = _o->start_indices.size() ? _fbb.CreateVector(_o->start_indices) : 0; + auto _limit_indices = _o->limit_indices.size() ? _fbb.CreateVector(_o->limit_indices) : 0; + auto _strides = _o->strides.size() ? _fbb.CreateVector(_o->strides) : 0; + return tflite::CreateStablehloSliceOptions( + _fbb, + _start_indices, + _limit_indices, + _strides); } -template<> inline const tflite::SquaredDifferenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquaredDifferenceOptions(); +inline StablehloConvolutionOptionsT *StablehloConvolutionOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloConvolutionOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::MirrorPadOptions *Operator::builtin_options_as() const { - return builtin_options_as_MirrorPadOptions(); +inline void StablehloConvolutionOptions::UnPackTo(StablehloConvolutionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = window_strides(); if (_e) { _o->window_strides.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); 
_i++) { _o->window_strides[_i] = _e->Get(_i); } } else { _o->window_strides.resize(0); } } + { auto _e = padding(); if (_e) { _o->padding.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->padding[_i] = _e->Get(_i); } } else { _o->padding.resize(0); } } + { auto _e = lhs_dilation(); if (_e) { _o->lhs_dilation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->lhs_dilation[_i] = _e->Get(_i); } } else { _o->lhs_dilation.resize(0); } } + { auto _e = rhs_dilation(); if (_e) { _o->rhs_dilation.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->rhs_dilation[_i] = _e->Get(_i); } } else { _o->rhs_dilation.resize(0); } } + { auto _e = window_reversal(); if (_e) { _o->window_reversal.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->window_reversal[_i] = _e->Get(_i) != 0; } } else { _o->window_reversal.resize(0); } } + { auto _e = input_batch_dimension(); _o->input_batch_dimension = _e; } + { auto _e = input_feature_dimension(); _o->input_feature_dimension = _e; } + { auto _e = input_spatial_dimensions(); if (_e) { _o->input_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->input_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->input_spatial_dimensions.resize(0); } } + { auto _e = kernel_input_feature_dimension(); _o->kernel_input_feature_dimension = _e; } + { auto _e = kernel_output_feature_dimension(); _o->kernel_output_feature_dimension = _e; } + { auto _e = kernel_spatial_dimensions(); if (_e) { _o->kernel_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernel_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->kernel_spatial_dimensions.resize(0); } } + { auto _e = output_batch_dimension(); _o->output_batch_dimension = _e; } + { auto _e = output_feature_dimension(); _o->output_feature_dimension = _e; } + { auto 
_e = output_spatial_dimensions(); if (_e) { _o->output_spatial_dimensions.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->output_spatial_dimensions[_i] = _e->Get(_i); } } else { _o->output_spatial_dimensions.resize(0); } } + { auto _e = feature_group_count(); _o->feature_group_count = _e; } + { auto _e = batch_group_count(); _o->batch_group_count = _e; } + { auto _e = precision_config(); if (_e) { _o->precision_config.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->precision_config[_i] = static_cast(_e->Get(_i)); } } else { _o->precision_config.resize(0); } } +} + +inline ::flatbuffers::Offset StablehloConvolutionOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloConvolutionOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateStablehloConvolutionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloConvolutionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloConvolutionOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _window_strides = _o->window_strides.size() ? _fbb.CreateVector(_o->window_strides) : 0; + auto _padding = _o->padding.size() ? _fbb.CreateVector(_o->padding) : 0; + auto _lhs_dilation = _o->lhs_dilation.size() ? _fbb.CreateVector(_o->lhs_dilation) : 0; + auto _rhs_dilation = _o->rhs_dilation.size() ? _fbb.CreateVector(_o->rhs_dilation) : 0; + auto _window_reversal = _o->window_reversal.size() ? _fbb.CreateVector(_o->window_reversal) : 0; + auto _input_batch_dimension = _o->input_batch_dimension; + auto _input_feature_dimension = _o->input_feature_dimension; + auto _input_spatial_dimensions = _o->input_spatial_dimensions.size() ? 
_fbb.CreateVector(_o->input_spatial_dimensions) : 0; + auto _kernel_input_feature_dimension = _o->kernel_input_feature_dimension; + auto _kernel_output_feature_dimension = _o->kernel_output_feature_dimension; + auto _kernel_spatial_dimensions = _o->kernel_spatial_dimensions.size() ? _fbb.CreateVector(_o->kernel_spatial_dimensions) : 0; + auto _output_batch_dimension = _o->output_batch_dimension; + auto _output_feature_dimension = _o->output_feature_dimension; + auto _output_spatial_dimensions = _o->output_spatial_dimensions.size() ? _fbb.CreateVector(_o->output_spatial_dimensions) : 0; + auto _feature_group_count = _o->feature_group_count; + auto _batch_group_count = _o->batch_group_count; + auto _precision_config = _o->precision_config.size() ? _fbb.CreateVectorScalarCast(::flatbuffers::data(_o->precision_config), _o->precision_config.size()) : 0; + return tflite::CreateStablehloConvolutionOptions( + _fbb, + _window_strides, + _padding, + _lhs_dilation, + _rhs_dilation, + _window_reversal, + _input_batch_dimension, + _input_feature_dimension, + _input_spatial_dimensions, + _kernel_input_feature_dimension, + _kernel_output_feature_dimension, + _kernel_spatial_dimensions, + _output_batch_dimension, + _output_feature_dimension, + _output_spatial_dimensions, + _feature_group_count, + _batch_group_count, + _precision_config); +} + +inline StablehloScatterOptionsT *StablehloScatterOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloScatterOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StablehloScatterOptions::UnPackTo(StablehloScatterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = indices_are_sorted(); _o->indices_are_sorted = _e; } + { auto _e = update_window_dims(); if (_e) { _o->update_window_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { 
_o->update_window_dims[_i] = _e->Get(_i); } } else { _o->update_window_dims.resize(0); } } + { auto _e = inserted_window_dims(); if (_e) { _o->inserted_window_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inserted_window_dims[_i] = _e->Get(_i); } } else { _o->inserted_window_dims.resize(0); } } + { auto _e = scatter_dims_to_operand_dims(); if (_e) { _o->scatter_dims_to_operand_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scatter_dims_to_operand_dims[_i] = _e->Get(_i); } } else { _o->scatter_dims_to_operand_dims.resize(0); } } + { auto _e = index_vector_dim(); _o->index_vector_dim = _e; } + { auto _e = unique_indices(); _o->unique_indices = _e; } + { auto _e = update_computation_subgraph_index(); _o->update_computation_subgraph_index = _e; } } -template<> inline const tflite::AbsOptions *Operator::builtin_options_as() const { - return builtin_options_as_AbsOptions(); +inline ::flatbuffers::Offset StablehloScatterOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloScatterOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::SplitVOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitVOptions(); +inline ::flatbuffers::Offset CreateStablehloScatterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloScatterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloScatterOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _indices_are_sorted = _o->indices_are_sorted; + auto _update_window_dims = _o->update_window_dims.size() ? 
_fbb.CreateVector(_o->update_window_dims) : 0; + auto _inserted_window_dims = _o->inserted_window_dims.size() ? _fbb.CreateVector(_o->inserted_window_dims) : 0; + auto _scatter_dims_to_operand_dims = _o->scatter_dims_to_operand_dims.size() ? _fbb.CreateVector(_o->scatter_dims_to_operand_dims) : 0; + auto _index_vector_dim = _o->index_vector_dim; + auto _unique_indices = _o->unique_indices; + auto _update_computation_subgraph_index = _o->update_computation_subgraph_index; + return tflite::CreateStablehloScatterOptions( + _fbb, + _indices_are_sorted, + _update_window_dims, + _inserted_window_dims, + _scatter_dims_to_operand_dims, + _index_vector_dim, + _unique_indices, + _update_computation_subgraph_index); } -template<> inline const tflite::UniqueOptions *Operator::builtin_options_as() const { - return builtin_options_as_UniqueOptions(); +inline StablehloCaseOptionsT *StablehloCaseOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloCaseOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::ReverseV2Options *Operator::builtin_options_as() const { - return builtin_options_as_ReverseV2Options(); +inline void StablehloCaseOptions::UnPackTo(StablehloCaseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = branch_subgraph_indices(); if (_e) { _o->branch_subgraph_indices.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->branch_subgraph_indices[_i] = _e->Get(_i); } } else { _o->branch_subgraph_indices.resize(0); } } } -template<> inline const tflite::AddNOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddNOptions(); +inline ::flatbuffers::Offset StablehloCaseOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCaseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return 
CreateStablehloCaseOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::GatherNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherNdOptions(); +inline ::flatbuffers::Offset CreateStablehloCaseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloCaseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloCaseOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _branch_subgraph_indices = _o->branch_subgraph_indices.size() ? _fbb.CreateVector(_o->branch_subgraph_indices) : 0; + return tflite::CreateStablehloCaseOptions( + _fbb, + _branch_subgraph_indices); } -template<> inline const tflite::CosOptions *Operator::builtin_options_as() const { - return builtin_options_as_CosOptions(); +inline StablehloRngBitGeneratorOptionsT *StablehloRngBitGeneratorOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloRngBitGeneratorOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::WhereOptions *Operator::builtin_options_as() const { - return builtin_options_as_WhereOptions(); +inline void StablehloRngBitGeneratorOptions::UnPackTo(StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = algorithm(); _o->algorithm = _e; } } -template<> inline const tflite::RankOptions *Operator::builtin_options_as() const { - return builtin_options_as_RankOptions(); +inline ::flatbuffers::Offset StablehloRngBitGeneratorOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloRngBitGeneratorOptions(_fbb, _o, _rehasher); } -template<> inline const 
tflite::ReverseSequenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReverseSequenceOptions(); +inline ::flatbuffers::Offset CreateStablehloRngBitGeneratorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloRngBitGeneratorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloRngBitGeneratorOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _algorithm = _o->algorithm; + return tflite::CreateStablehloRngBitGeneratorOptions( + _fbb, + _algorithm); } -template<> inline const tflite::MatrixDiagOptions *Operator::builtin_options_as() const { - return builtin_options_as_MatrixDiagOptions(); +inline Conv2DOptionsT *Conv2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Conv2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::QuantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_QuantizeOptions(); +inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } + { auto _e = quantized_bias_type(); _o->quantized_bias_type = _e; } } -template<> inline const tflite::MatrixSetDiagOptions *Operator::builtin_options_as() const { - return builtin_options_as_MatrixSetDiagOptions(); +inline ::flatbuffers::Offset Conv2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const 
Conv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv2DOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::HardSwishOptions *Operator::builtin_options_as() const { - return builtin_options_as_HardSwishOptions(); +inline ::flatbuffers::Offset CreateConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + auto _quantized_bias_type = _o->quantized_bias_type; + return tflite::CreateConv2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor, + _quantized_bias_type); } -template<> inline const tflite::IfOptions *Operator::builtin_options_as() const { - return builtin_options_as_IfOptions(); +inline Conv3DOptionsT *Conv3DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Conv3DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::WhileOptions *Operator::builtin_options_as() const { - return builtin_options_as_WhileOptions(); +inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_d(); _o->stride_d = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = 
fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_d_factor(); _o->dilation_d_factor = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } } -template<> inline const tflite::DepthToSpaceOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthToSpaceOptions(); +inline ::flatbuffers::Offset Conv3DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv3DOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::NonMaxSuppressionV4Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV4Options(); +inline ::flatbuffers::Offset CreateConv3DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_d = _o->stride_d; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_d_factor = _o->dilation_d_factor; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateConv3DOptions( + _fbb, + _padding, + _stride_d, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_d_factor, + _dilation_w_factor, + _dilation_h_factor); } -template<> inline const tflite::NonMaxSuppressionV5Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV5Options(); +inline Pool2DOptionsT *Pool2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto 
_o = std::unique_ptr(new Pool2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::ScatterNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_ScatterNdOptions(); +inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = filter_width(); _o->filter_width = _e; } + { auto _e = filter_height(); _o->filter_height = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -template<> inline const tflite::SelectV2Options *Operator::builtin_options_as() const { - return builtin_options_as_SelectV2Options(); +inline ::flatbuffers::Offset Pool2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePool2DOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::DensifyOptions *Operator::builtin_options_as() const { - return builtin_options_as_DensifyOptions(); +inline ::flatbuffers::Offset CreatePool2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _filter_width = _o->filter_width; + auto _filter_height = _o->filter_height; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreatePool2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _filter_width, + _filter_height, + 
_fused_activation_function); } -template<> inline const tflite::SegmentSumOptions *Operator::builtin_options_as() const { - return builtin_options_as_SegmentSumOptions(); +inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DepthwiseConv2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchMatMulOptions(); +inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = depth_multiplier(); _o->depth_multiplier = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } } -template<> inline const tflite::CumsumOptions *Operator::builtin_options_as() const { - return builtin_options_as_CumsumOptions(); +inline ::flatbuffers::Offset DepthwiseConv2DOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); } -template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOnceOptions(); +inline ::flatbuffers::Offset CreateDepthwiseConv2DOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _depth_multiplier = _o->depth_multiplier; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateDepthwiseConv2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _depth_multiplier, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor); } -template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as() const { - return builtin_options_as_BroadcastToOptions(); +inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ConcatEmbeddingsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -template <> -inline const tflite::Rfft2dOptions * -Operator::builtin_options_as() const { - return builtin_options_as_Rfft2dOptions(); +inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num_channels(); _o->num_channels = _e; } + { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } else { _o->num_columns_per_channel.resize(0); } } + { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } else { _o->embedding_dim_per_channel.resize(0); } } } -struct OperatorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void 
add_opcode_index(uint32_t opcode_index) { - fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(Operator::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); - } - void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) { - fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); - } - void add_builtin_options(flatbuffers::Offset builtin_options) { - fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); - } - void add_custom_options(flatbuffers::Offset> custom_options) { - fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); - } - void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) { - fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); - } - void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { - fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); - } - void add_intermediates(flatbuffers::Offset> intermediates) { - fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); - } - explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorBuilder &operator=(const OperatorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperator( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - flatbuffers::Offset> custom_options = 0, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - 
flatbuffers::Offset> mutating_variable_inputs = 0, - flatbuffers::Offset> intermediates = 0) { - OperatorBuilder builder_(_fbb); - builder_.add_intermediates(intermediates); - builder_.add_mutating_variable_inputs(mutating_variable_inputs); - builder_.add_custom_options(custom_options); - builder_.add_builtin_options(builtin_options); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_opcode_index(opcode_index); - builder_.add_custom_options_format(custom_options_format); - builder_.add_builtin_options_type(builtin_options_type); - return builder_.Finish(); +inline ::flatbuffers::Offset ConcatEmbeddingsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOperatorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - const std::vector *custom_options = nullptr, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - const std::vector *mutating_variable_inputs = nullptr, - const std::vector *intermediates = nullptr) { - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; - auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; - auto intermediates__ = intermediates ? 
_fbb.CreateVector(*intermediates) : 0; - return tflite::CreateOperator( +inline ::flatbuffers::Offset CreateConcatEmbeddingsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_channels = _o->num_channels; + auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0; + auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0; + return tflite::CreateConcatEmbeddingsOptions( _fbb, - opcode_index, - inputs__, - outputs__, - builtin_options_type, - builtin_options, - custom_options__, - custom_options_format, - mutating_variable_inputs__, - intermediates__); + _num_channels, + _num_columns_per_channel, + _embedding_dim_per_channel); } -flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubGraphT : public flatbuffers::NativeTable { - typedef SubGraph TableType; - std::vector> tensors; - std::vector inputs; - std::vector outputs; - std::vector> operators; - std::string name; - SubGraphT() { - } -}; - -struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubGraphT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TENSORS = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_OPERATORS = 10, - VT_NAME = 12 - }; - const flatbuffers::Vector> *tensors() const { - return GetPointer> *>(VT_TENSORS); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer 
*>(VT_OUTPUTS); - } - const flatbuffers::Vector> *operators() const { - return GetPointer> *>(VT_OPERATORS); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TENSORS) && - verifier.VerifyVector(tensors()) && - verifier.VerifyVectorOfTables(tensors()) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyOffset(verifier, VT_OPERATORS) && - verifier.VerifyVector(operators()) && - verifier.VerifyVectorOfTables(operators()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubGraphBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_tensors(flatbuffers::Offset>> tensors) { - fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); - } - void add_operators(flatbuffers::Offset>> operators) { - fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(SubGraph::VT_NAME, name); - } - explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SubGraphBuilder &operator=(const SubGraphBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - 
auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSubGraph( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> tensors = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - flatbuffers::Offset>> operators = 0, - flatbuffers::Offset name = 0) { - SubGraphBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_operators(operators); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_tensors(tensors); - return builder_.Finish(); +inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LSHProjectionOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline flatbuffers::Offset CreateSubGraphDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *tensors = nullptr, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - const std::vector> *operators = nullptr, - const char *name = nullptr) { - auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; - auto name__ = name ? 
_fbb.CreateString(name) : 0; - return tflite::CreateSubGraph( - _fbb, - tensors__, - inputs__, - outputs__, - operators__, - name__); +inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; } } -flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset LSHProjectionOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSHProjectionOptions(_fbb, _o, _rehasher); +} -struct BufferT : public flatbuffers::NativeTable { - typedef Buffer TableType; - std::vector data; - BufferT() { - } -}; +inline ::flatbuffers::Offset CreateLSHProjectionOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + return tflite::CreateLSHProjectionOptions( + _fbb, + _type); +} -struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BufferT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DATA = 4 - }; - const flatbuffers::Vector *data() const { - return GetPointer *>(VT_DATA); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DATA) && - verifier.VerifyVector(data()) && - verifier.EndTable(); - } - BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +inline SVDFOptionsT *SVDFOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SVDFOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} -struct BufferBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_data(flatbuffers::Offset> data) { - fbb_.AddOffset(Buffer::VT_DATA, data); - } - explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BufferBuilder &operator=(const BufferBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = rank(); _o->rank = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} -inline flatbuffers::Offset CreateBuffer( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> data = 0) { - BufferBuilder builder_(_fbb); - builder_.add_data(data); - return builder_.Finish(); +inline ::flatbuffers::Offset SVDFOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSVDFOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBufferDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *data = nullptr) { - if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } - auto data__ = data ? 
_fbb.CreateVector(*data) : 0; - return tflite::CreateBuffer( +inline ::flatbuffers::Offset CreateSVDFOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _rank = _o->rank; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateSVDFOptions( _fbb, - data__); + _rank, + _fused_activation_function, + _asymmetric_quantize_inputs); } -flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline RNNOptionsT *RNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} -struct MetadataT : public flatbuffers::NativeTable { - typedef Metadata TableType; - std::string name; - uint32_t buffer; - MetadataT() - : buffer(0) { - } -}; +inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} -struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MetadataT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NAME = 4, - VT_BUFFER = 6 - }; - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - uint32_t buffer() const { - return GetField(VT_BUFFER, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return 
VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyField(verifier, VT_BUFFER) && - verifier.EndTable(); - } - MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +inline ::flatbuffers::Offset RNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRNNOptions(_fbb, _o, _rehasher); +} -struct MetadataBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(Metadata::VT_NAME, name); - } - void add_buffer(uint32_t buffer) { - fbb_.AddElement(Metadata::VT_BUFFER, buffer, 0); - } - explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MetadataBuilder &operator=(const MetadataBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +inline ::flatbuffers::Offset CreateRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateRNNOptions( + _fbb, + _fused_activation_function, + _asymmetric_quantize_inputs); +} -inline flatbuffers::Offset CreateMetadata( - 
flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset name = 0, - uint32_t buffer = 0) { - MetadataBuilder builder_(_fbb); - builder_.add_buffer(buffer); - builder_.add_name(name); - return builder_.Finish(); +inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SequenceRNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline flatbuffers::Offset CreateMetadataDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *name = nullptr, - uint32_t buffer = 0) { - auto name__ = name ? _fbb.CreateString(name) : 0; - return tflite::CreateMetadata( - _fbb, - name__, - buffer); +inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset SequenceRNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSequenceRNNOptions(_fbb, _o, _rehasher); +} -struct TensorMapT : public flatbuffers::NativeTable { - typedef TensorMap TableType; - std::string name; - uint32_t tensor_index; - TensorMapT() - : tensor_index(0) { - } -}; +inline ::flatbuffers::Offset CreateSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateSequenceRNNOptions( + _fbb, + _time_major, + _fused_activation_function, + _asymmetric_quantize_inputs); +} -struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorMapT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NAME = 4, - VT_TENSOR_INDEX = 6 - }; - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - uint32_t tensor_index() const { - return GetField(VT_TENSOR_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyField(verifier, VT_TENSOR_INDEX) && - verifier.EndTable(); - } - TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BidirectionalSequenceRNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} -struct TensorMapBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(TensorMap::VT_NAME, name); - } - void add_tensor_index(uint32_t tensor_index) { - fbb_.AddElement(TensorMap::VT_TENSOR_INDEX, tensor_index, 0); - } - explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder 
&_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TensorMapBuilder &operator=(const TensorMapBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = merge_outputs(); _o->merge_outputs = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} -inline flatbuffers::Offset CreateTensorMap( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset name = 0, - uint32_t tensor_index = 0) { - TensorMapBuilder builder_(_fbb); - builder_.add_tensor_index(tensor_index); - builder_.add_name(name); - return builder_.Finish(); +inline ::flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTensorMapDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *name = nullptr, - uint32_t tensor_index = 0) { - auto name__ = name ? 
_fbb.CreateString(name) : 0; - return tflite::CreateTensorMap( +inline ::flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + auto _merge_outputs = _o->merge_outputs; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBidirectionalSequenceRNNOptions( _fbb, - name__, - tensor_index); + _time_major, + _fused_activation_function, + _merge_outputs, + _asymmetric_quantize_inputs); } -flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FullyConnectedOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} -struct SignatureDefT : public flatbuffers::NativeTable { - typedef SignatureDef TableType; - std::vector> inputs; - std::vector> outputs; - std::string method_name; - std::string key; - SignatureDefT() { - } -}; +inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = weights_format(); _o->weights_format = _e; } + { auto _e = keep_num_dims(); _o->keep_num_dims = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = 
quantized_bias_type(); _o->quantized_bias_type = _e; } +} -struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SignatureDefT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTS = 4, - VT_OUTPUTS = 6, - VT_METHOD_NAME = 8, - VT_KEY = 10 - }; - const flatbuffers::Vector> *inputs() const { - return GetPointer> *>(VT_INPUTS); - } - const flatbuffers::Vector> *outputs() const { - return GetPointer> *>(VT_OUTPUTS); - } - const flatbuffers::String *method_name() const { - return GetPointer(VT_METHOD_NAME); - } - const flatbuffers::String *key() const { - return GetPointer(VT_KEY); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - verifier.VerifyVectorOfTables(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - verifier.VerifyVectorOfTables(outputs()) && - VerifyOffset(verifier, VT_METHOD_NAME) && - verifier.VerifyString(method_name()) && - VerifyOffset(verifier, VT_KEY) && - verifier.VerifyString(key()) && - verifier.EndTable(); - } - SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +inline ::flatbuffers::Offset FullyConnectedOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateFullyConnectedOptions(_fbb, _o, _rehasher); +} -struct SignatureDefBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputs(flatbuffers::Offset>> inputs) { - fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs); - } 
- void add_outputs(flatbuffers::Offset>> outputs) { - fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs); - } - void add_method_name(flatbuffers::Offset method_name) { - fbb_.AddOffset(SignatureDef::VT_METHOD_NAME, method_name); - } - void add_key(flatbuffers::Offset key) { - fbb_.AddOffset(SignatureDef::VT_KEY, key); - } - explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SignatureDefBuilder &operator=(const SignatureDefBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +inline ::flatbuffers::Offset CreateFullyConnectedOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _weights_format = _o->weights_format; + auto _keep_num_dims = _o->keep_num_dims; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + auto _quantized_bias_type = _o->quantized_bias_type; + return tflite::CreateFullyConnectedOptions( + _fbb, + _fused_activation_function, + _weights_format, + _keep_num_dims, + _asymmetric_quantize_inputs, + _quantized_bias_type); +} + +inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SoftmaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = beta(); _o->beta = _e; } +} -inline flatbuffers::Offset CreateSignatureDef( - 
flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> inputs = 0, - flatbuffers::Offset>> outputs = 0, - flatbuffers::Offset method_name = 0, - flatbuffers::Offset key = 0) { - SignatureDefBuilder builder_(_fbb); - builder_.add_key(key); - builder_.add_method_name(method_name); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - return builder_.Finish(); +inline ::flatbuffers::Offset SoftmaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSoftmaxOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSignatureDefDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *inputs = nullptr, - const std::vector> *outputs = nullptr, - const char *method_name = nullptr, - const char *key = nullptr) { - auto inputs__ = inputs ? _fbb.CreateVector>(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector>(*outputs) : 0; - auto method_name__ = method_name ? _fbb.CreateString(method_name) : 0; - auto key__ = key ? 
_fbb.CreateString(key) : 0; - return tflite::CreateSignatureDef( +inline ::flatbuffers::Offset CreateSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _beta = _o->beta; + return tflite::CreateSoftmaxOptions( _fbb, - inputs__, - outputs__, - method_name__, - key__); + _beta); } -flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ConcatenationOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} -struct ModelT : public flatbuffers::NativeTable { - typedef Model TableType; - uint32_t version; - std::vector> operator_codes; - std::vector> subgraphs; - std::string description; - std::vector> buffers; - std::vector metadata_buffer; - std::vector> metadata; - std::vector> signature_defs; - ModelT() - : version(0) { - } -}; +inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} -struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ModelT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VERSION = 4, - VT_OPERATOR_CODES = 6, - VT_SUBGRAPHS = 8, - VT_DESCRIPTION = 10, - VT_BUFFERS = 12, - VT_METADATA_BUFFER = 14, - VT_METADATA = 16, - VT_SIGNATURE_DEFS = 18 - }; - uint32_t version() const { - 
return GetField(VT_VERSION, 0); - } - const flatbuffers::Vector> *operator_codes() const { - return GetPointer> *>(VT_OPERATOR_CODES); - } - const flatbuffers::Vector> *subgraphs() const { - return GetPointer> *>(VT_SUBGRAPHS); - } - const flatbuffers::String *description() const { - return GetPointer(VT_DESCRIPTION); - } - const flatbuffers::Vector> *buffers() const { - return GetPointer> *>(VT_BUFFERS); - } - const flatbuffers::Vector *metadata_buffer() const { - return GetPointer *>(VT_METADATA_BUFFER); - } - const flatbuffers::Vector> *metadata() const { - return GetPointer> *>(VT_METADATA); - } - const flatbuffers::Vector> *signature_defs() const { - return GetPointer> *>(VT_SIGNATURE_DEFS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VERSION) && - VerifyOffset(verifier, VT_OPERATOR_CODES) && - verifier.VerifyVector(operator_codes()) && - verifier.VerifyVectorOfTables(operator_codes()) && - VerifyOffset(verifier, VT_SUBGRAPHS) && - verifier.VerifyVector(subgraphs()) && - verifier.VerifyVectorOfTables(subgraphs()) && - VerifyOffset(verifier, VT_DESCRIPTION) && - verifier.VerifyString(description()) && - VerifyOffset(verifier, VT_BUFFERS) && - verifier.VerifyVector(buffers()) && - verifier.VerifyVectorOfTables(buffers()) && - VerifyOffset(verifier, VT_METADATA_BUFFER) && - verifier.VerifyVector(metadata_buffer()) && - VerifyOffset(verifier, VT_METADATA) && - verifier.VerifyVector(metadata()) && - verifier.VerifyVectorOfTables(metadata()) && - VerifyOffset(verifier, VT_SIGNATURE_DEFS) && - verifier.VerifyVector(signature_defs()) && - verifier.VerifyVectorOfTables(signature_defs()) && - verifier.EndTable(); - } - ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; +inline ::flatbuffers::Offset ConcatenationOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatenationOptions(_fbb, _o, _rehasher); +} -struct ModelBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_version(uint32_t version) { - fbb_.AddElement(Model::VT_VERSION, version, 0); - } - void add_operator_codes(flatbuffers::Offset>> operator_codes) { - fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); - } - void add_subgraphs(flatbuffers::Offset>> subgraphs) { - fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); - } - void add_description(flatbuffers::Offset description) { - fbb_.AddOffset(Model::VT_DESCRIPTION, description); - } - void add_buffers(flatbuffers::Offset>> buffers) { - fbb_.AddOffset(Model::VT_BUFFERS, buffers); - } - void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { - fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); - } - void add_metadata(flatbuffers::Offset>> metadata) { - fbb_.AddOffset(Model::VT_METADATA, metadata); - } - void add_signature_defs(flatbuffers::Offset>> signature_defs) { - fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs); - } - explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ModelBuilder &operator=(const ModelBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; +inline ::flatbuffers::Offset CreateConcatenationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateConcatenationOptions( + _fbb, + _axis, + _fused_activation_function); +} -inline flatbuffers::Offset CreateModel( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - flatbuffers::Offset>> operator_codes = 0, - flatbuffers::Offset>> subgraphs = 0, - flatbuffers::Offset description = 0, - flatbuffers::Offset>> buffers = 0, - flatbuffers::Offset> metadata_buffer = 0, - flatbuffers::Offset>> metadata = 0, - flatbuffers::Offset>> signature_defs = 0) { - ModelBuilder builder_(_fbb); - builder_.add_signature_defs(signature_defs); - builder_.add_metadata(metadata); - builder_.add_metadata_buffer(metadata_buffer); - builder_.add_buffers(buffers); - builder_.add_description(description); - builder_.add_subgraphs(subgraphs); - builder_.add_operator_codes(operator_codes); - builder_.add_version(version); - return builder_.Finish(); +inline AddOptionsT *AddOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AddOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline flatbuffers::Offset CreateModelDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - const std::vector> *operator_codes = nullptr, - const std::vector> *subgraphs = nullptr, - const char *description = nullptr, - const std::vector> *buffers = nullptr, - const std::vector *metadata_buffer = nullptr, - const std::vector> *metadata = nullptr, - const std::vector> *signature_defs = nullptr) { - auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; - auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; - auto description__ = description ? _fbb.CreateString(description) : 0; - auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; - auto metadata_buffer__ = metadata_buffer ? 
_fbb.CreateVector(*metadata_buffer) : 0; - auto metadata__ = metadata ? _fbb.CreateVector>(*metadata) : 0; - auto signature_defs__ = signature_defs ? _fbb.CreateVector>(*signature_defs) : 0; - return tflite::CreateModel( - _fbb, - version, - operator_codes__, - subgraphs__, - description__, - buffers__, - metadata_buffer__, - metadata__, - signature_defs__); +inline void AddOptions::UnPackTo(AddOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } } -flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +inline ::flatbuffers::Offset AddOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateAddOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _pot_scale_int16 = _o->pot_scale_int16; + return tflite::CreateAddOptions( + _fbb, + _fused_activation_function, + _pot_scale_int16); +} -inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CustomQuantizationT(); - UnPackTo(_o, _resolver); - return _o; +inline MulOptionsT *MulOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MulOptionsT()); + UnPackTo(_o.get(), _resolver); + return 
_o.release(); } -inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void MulOptions::UnPackTo(MulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCustomQuantization(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset MulOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMulOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMulOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16); - auto _custom = _o->custom.size() ? 
_fbb.CreateVector(_o->custom) : 0; - return tflite::CreateCustomQuantization( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateMulOptions( _fbb, - _custom); + _fused_activation_function); } -inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizationParametersT(); - UnPackTo(_o, _resolver); - return _o; +inline L2NormOptionsT *L2NormOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new L2NormOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } } - { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } } - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } } - { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } } - { auto _e = details_type(); _o->details.type = _e; } - { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); } - { auto _e = quantized_dimension(); 
_o->quantized_dimension = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizationParameters(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset L2NormOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateL2NormOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateL2NormOptions(::flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; - auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; - auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; - auto _zero_point = _o->zero_point.size() ? 
_fbb.CreateVector(_o->zero_point) : 0; - auto _details_type = _o->details.type; - auto _details = _o->details.Pack(_fbb); - auto _quantized_dimension = _o->quantized_dimension; - return tflite::CreateQuantizationParameters( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateL2NormOptions( _fbb, - _min, - _max, - _scale, - _zero_point, - _details_type, - _details, - _quantized_dimension); + _fused_activation_function); } -inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Int32VectorT(); - UnPackTo(_o, _resolver); - return _o; +inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LocalResponseNormalizationOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Int32Vector::UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } + { auto _e = radius(); _o->radius = _e; } + { auto _e = bias(); _o->bias = _e; } + { auto _e = alpha(); _o->alpha = _e; } + { auto _e = beta(); _o->beta = _e; } } -inline flatbuffers::Offset Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateInt32Vector(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset 
LocalResponseNormalizationOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLocalResponseNormalizationOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; - return tflite::CreateInt32Vector( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _radius = _o->radius; + auto _bias = _o->bias; + auto _alpha = _o->alpha; + auto _beta = _o->beta; + return tflite::CreateLocalResponseNormalizationOptions( _fbb, - _values); + _radius, + _bias, + _alpha, + _beta); } -inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Uint16VectorT(); - UnPackTo(_o, _resolver); - return _o; +inline LSTMOptionsT *LSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { 
(void)_o; (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = kernel_type(); _o->kernel_type = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUint16Vector(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4); - auto _values = _o->values.size() ? 
_fbb.CreateVector(_o->values) : 0; - return tflite::CreateUint16Vector( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _kernel_type = _o->kernel_type; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateLSTMOptions( _fbb, - _values); + _fused_activation_function, + _cell_clip, + _proj_clip, + _kernel_type, + _asymmetric_quantize_inputs); } -inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Uint8VectorT(); - UnPackTo(_o, _resolver); - return _o; +inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnidirectionalSequenceLSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = diagonal_recurrent_tensors(); _o->diagonal_recurrent_tensors = _e; } } -inline flatbuffers::Offset 
Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUint8Vector(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4); - auto _values = _o->values.size() ? 
_fbb.CreateVector(_o->values) : 0; - return tflite::CreateUint8Vector( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _time_major = _o->time_major; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + auto _diagonal_recurrent_tensors = _o->diagonal_recurrent_tensors; + return tflite::CreateUnidirectionalSequenceLSTMOptions( _fbb, - _values); + _fused_activation_function, + _cell_clip, + _proj_clip, + _time_major, + _asymmetric_quantize_inputs, + _diagonal_recurrent_tensors); } -inline DimensionMetadataT *DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DimensionMetadataT(); - UnPackTo(_o, _resolver); - return _o; +inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BidirectionalSequenceLSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = format(); _o->format = _e; } - { auto _e = dense_size(); _o->dense_size = _e; } - { auto _e = array_segments_type(); _o->array_segments.type = _e; } - { auto _e = array_segments(); if (_e) _o->array_segments.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver); } - { auto _e = array_indices_type(); _o->array_indices.type = _e; } - { auto _e = array_indices(); if (_e) 
_o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = merge_outputs(); _o->merge_outputs = _e; } + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDimensionMetadata(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _format = _o->format; - auto _dense_size = _o->dense_size; - auto _array_segments_type = _o->array_segments.type; - auto _array_segments = _o->array_segments.Pack(_fbb); - auto _array_indices_type = _o->array_indices.type; - auto _array_indices = _o->array_indices.Pack(_fbb); - return tflite::CreateDimensionMetadata( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const 
BidirectionalSequenceLSTMOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _merge_outputs = _o->merge_outputs; + auto _time_major = _o->time_major; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBidirectionalSequenceLSTMOptions( _fbb, - _format, - _dense_size, - _array_segments_type, - _array_segments, - _array_indices_type, - _array_indices); + _fused_activation_function, + _cell_clip, + _proj_clip, + _merge_outputs, + _time_major, + _asymmetric_quantize_inputs); } -inline SparsityParametersT *SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparsityParametersT(); - UnPackTo(_o, _resolver); - return _o; +inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ResizeBilinearOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } } - { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } } - { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim_metadata[_i] = 
std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } + { auto _e = align_corners(); _o->align_corners = _e; } + { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } } -inline flatbuffers::Offset SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparsityParameters(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ResizeBilinearOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateResizeBilinearOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateResizeBilinearOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0; - auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0; - auto _dim_metadata = _o->dim_metadata.size() ? 
_fbb.CreateVector> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateSparsityParameters( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + auto _half_pixel_centers = _o->half_pixel_centers; + return tflite::CreateResizeBilinearOptions( _fbb, - _traversal_order, - _block_map, - _dim_metadata); + _align_corners, + _half_pixel_centers); } -inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorT(); - UnPackTo(_o, _resolver); - return _o; +inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ResizeNearestNeighborOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } } - { auto _e = type(); _o->type = _e; } - { auto _e = buffer(); _o->buffer = _e; } - { auto _e = name(); if (_e) _o->name = _e->str(); } - { auto _e = quantization(); if (_e) _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); } - { auto _e = is_variable(); _o->is_variable = _e; } - { auto _e = sparsity(); if (_e) _o->sparsity = std::unique_ptr(_e->UnPack(_resolver)); } - { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for 
(flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } } + { auto _e = align_corners(); _o->align_corners = _e; } + { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } } -inline flatbuffers::Offset Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTensor(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ResizeNearestNeighborOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateResizeNearestNeighborOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; - auto _type = _o->type; - auto _buffer = _o->buffer; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; - auto _is_variable = _o->is_variable; - auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0; - auto _shape_signature = _o->shape_signature.size() ? 
_fbb.CreateVector(_o->shape_signature) : 0; - return tflite::CreateTensor( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + auto _half_pixel_centers = _o->half_pixel_centers; + return tflite::CreateResizeNearestNeighborOptions( _fbb, - _shape, - _type, - _buffer, - _name, - _quantization, - _is_variable, - _sparsity, - _shape_signature); + _align_corners, + _half_pixel_centers); } -inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Conv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline CallOptionsT *CallOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CallOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CallOptions::UnPackTo(CallOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } + { auto _e = subgraph(); _o->subgraph = _e; } } -inline flatbuffers::Offset Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConv2DOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset CallOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const ::flatbuffers::rehasher_function_t 
*_rehasher) { + return CreateCallOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateCallOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateConv2DOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _subgraph = _o->subgraph; + return tflite::CreateCallOptions( _fbb, - _padding, - _stride_w, - _stride_h, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); + _subgraph); } -inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Pool2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline PadOptionsT *PadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PadOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PadOptions::UnPackTo(PadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto 
_e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = filter_width(); _o->filter_width = _e; } - { auto _e = filter_height(); _o->filter_height = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePool2DOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset PadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreatePadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _filter_width = _o->filter_width; - auto _filter_height = _o->filter_height; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreatePool2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _filter_width, - _filter_height, - _fused_activation_function); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadOptions( + _fbb); } -inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) 
const { - auto _o = new DepthwiseConv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline PadV2OptionsT *PadV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PadV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = depth_multiplier(); _o->depth_multiplier = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } } -inline flatbuffers::Offset DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset PadV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreatePadV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _depth_multiplier = _o->depth_multiplier; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateDepthwiseConv2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _depth_multiplier, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadV2Options( + _fbb); } -inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatEmbeddingsOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ReshapeOptionsT *ReshapeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReshapeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = num_channels(); _o->num_channels = _e; } - { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } } - { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } } + { auto _e = new_shape(); 
if (_e) { _o->new_shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } else { _o->new_shape.resize(0); } } } -inline flatbuffers::Offset ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReshapeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReshapeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateReshapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_channels = _o->num_channels; - auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0; - auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0; - return tflite::CreateConcatEmbeddingsOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _new_shape = _o->new_shape.size() ? 
_fbb.CreateVector(_o->new_shape) : 0; + return tflite::CreateReshapeOptions( _fbb, - _num_channels, - _num_columns_per_channel, - _embedding_dim_per_channel); + _new_shape); } -inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSHProjectionOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SpaceToBatchNDOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = type(); _o->type = _e; } } -inline flatbuffers::Offset LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSHProjectionOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SpaceToBatchNDOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSpaceToBatchNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; - auto _type = _o->type; - return tflite::CreateLSHProjectionOptions( - _fbb, - _type); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSpaceToBatchNDOptions( + _fbb); } -inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SVDFOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BatchToSpaceNDOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = rank(); _o->rank = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSVDFOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BatchToSpaceNDOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBatchToSpaceNDOptions(::flatbuffers::FlatBufferBuilder &_fbb, const 
BatchToSpaceNDOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _rank = _o->rank; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateSVDFOptions( - _fbb, - _rank, - _fused_activation_function, - _asymmetric_quantize_inputs); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBatchToSpaceNDOptions( + _fbb); } -inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SkipGramOptionsT *SkipGramOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SkipGramOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = ngram_size(); _o->ngram_size = _e; } + { auto _e = max_skip_size(); _o->max_skip_size = _e; } + { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; } } -inline flatbuffers::Offset RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return 
CreateRNNOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SkipGramOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSkipGramOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSkipGramOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateRNNOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _ngram_size = _o->ngram_size; + auto _max_skip_size = _o->max_skip_size; + auto _include_all_ngrams = _o->include_all_ngrams; + return tflite::CreateSkipGramOptions( _fbb, - _fused_activation_function, - _asymmetric_quantize_inputs); + _ngram_size, + _max_skip_size, + _include_all_ngrams); } -inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SpaceToDepthOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = block_size(); _o->block_size = _e; } } -inline flatbuffers::Offset SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSequenceRNNOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SpaceToDepthOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSpaceToDepthOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateSequenceRNNOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return 
tflite::CreateSpaceToDepthOptions( _fbb, - _time_major, - _fused_activation_function, - _asymmetric_quantize_inputs); + _block_size); } -inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DepthToSpaceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = merge_outputs(); _o->merge_outputs = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = block_size(); _o->block_size = _e; } } -inline flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DepthToSpaceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset 
CreateDepthToSpaceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - auto _merge_outputs = _o->merge_outputs; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBidirectionalSequenceRNNOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return tflite::CreateDepthToSpaceOptions( _fbb, - _time_major, - _fused_activation_function, - _merge_outputs, - _asymmetric_quantize_inputs); + _block_size); } -inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FullyConnectedOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SubOptionsT *SubOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SubOptions::UnPackTo(SubOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = weights_format(); _o->weights_format = _e; } - { auto _e = keep_num_dims(); _o->keep_num_dims = _e; } - { auto _e = asymmetric_quantize_inputs(); 
_o->asymmetric_quantize_inputs = _e; } + { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } } -inline flatbuffers::Offset FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFullyConnectedOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SubOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSubOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _fused_activation_function = _o->fused_activation_function; - auto _weights_format = _o->weights_format; - auto _keep_num_dims = _o->keep_num_dims; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateFullyConnectedOptions( + auto _pot_scale_int16 = _o->pot_scale_int16; + return tflite::CreateSubOptions( _fbb, _fused_activation_function, - _weights_format, - _keep_num_dims, - _asymmetric_quantize_inputs); + _pot_scale_int16); } -inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline DivOptionsT 
*DivOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DivOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DivOptions::UnPackTo(DivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = beta(); _o->beta = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSoftmaxOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DivOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDivOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beta = _o->beta; - return tflite::CreateSoftmaxOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateDivOptions( _fbb, - _beta); + _fused_activation_function); } -inline ConcatenationOptionsT 
*ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatenationOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline TopKV2OptionsT *TopKV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TopKV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = axis(); _o->axis = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatenationOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset TopKV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTopKV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateTopKV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateConcatenationOptions( - _fbb, - _axis, - _fused_activation_function); + 
struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTopKV2Options( + _fbb); } -inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new EmbeddingLookupSparseOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } + { auto _e = combiner(); _o->combiner = _e; } } -inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateEmbeddingLookupSparseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; 
(void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateAddOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _combiner = _o->combiner; + return tflite::CreateEmbeddingLookupSparseOptions( _fbb, - _fused_activation_function, - _pot_scale_int16); + _combiner); } -inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MulOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline GatherOptionsT *GatherOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GatherOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = axis(); _o->axis = _e; } + { auto _e = batch_dims(); _o->batch_dims = _e; } } -inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMulOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset GatherOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset 
CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateGatherOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateMulOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _batch_dims = _o->batch_dims; + return tflite::CreateGatherOptions( _fbb, - _fused_activation_function); + _axis, + _batch_dims); } -inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new L2NormOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline TransposeOptionsT *TransposeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TransposeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateL2NormOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset 
TransposeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateTransposeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateL2NormOptions( - _fbb, - _fused_activation_function); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTransposeOptions( + _fbb); } -inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LocalResponseNormalizationOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ExpOptionsT *ExpOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ExpOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = radius(); _o->radius = _e; } - { auto _e = bias(); _o->bias = _e; } - { auto _e = alpha(); 
_o->alpha = _e; } - { auto _e = beta(); _o->beta = _e; } } -inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ExpOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateExpOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _radius = _o->radius; - auto _bias = _o->bias; - auto _alpha = _o->alpha; - auto _beta = _o->beta; - return tflite::CreateLocalResponseNormalizationOptions( - _fbb, - _radius, - _bias, - _alpha, - _beta); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpOptions( + _fbb); } -inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline CosOptionsT *CosOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CosOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CosOptions::UnPackTo(CosOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = kernel_type(); _o->kernel_type = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSTMOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset CosOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateCosOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateCosOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _kernel_type = _o->kernel_type; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _kernel_type, - _asymmetric_quantize_inputs); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; 
const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateCosOptions( + _fbb); } -inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ReducerOptionsT *ReducerOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReducerOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = keep_dims(); _o->keep_dims = _e; } } -inline flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReducerOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReducerOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline 
::flatbuffers::Offset CreateReducerOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateUnidirectionalSequenceLSTMOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _keep_dims = _o->keep_dims; + return tflite::CreateReducerOptions( _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _time_major, - _asymmetric_quantize_inputs); + _keep_dims); } -inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SqueezeOptionsT *SqueezeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SqueezeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = 
proj_clip(); _o->proj_clip = _e; } - { auto _e = merge_outputs(); _o->merge_outputs = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } else { _o->squeeze_dims.resize(0); } } } -inline flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SqueezeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSqueezeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSqueezeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _merge_outputs = _o->merge_outputs; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBidirectionalSequenceLSTMOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const 
::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0; + return tflite::CreateSqueezeOptions( _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _merge_outputs, - _time_major, - _asymmetric_quantize_inputs); + _squeeze_dims); } -inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeBilinearOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SplitOptionsT *SplitOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SplitOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } + { auto _e = num_splits(); _o->num_splits = _e; } } -inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeBilinearOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SplitOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSplitOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const 
::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeBilinearOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitOptions( _fbb, - _align_corners, - _half_pixel_centers); + _num_splits); } -inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeNearestNeighborOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SplitVOptionsT *SplitVOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SplitVOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } + { auto _e = num_splits(); _o->num_splits = _e; } } -inline flatbuffers::Offset ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SplitVOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, 
const SplitVOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitVOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSplitVOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeNearestNeighborOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitVOptions( _fbb, - _align_corners, - _half_pixel_centers); + _num_splits); } -inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StridedSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = subgraph(); _o->subgraph = _e; } + { auto _e = begin_mask(); _o->begin_mask = _e; } + { auto _e = 
end_mask(); _o->end_mask = _e; } + { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; } + { auto _e = new_axis_mask(); _o->new_axis_mask = _e; } + { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; } + { auto _e = offset(); _o->offset = _e; } } -inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset StridedSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStridedSliceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateStridedSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _subgraph = _o->subgraph; - return tflite::CreateCallOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _begin_mask = _o->begin_mask; + auto _end_mask = _o->end_mask; + auto _ellipsis_mask = _o->ellipsis_mask; + auto _new_axis_mask = _o->new_axis_mask; + auto _shrink_axis_mask = _o->shrink_axis_mask; + auto _offset = _o->offset; + return tflite::CreateStridedSliceOptions( _fbb, - _subgraph); + _begin_mask, + _end_mask, + _ellipsis_mask, + _new_axis_mask, + _shrink_axis_mask, + _offset); } -inline PadOptionsT *PadOptions::UnPack(const 
flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogSoftmaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LogSoftmaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLogSoftmaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogSoftmaxOptions( _fbb); } -inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o 
= new PadV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline CastOptionsT *CastOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CastOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CastOptions::UnPackTo(CastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = in_data_type(); _o->in_data_type = _e; } + { auto _e = out_data_type(); _o->out_data_type = _e; } } -inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadV2Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset CastOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateCastOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateCastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadV2Options( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _in_data_type = _o->in_data_type; + auto _out_data_type = _o->out_data_type; + return tflite::CreateCastOptions( + _fbb, + _in_data_type, + 
_out_data_type); } -inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReshapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline DequantizeOptionsT *DequantizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DequantizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } } } -inline flatbuffers::Offset ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReshapeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DequantizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDequantizeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateDequantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _new_shape = _o->new_shape.size() ? 
_fbb.CreateVector(_o->new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - _new_shape); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDequantizeOptions( + _fbb); } -inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToBatchNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MaximumMinimumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset MaximumMinimumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMaximumMinimumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSpaceToBatchNDOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMaximumMinimumOptions( _fbb); } -inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchToSpaceNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline TileOptionsT *TileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TileOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TileOptions::UnPackTo(TileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset TileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTileOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateTileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBatchToSpaceNDOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTileOptions( _fbb); } -inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SkipGramOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ArgMaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = ngram_size(); _o->ngram_size = _e; } - { auto _e = max_skip_size(); _o->max_skip_size = _e; } - { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; } + { auto _e = output_type(); _o->output_type = _e; } } -inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSkipGramOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ArgMaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMaxOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline 
::flatbuffers::Offset CreateArgMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _ngram_size = _o->ngram_size; - auto _max_skip_size = _o->max_skip_size; - auto _include_all_ngrams = _o->include_all_ngrams; - return tflite::CreateSkipGramOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMaxOptions( _fbb, - _ngram_size, - _max_skip_size, - _include_all_ngrams); + _output_type); } -inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToDepthOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ArgMinOptionsT *ArgMinOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ArgMinOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } + { auto _e = output_type(); _o->output_type = _e; } } -inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset 
ArgMinOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMinOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateArgMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateSpaceToDepthOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMinOptions( _fbb, - _block_size); + _output_type); } -inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthToSpaceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline GreaterOptionsT *GreaterOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GreaterOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } } -inline flatbuffers::Offset DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset GreaterOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateGreaterOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateDepthToSpaceOptions( - _fbb, - _block_size); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGreaterOptions( + _fbb); } -inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GreaterEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); 
_o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } + (void)_resolver; } -inline flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset GreaterEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterEqualOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateGreaterEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateSubOptions( - _fbb, - _fused_activation_function, - _pot_scale_int16); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGreaterEqualOptions( + _fbb); } -inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DivOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LessOptionsT *LessOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LessOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LessOptions::UnPackTo(LessOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } } -inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDivOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LessOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLessOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateDivOptions( - _fbb, - _fused_activation_function); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessOptions( + _fbb); } -inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TopKV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LessEqualOptionsT *LessEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new 
LessEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTopKV2Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LessEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessEqualOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLessEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTopKV2Options( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessEqualOptions( _fbb); } -inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EmbeddingLookupSparseOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline NegOptionsT *NegOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new 
NegOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void NegOptions::UnPackTo(NegOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = combiner(); _o->combiner = _e; } } -inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset NegOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateNegOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateNegOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _combiner = _o->combiner; - return tflite::CreateEmbeddingLookupSparseOptions( - _fbb, - _combiner); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNegOptions( + _fbb); } -inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline 
SelectOptionsT *SelectOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SelectOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SelectOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelectOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSelectOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateGatherOptions( - _fbb, - _axis); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSelectOptions( + _fbb); } -inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeOptionsT(); - UnPackTo(_o, _resolver); - 
return _o; +inline SliceOptionsT *SliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSliceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTransposeOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSliceOptions( _fbb); } -inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline TransposeConvOptionsT 
*TransposeConvOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TransposeConvOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = quantized_bias_type(); _o->quantized_bias_type = _e; } } -inline flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset TransposeConvOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeConvOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateTransposeConvOptions(::flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _quantized_bias_type = _o->quantized_bias_type; + return tflite::CreateTransposeConvOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _fused_activation_function, + _quantized_bias_type); } -inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CosOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ExpandDimsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCosOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ExpandDimsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpandDimsOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateExpandDimsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; 
} _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateCosOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpandDimsOptions( _fbb); } -inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReducerOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SparseToDenseOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = keep_dims(); _o->keep_dims = _e; } + { auto _e = validate_indices(); _o->validate_indices = _e; } } -inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReducerOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SparseToDenseOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSparseToDenseOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSparseToDenseOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _keep_dims = _o->keep_dims; - return tflite::CreateReducerOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _validate_indices = _o->validate_indices; + return tflite::CreateSparseToDenseOptions( _fbb, - _keep_dims); + _validate_indices); } -inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SqueezeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline EqualOptionsT *EqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new EqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } } } -inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSqueezeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset EqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateEqualOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - _squeeze_dims); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateEqualOptions( + _fbb); } -inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline NotEqualOptionsT *NotEqualOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NotEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } } -inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset NotEqualOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateNotEqualOptions(_fbb, _o, _rehasher); } -inline 
flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateNotEqualOptions(::flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitOptions( - _fbb, - _num_splits); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNotEqualOptions( + _fbb); } -inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitVOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ShapeOptionsT *ShapeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ShapeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } + { auto _e = out_type(); _o->out_type = _e; } } -inline flatbuffers::Offset SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitVOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ShapeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateShapeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateShapeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitVOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _out_type = _o->out_type; + return tflite::CreateShapeOptions( _fbb, - _num_splits); + _out_type); } -inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new StridedSliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline RankOptionsT *RankOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RankOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void RankOptions::UnPackTo(RankOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = begin_mask(); _o->begin_mask = _e; } - { auto _e = end_mask(); _o->end_mask = _e; } - { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; } - { auto _e = new_axis_mask(); _o->new_axis_mask = _e; } - { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; } } -inline flatbuffers::Offset 
StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateStridedSliceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset RankOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRankOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateRankOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _begin_mask = _o->begin_mask; - auto _end_mask = _o->end_mask; - auto _ellipsis_mask = _o->ellipsis_mask; - auto _new_axis_mask = _o->new_axis_mask; - auto _shrink_axis_mask = _o->shrink_axis_mask; - return tflite::CreateStridedSliceOptions( - _fbb, - _begin_mask, - _end_mask, - _ellipsis_mask, - _new_axis_mask, - _shrink_axis_mask); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRankOptions( + _fbb); } -inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogSoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline PowOptionsT *PowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PowOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PowOptions::UnPackTo(PowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset PowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePowOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreatePowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogSoftmaxOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePowOptions( _fbb); } -inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CastOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FakeQuantOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void CastOptions::UnPackTo(CastOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = in_data_type(); _o->in_data_type = _e; } - { auto _e = out_data_type(); _o->out_data_type = _e; } + { auto _e = min(); _o->min = _e; } + { auto _e = max(); _o->max = _e; } + { auto _e = num_bits(); _o->num_bits = _e; } + { auto _e = narrow_range(); _o->narrow_range = _e; } } -inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCastOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset FakeQuantOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateFakeQuantOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateFakeQuantOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _in_data_type = _o->in_data_type; - auto _out_data_type = _o->out_data_type; - return tflite::CreateCastOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min; + auto _max = _o->max; + auto _num_bits = _o->num_bits; + auto _narrow_range = _o->narrow_range; + return tflite::CreateFakeQuantOptions( _fbb, - _in_data_type, - _out_data_type); 
+ _min, + _max, + _num_bits, + _narrow_range); } -inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DequantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline PackOptionsT *PackOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PackOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PackOptions::UnPackTo(PackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = values_count(); _o->values_count = _e; } + { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDequantizeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset PackOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePackOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreatePackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDequantizeOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const ::flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _values_count = _o->values_count; + auto _axis = _o->axis; + return tflite::CreatePackOptions( + _fbb, + _values_count, + _axis); } -inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MaximumMinimumOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalOrOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LogicalOrOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalOrOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLogicalOrOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return 
tflite::CreateMaximumMinimumOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalOrOptions( _fbb); } -inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TileOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline OneHotOptionsT *OneHotOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OneHotOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTileOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset OneHotOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateOneHotOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateOneHotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTileOptions( - _fbb); + struct 
_VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + return tflite::CreateOneHotOptions( + _fbb, + _axis); } -inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline AbsOptionsT *AbsOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AbsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } } -inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMaxOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset AbsOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateAbsOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateAbsOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return 
tflite::CreateArgMaxOptions( - _fbb, - _output_type); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAbsOptions( + _fbb); } -inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMinOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline HardSwishOptionsT *HardSwishOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HardSwishOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } } -inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMinOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset HardSwishOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateHardSwishOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateHardSwishOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMinOptions( - _fbb, - _output_type); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHardSwishOptions( + _fbb); } -inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalAndOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LogicalAndOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalAndOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLogicalAndOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalAndOptions( _fbb); } -inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalNotOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterEqualOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LogicalNotOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalNotOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateLogicalNotOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; 
const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterEqualOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalNotOptions( _fbb); } -inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline UnpackOptionsT *UnpackOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnpackOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = num(); _o->num = _e; } + { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UnpackOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnpackOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUnpackOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; 
const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num = _o->num; + auto _axis = _o->axis; + return tflite::CreateUnpackOptions( + _fbb, + _num, + _axis); } -inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline FloorDivOptionsT *FloorDivOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FloorDivOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessEqualOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset FloorDivOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorDivOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateFloorDivOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessEqualOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorDivOptions( _fbb); } -inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NegOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SquareOptionsT *SquareOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SquareOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNegOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SquareOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquareOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSquareOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNegOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSquareOptions( _fbb); } -inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ZerosLikeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ZerosLikeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateZerosLikeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateZerosLikeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va 
= { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateZerosLikeOptions( _fbb); } -inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline FillOptionsT *FillOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FillOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void FillOptions::UnPackTo(FillOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSliceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset FillOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateFillOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateFillOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSliceOptions( + struct _VectorArgs { 
::flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFillOptions( _fbb); } -inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeConvOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline FloorModOptionsT *FloorModOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FloorModOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } } -inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeConvOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset FloorModOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorModOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateFloorModOptions(::flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; 
const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - return tflite::CreateTransposeConvOptions( - _fbb, - _padding, - _stride_w, - _stride_h); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorModOptions( + _fbb); } -inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpandDimsOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline RangeOptionsT *RangeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RangeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpandDimsOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset RangeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRangeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateRangeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { 
(void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpandDimsOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRangeOptions( _fbb); } -inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparseToDenseOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LeakyReluOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = validate_indices(); _o->validate_indices = _e; } + { auto _e = alpha(); _o->alpha = _e; } } -inline flatbuffers::Offset SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparseToDenseOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset LeakyReluOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateLeakyReluOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset 
CreateLeakyReluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _validate_indices = _o->validate_indices; - return tflite::CreateSparseToDenseOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _alpha = _o->alpha; + return tflite::CreateLeakyReluOptions( _fbb, - _validate_indices); + _alpha); } -inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SquaredDifferenceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEqualOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SquaredDifferenceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset 
CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSquaredDifferenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateEqualOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSquaredDifferenceOptions( _fbb); } -inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NotEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MirrorPadOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = mode(); _o->mode = _e; } } -inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNotEqualOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset MirrorPadOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMirrorPadOptions(_fbb, _o, 
_rehasher); } -inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMirrorPadOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNotEqualOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _mode = _o->mode; + return tflite::CreateMirrorPadOptions( + _fbb, + _mode); } -inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ShapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline UniqueOptionsT *UniqueOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UniqueOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = out_type(); _o->out_type = _e; } + { auto _e = idx_out_type(); _o->idx_out_type = _e; } } -inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateShapeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UniqueOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUniqueOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUniqueOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _out_type = _o->out_type; - return tflite::CreateShapeOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _idx_out_type = _o->idx_out_type; + return tflite::CreateUniqueOptions( _fbb, - _out_type); + _idx_out_type); } -inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RankOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ReverseV2OptionsT *ReverseV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReverseV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRankOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReverseV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const 
ReverseV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateReverseV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRankOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateReverseV2Options( _fbb); } -inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PowOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline AddNOptionsT *AddNOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AddNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePowOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset AddNOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddNOptions(_fbb, 
_o, _rehasher); } -inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateAddNOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePowOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAddNOptions( _fbb); } -inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FakeQuantOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline GatherNdOptionsT *GatherNdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GatherNdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = min(); _o->min = _e; } - { auto _e = max(); _o->max = _e; } - { auto _e = num_bits(); _o->num_bits = _e; } - { auto _e = narrow_range(); _o->narrow_range = _e; } } -inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFakeQuantOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset GatherNdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, 
const GatherNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherNdOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateGatherNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min; - auto _max = _o->max; - auto _num_bits = _o->num_bits; - auto _narrow_range = _o->narrow_range; - return tflite::CreateFakeQuantOptions( - _fbb, - _min, - _max, - _num_bits, - _narrow_range); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGatherNdOptions( + _fbb); } -inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PackOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline WhereOptionsT *WhereOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new WhereOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = values_count(); _o->values_count = _e; } - { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* 
_o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePackOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset WhereOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhereOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateWhereOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values_count = _o->values_count; - auto _axis = _o->axis; - return tflite::CreatePackOptions( - _fbb, - _values_count, - _axis); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateWhereOptions( + _fbb); } -inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalOrOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReverseSequenceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = seq_dim(); _o->seq_dim = _e; 
} + { auto _e = batch_dim(); _o->batch_dim = _e; } } -inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalOrOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReverseSequenceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseSequenceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateReverseSequenceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalOrOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _seq_dim = _o->seq_dim; + auto _batch_dim = _o->batch_dim; + return tflite::CreateReverseSequenceOptions( + _fbb, + _seq_dim, + _batch_dim); } -inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OneHotOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MatrixDiagOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = axis(); _o->axis = _e; } } -inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOneHotOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset MatrixDiagOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixDiagOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMatrixDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateOneHotOptions( - _fbb, - _axis); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMatrixDiagOptions( + _fbb); } -inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AbsOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline QuantizeOptionsT *QuantizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new QuantizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAbsOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset QuantizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateQuantizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateAbsOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateQuantizeOptions( _fbb); } -inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HardSwishOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MatrixSetDiagOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHardSwishOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset MatrixSetDiagOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMatrixSetDiagOptions(::flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHardSwishOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMatrixSetDiagOptions( _fbb); } -inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalAndOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline IfOptionsT *IfOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new IfOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void IfOptions::UnPackTo(IfOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } + { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } } -inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalAndOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset IfOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateIfOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateIfOptions(::flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalAndOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _then_subgraph_index = _o->then_subgraph_index; + auto _else_subgraph_index = _o->else_subgraph_index; + return tflite::CreateIfOptions( + _fbb, + _then_subgraph_index, + _else_subgraph_index); } -inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalNotOptionsT(); - UnPackTo(_o, _resolver); - 
return _o; +inline CallOnceOptionsT *CallOnceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CallOnceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } } -inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalNotOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset CallOnceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateCallOnceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateCallOnceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalNotOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _init_subgraph_index = _o->init_subgraph_index; + return tflite::CreateCallOnceOptions( + _fbb, + _init_subgraph_index); } -inline UnpackOptionsT 
*UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnpackOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline WhileOptionsT *WhileOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new WhileOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = num(); _o->num = _e; } - { auto _e = axis(); _o->axis = _e; } + { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; } + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } } -inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnpackOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset WhileOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhileOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateWhileOptions(::flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num = _o->num; - auto _axis = _o->axis; - return tflite::CreateUnpackOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* 
__o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _cond_subgraph_index = _o->cond_subgraph_index; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateWhileOptions( _fbb, - _num, - _axis); + _cond_subgraph_index, + _body_subgraph_index); } -inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorDivOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NonMaxSuppressionV4OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorDivOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset NonMaxSuppressionV4Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateNonMaxSuppressionV4Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; 
const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorDivOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV4Options( _fbb); } -inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquareOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NonMaxSuppressionV5OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquareOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset NonMaxSuppressionV5Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateNonMaxSuppressionV5Options(::flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { 
(void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquareOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV5Options( _fbb); } -inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ZerosLikeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ScatterNdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateZerosLikeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ScatterNdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateScatterNdOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateScatterNdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const ::flatbuffers::rehasher_function_t 
*_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateZerosLikeOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateScatterNdOptions( _fbb); } -inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FillOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SelectV2OptionsT *SelectV2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SelectV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFillOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SelectV2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelectV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSelectV2Options(::flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFillOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSelectV2Options( _fbb); } -inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorModOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline DensifyOptionsT *DensifyOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DensifyOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorModOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DensifyOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDensifyOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateDensifyOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorModOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDensifyOptions( _fbb); } -inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RangeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SegmentSumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRangeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SegmentSumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSegmentSumOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRangeOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSegmentSumOptions( _fbb); } -inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LeakyReluOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BatchMatMulOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = alpha(); _o->alpha = _e; } + { auto _e = adj_x(); _o->adj_x = _e; } + { auto _e = adj_y(); _o->adj_y = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLeakyReluOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BatchMatMulOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchMatMulOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBatchMatMulOptions(::flatbuffers::FlatBufferBuilder 
&_fbb, const BatchMatMulOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _alpha = _o->alpha; - return tflite::CreateLeakyReluOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _adj_x = _o->adj_x; + auto _adj_y = _o->adj_y; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBatchMatMulOptions( _fbb, - _alpha); + _adj_x, + _adj_y, + _asymmetric_quantize_inputs); } -inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquaredDifferenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline CumsumOptionsT *CumsumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CumsumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = exclusive(); _o->exclusive = _e; } + { auto _e = reverse(); _o->reverse = _e; } } -inline flatbuffers::Offset SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset CumsumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateCumsumOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateCumsumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquaredDifferenceOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _exclusive = _o->exclusive; + auto _reverse = _o->reverse; + return tflite::CreateCumsumOptions( + _fbb, + _exclusive, + _reverse); } -inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MirrorPadOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BroadcastToOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = mode(); _o->mode = _e; } } -inline flatbuffers::Offset MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return 
CreateMirrorPadOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BroadcastToOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBroadcastToOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBroadcastToOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _mode = _o->mode; - return tflite::CreateMirrorPadOptions( - _fbb, - _mode); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBroadcastToOptions( + _fbb); } -inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UniqueOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Rfft2dOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = idx_out_type(); _o->idx_out_type = _e; } } -inline flatbuffers::Offset UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUniqueOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset Rfft2dOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRfft2dOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateRfft2dOptions(::flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _idx_out_type = _o->idx_out_type; - return tflite::CreateUniqueOptions( - _fbb, - _idx_out_type); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRfft2dOptions( + _fbb); } -inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline HashtableOptionsT *HashtableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = table_id(); _o->table_id = _e; } + { auto _e = key_dtype(); 
_o->key_dtype = _e; } + { auto _e = value_dtype(); _o->value_dtype = _e; } } -inline flatbuffers::Offset ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseV2Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset HashtableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateHashtableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateReverseV2Options( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _table_id = _o->table_id; + auto _key_dtype = _o->key_dtype; + auto _value_dtype = _o->value_dtype; + return tflite::CreateHashtableOptions( + _fbb, + _table_id, + _key_dtype, + _value_dtype); } -inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableFindOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void AddNOptions::UnPackTo(AddNOptionsT 
*_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddNOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset HashtableFindOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableFindOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateHashtableFindOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateAddNOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableFindOptions( _fbb); } -inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableImportOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void 
GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherNdOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset HashtableImportOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableImportOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateHashtableImportOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGatherNdOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableImportOptions( _fbb); } -inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhereOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableSizeOptionsT()); + UnPackTo(_o.get(), _resolver); 
+ return _o.release(); } -inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhereOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset HashtableSizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableSizeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateHashtableSizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateWhereOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableSizeOptions( _fbb); } -inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseSequenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline VarHandleOptionsT *VarHandleOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new VarHandleOptionsT()); + UnPackTo(_o.get(), 
_resolver); + return _o.release(); } -inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = seq_dim(); _o->seq_dim = _e; } - { auto _e = batch_dim(); _o->batch_dim = _e; } + { auto _e = container(); if (_e) _o->container = _e->str(); } + { auto _e = shared_name(); if (_e) _o->shared_name = _e->str(); } } -inline flatbuffers::Offset ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseSequenceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset VarHandleOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateVarHandleOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateVarHandleOptions(::flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _seq_dim = _o->seq_dim; - auto _batch_dim = _o->batch_dim; - return tflite::CreateReverseSequenceOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const VarHandleOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _container = _o->container.empty() ? 
0 : _fbb.CreateString(_o->container); + auto _shared_name = _o->shared_name.empty() ? 0 : _fbb.CreateString(_o->shared_name); + return tflite::CreateVarHandleOptions( _fbb, - _seq_dim, - _batch_dim); + _container, + _shared_name); } -inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ReadVariableOptionsT *ReadVariableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReadVariableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixDiagOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReadVariableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReadVariableOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateReadVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return 
tflite::CreateMatrixDiagOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReadVariableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateReadVariableOptions( _fbb); } -inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline AssignVariableOptionsT *AssignVariableOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AssignVariableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset AssignVariableOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateAssignVariableOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateAssignVariableOptions(::flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateQuantizeOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const AssignVariableOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAssignVariableOptions( _fbb); } -inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixSetDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline RandomOptionsT *RandomOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RandomOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void RandomOptions::UnPackTo(RandomOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = seed(); _o->seed = _e; } + { auto _e = seed2(); _o->seed2 = _e; } } -inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset RandomOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRandomOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateRandomOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; 
const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixSetDiagOptions( - _fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RandomOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _seed = _o->seed; + auto _seed2 = _o->seed2; + return tflite::CreateRandomOptions( + _fbb, + _seed, + _seed2); } -inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new IfOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BucketizeOptionsT *BucketizeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BucketizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BucketizeOptions::UnPackTo(BucketizeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } - { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } + { auto _e = boundaries(); if (_e) { _o->boundaries.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->boundaries[_i] = _e->Get(_i); } } else { _o->boundaries.resize(0); } } } -inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateIfOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BucketizeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBucketizeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset 
CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBucketizeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _then_subgraph_index = _o->then_subgraph_index; - auto _else_subgraph_index = _o->else_subgraph_index; - return tflite::CreateIfOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BucketizeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _boundaries = _o->boundaries.size() ? _fbb.CreateVector(_o->boundaries) : 0; + return tflite::CreateBucketizeOptions( _fbb, - _then_subgraph_index, - _else_subgraph_index); + _boundaries); } -inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOnceOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline GeluOptionsT *GeluOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GeluOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GeluOptions::UnPackTo(GeluOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } + { auto _e = approximate(); _o->approximate = _e; } } -inline flatbuffers::Offset CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - 
return CreateCallOnceOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset GeluOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateGeluOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateGeluOptions(::flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _init_subgraph_index = _o->init_subgraph_index; - return tflite::CreateCallOnceOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GeluOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _approximate = _o->approximate; + return tflite::CreateGeluOptions( _fbb, - _init_subgraph_index); + _approximate); } -inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhileOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline DynamicUpdateSliceOptionsT *DynamicUpdateSliceOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DynamicUpdateSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DynamicUpdateSliceOptions::UnPackTo(DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = 
_e; } - { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } } -inline flatbuffers::Offset WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhileOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DynamicUpdateSliceOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDynamicUpdateSliceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateDynamicUpdateSliceOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _cond_subgraph_index = _o->cond_subgraph_index; - auto _body_subgraph_index = _o->body_subgraph_index; - return tflite::CreateWhileOptions( - _fbb, - _cond_subgraph_index, - _body_subgraph_index); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DynamicUpdateSliceOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDynamicUpdateSliceOptions( + _fbb); +} + +inline UnsortedSegmentProdOptionsT *UnsortedSegmentProdOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentProdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnsortedSegmentProdOptions::UnPackTo(UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + 
(void)_o; + (void)_resolver; +} + +inline ::flatbuffers::Offset UnsortedSegmentProdOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentProdOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateUnsortedSegmentProdOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentProdOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentProdOptions( + _fbb); } -inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV4OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline UnsortedSegmentMaxOptionsT *UnsortedSegmentMaxOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentMaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UnsortedSegmentMaxOptions::UnPackTo(UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UnsortedSegmentMaxOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const 
::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentMaxOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUnsortedSegmentMaxOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV4Options( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMaxOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentMaxOptions( _fbb); } -inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV5OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline UnsortedSegmentSumOptionsT *UnsortedSegmentSumOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentSumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UnsortedSegmentSumOptions::UnPackTo(UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher) { - return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UnsortedSegmentSumOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentSumOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUnsortedSegmentSumOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV5Options( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentSumOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentSumOptions( _fbb); } -inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ScatterNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline ATan2OptionsT *ATan2Options::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ATan2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ATan2Options::UnPackTo(ATan2OptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset 
ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateScatterNdOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ATan2Options::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateATan2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateATan2Options(::flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateScatterNdOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ATan2OptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateATan2Options( _fbb); } -inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline UnsortedSegmentMinOptionsT *UnsortedSegmentMinOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentMinOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void UnsortedSegmentMinOptions::UnPackTo(UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline 
flatbuffers::Offset SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectV2Options(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset UnsortedSegmentMinOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentMinOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateUnsortedSegmentMinOptions(::flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectV2Options( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMinOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentMinOptions( _fbb); } -inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DensifyOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline SignOptionsT *SignOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SignOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SignOptions::UnPackTo(SignOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; 
(void)_resolver; } -inline flatbuffers::Offset DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDensifyOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset SignOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSignOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSignOptions(::flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDensifyOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SignOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSignOptions( _fbb); } -inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SegmentSumOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BitcastOptionsT *BitcastOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BitcastOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BitcastOptions::UnPackTo(BitcastOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset 
SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSegmentSumOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BitcastOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBitcastOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBitcastOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitcastOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSegmentSumOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BitcastOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBitcastOptions( _fbb); } -inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchMatMulOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline BitwiseXorOptionsT *BitwiseXorOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BitwiseXorOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BitwiseXorOptions::UnPackTo(BitwiseXorOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = adj_x(); 
_o->adj_x = _e; } - { auto _e = adj_y(); _o->adj_y = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } -inline flatbuffers::Offset BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchMatMulOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset BitwiseXorOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBitwiseXorOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBitwiseXorOptions(::flatbuffers::FlatBufferBuilder &_fbb, const BitwiseXorOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _adj_x = _o->adj_x; - auto _adj_y = _o->adj_y; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBatchMatMulOptions( - _fbb, - _adj_x, - _adj_y, - _asymmetric_quantize_inputs); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BitwiseXorOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBitwiseXorOptions( + _fbb); } -inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CumsumOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline RightShiftOptionsT *RightShiftOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new 
RightShiftOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void RightShiftOptions::UnPackTo(RightShiftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = exclusive(); _o->exclusive = _e; } - { auto _e = reverse(); _o->reverse = _e; } } -inline flatbuffers::Offset CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCumsumOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset RightShiftOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateRightShiftOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateRightShiftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const RightShiftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _exclusive = _o->exclusive; - auto _reverse = _o->reverse; - return tflite::CreateCumsumOptions( - _fbb, - _exclusive, - _reverse); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RightShiftOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRightShiftOptions( + _fbb); } -inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BroadcastToOptionsT(); - 
UnPackTo(_o, _resolver); - return _o; +inline DilateOptionsT *DilateOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DilateOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DilateOptions::UnPackTo(DilateOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBroadcastToOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset DilateOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateDilateOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateDilateOptions(::flatbuffers::FlatBufferBuilder &_fbb, const DilateOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBroadcastToOptions( + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const DilateOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDilateOptions( _fbb); } -inline Rfft2dOptionsT *Rfft2dOptions::UnPack( - const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Rfft2dOptionsT(); - UnPackTo(_o, _resolver); - 
return _o; +inline ReduceWindowOptionsT *ReduceWindowOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReduceWindowOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Rfft2dOptions::UnPackTo( - Rfft2dOptionsT *_o, - const flatbuffers::resolver_function_t *_resolver) const { +inline void ReduceWindowOptions::UnPackTo(ReduceWindowOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = reduce_function(); _o->reduce_function = _e; } } -inline flatbuffers::Offset Rfft2dOptions::Pack( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRfft2dOptions(_fbb, _o, _rehasher); +inline ::flatbuffers::Offset ReduceWindowOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateReduceWindowOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateReduceWindowOptions(::flatbuffers::FlatBufferBuilder &_fbb, const ReduceWindowOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { - flatbuffers::FlatBufferBuilder *__fbb; - const Rfft2dOptionsT *__o; - const flatbuffers::rehasher_function_t *__rehasher; - } _va = {&_fbb, _o, _rehasher}; - (void)_va; - return tflite::CreateRfft2dOptions(_fbb); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ReduceWindowOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _reduce_function = _o->reduce_function; + return tflite::CreateReduceWindowOptions( + _fbb, + _reduce_function); } -inline 
OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorCodeT(); - UnPackTo(_o, _resolver); - return _o; +inline OperatorCodeT *OperatorCode::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OperatorCodeT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; } @@ -14378,14 +21382,14 @@ inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolve { auto _e = builtin_code(); _o->builtin_code = _e; } } -inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset OperatorCode::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateOperatorCode(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateOperatorCode(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto 
_deprecated_builtin_code = _o->deprecated_builtin_code; auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code); auto _version = _o->version; @@ -14398,34 +21402,100 @@ inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBuf _builtin_code); } -inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorT(); - UnPackTo(_o, _resolver); - return _o; +inline StableHLOCompositeOptionsT *StableHLOCompositeOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StableHLOCompositeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StableHLOCompositeOptions::UnPackTo(StableHLOCompositeOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = decomposition_subgraph_index(); _o->decomposition_subgraph_index = _e; } + { auto _e = composite_attributes(); if (_e) { _o->composite_attributes.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->composite_attributes.begin()); } } + { auto _e = composite_attributes_format(); _o->composite_attributes_format = _e; } + { auto _e = version(); _o->version = _e; } +} + +inline ::flatbuffers::Offset StableHLOCompositeOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStableHLOCompositeOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateStableHLOCompositeOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StableHLOCompositeOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StableHLOCompositeOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; 
(void)_va; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _decomposition_subgraph_index = _o->decomposition_subgraph_index; + auto _composite_attributes = _o->composite_attributes.size() ? _fbb.CreateVector(_o->composite_attributes) : 0; + auto _composite_attributes_format = _o->composite_attributes_format; + auto _version = _o->version; + return tflite::CreateStableHLOCompositeOptions( + _fbb, + _name, + _decomposition_subgraph_index, + _composite_attributes, + _composite_attributes_format, + _version); +} + +inline StablehloShiftLeftOptionsT *StablehloShiftLeftOptions::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StablehloShiftLeftOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StablehloShiftLeftOptions::UnPackTo(StablehloShiftLeftOptionsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline ::flatbuffers::Offset StablehloShiftLeftOptions::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloShiftLeftOptionsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateStablehloShiftLeftOptions(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateStablehloShiftLeftOptions(::flatbuffers::FlatBufferBuilder &_fbb, const StablehloShiftLeftOptionsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StablehloShiftLeftOptionsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateStablehloShiftLeftOptions( + _fbb); +} + +inline OperatorT *Operator::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OperatorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Operator::UnPackTo(OperatorT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void Operator::UnPackTo(OperatorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = opcode_index(); _o->opcode_index = _e; } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } else { _o->inputs.resize(0); } } + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } else { _o->outputs.resize(0); } } { auto _e = builtin_options_type(); _o->builtin_options.type = _e; } { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); } { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } } { auto _e = custom_options_format(); _o->custom_options_format = _e; } - { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } } - { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } } + { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = 
_e->Get(_i) != 0; } } else { _o->mutating_variable_inputs.resize(0); } } + { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } else { _o->intermediates.resize(0); } } + { auto _e = large_custom_options_offset(); _o->large_custom_options_offset = _e; } + { auto _e = large_custom_options_size(); _o->large_custom_options_size = _e; } + { auto _e = builtin_options_2_type(); _o->builtin_options_2.type = _e; } + { auto _e = builtin_options_2(); if (_e) _o->builtin_options_2.value = tflite::BuiltinOptions2Union::UnPack(_e, builtin_options_2_type(), _resolver); } + { auto _e = debug_metadata_index(); _o->debug_metadata_index = _e; } } -inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset Operator::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateOperator(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateOperator(::flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _opcode_index = _o->opcode_index; auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; auto _outputs = _o->outputs.size() ? 
_fbb.CreateVector(_o->outputs) : 0; @@ -14435,6 +21505,11 @@ inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuild auto _custom_options_format = _o->custom_options_format; auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0; auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0; + auto _large_custom_options_offset = _o->large_custom_options_offset; + auto _large_custom_options_size = _o->large_custom_options_size; + auto _builtin_options_2_type = _o->builtin_options_2.type; + auto _builtin_options_2 = _o->builtin_options_2.Pack(_fbb); + auto _debug_metadata_index = _o->debug_metadata_index; return tflite::CreateOperator( _fbb, _opcode_index, @@ -14445,95 +21520,130 @@ inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuild _custom_options, _custom_options_format, _mutating_variable_inputs, - _intermediates); -} - -inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubGraphT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { + _intermediates, + _large_custom_options_offset, + _large_custom_options_size, + _builtin_options_2_type, + _builtin_options_2, + _debug_metadata_index); +} + +inline SubGraphT::SubGraphT(const SubGraphT &o) + : inputs(o.inputs), + outputs(o.outputs), + name(o.name), + debug_metadata_index(o.debug_metadata_index) { + tensors.reserve(o.tensors.size()); + for (const auto &tensors_ : o.tensors) { tensors.emplace_back((tensors_) ? new tflite::TensorT(*tensors_) : nullptr); } + operators.reserve(o.operators.size()); + for (const auto &operators_ : o.operators) { operators.emplace_back((operators_) ? 
new tflite::OperatorT(*operators_) : nullptr); } +} + +inline SubGraphT &SubGraphT::operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT { + std::swap(tensors, o.tensors); + std::swap(inputs, o.inputs); + std::swap(outputs, o.outputs); + std::swap(operators, o.operators); + std::swap(name, o.name); + std::swap(debug_metadata_index, o.debug_metadata_index); + return *this; +} + +inline SubGraphT *SubGraph::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubGraphT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SubGraph::UnPackTo(SubGraphT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } - { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } + { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->tensors[_i]) { _e->Get(_i)->UnPackTo(_o->tensors[_i].get(), _resolver); } else { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->tensors.resize(0); } } + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } else { _o->inputs.resize(0); } } + { auto _e = outputs(); if (_e) 
{ _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } else { _o->outputs.resize(0); } } + { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operators[_i]) { _e->Get(_i)->UnPackTo(_o->operators[_i].get(), _resolver); } else { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->operators.resize(0); } } { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = debug_metadata_index(); _o->debug_metadata_index = _e; } } -inline flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset SubGraph::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateSubGraph(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSubGraph(::flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _tensors = _o->tensors.size() ? _fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _tensors = _o->tensors.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _operators = _o->operators.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _debug_metadata_index = _o->debug_metadata_index; return tflite::CreateSubGraph( _fbb, _tensors, _inputs, _outputs, _operators, - _name); + _name, + _debug_metadata_index); } -inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BufferT(); - UnPackTo(_o, _resolver); - return _o; +inline BufferT *Buffer::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BufferT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void Buffer::UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } } + { auto _e = offset(); _o->offset = _e; } + { auto _e = size(); _o->size = _e; } } -inline flatbuffers::Offset Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline 
::flatbuffers::Offset Buffer::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateBuffer(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16); auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; + auto _offset = _o->offset; + auto _size = _o->size; return tflite::CreateBuffer( _fbb, - _data); + _data, + _offset, + _size); } -inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MetadataT(); - UnPackTo(_o, _resolver); - return _o; +inline MetadataT *Metadata::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MetadataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void Metadata::UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = name(); if (_e) _o->name = _e->str(); } { auto _e = buffer(); _o->buffer = _e; } } -inline flatbuffers::Offset Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t 
*_rehasher) { +inline ::flatbuffers::Offset Metadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateMetadata(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateMetadata(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); auto _buffer = _o->buffer; return tflite::CreateMetadata( @@ -14542,27 +21652,27 @@ inline flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuild _buffer); } -inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorMapT(); - UnPackTo(_o, _resolver); - return _o; +inline TensorMapT *TensorMap::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TensorMapT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TensorMap::UnPackTo(TensorMapT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = name(); if (_e) _o->name = _e->str(); } { auto _e = tensor_index(); _o->tensor_index = _e; } } -inline flatbuffers::Offset TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset TensorMap::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateTensorMap(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateTensorMap(::flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); auto _tensor_index = _o->tensor_index; return tflite::CreateTensorMap( @@ -14571,76 +21681,121 @@ inline flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBui _tensor_index); } -inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SignatureDefT(); - UnPackTo(_o, _resolver); - return _o; +inline SignatureDefT::SignatureDefT(const SignatureDefT &o) + : signature_key(o.signature_key), + subgraph_index(o.subgraph_index) { + inputs.reserve(o.inputs.size()); + for (const auto &inputs_ : o.inputs) { inputs.emplace_back((inputs_) ? new tflite::TensorMapT(*inputs_) : nullptr); } + outputs.reserve(o.outputs.size()); + for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? 
new tflite::TensorMapT(*outputs_) : nullptr); } +} + +inline SignatureDefT &SignatureDefT::operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT { + std::swap(inputs, o.inputs); + std::swap(outputs, o.outputs); + std::swap(signature_key, o.signature_key); + std::swap(subgraph_index, o.subgraph_index); + return *this; +} + +inline SignatureDefT *SignatureDef::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SignatureDefT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); } -inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SignatureDef::UnPackTo(SignatureDefT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = method_name(); if (_e) _o->method_name = _e->str(); } - { auto _e = key(); if (_e) _o->key = _e->str(); } + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inputs[_i]) { _e->Get(_i)->UnPackTo(_o->inputs[_i].get(), _resolver); } else { _o->inputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->inputs.resize(0); } } + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->outputs.resize(0); } } + { auto _e = signature_key(); if (_e) 
_o->signature_key = _e->str(); } + { auto _e = subgraph_index(); _o->subgraph_index = _e; } } -inline flatbuffers::Offset SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset SignatureDef::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateSignatureDef(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateSignatureDef(::flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _method_name = _o->method_name.empty() ? 0 : _fbb.CreateString(_o->method_name); - auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _inputs = _o->inputs.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _outputs = _o->outputs.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _signature_key = _o->signature_key.empty() ? 0 : _fbb.CreateString(_o->signature_key); + auto _subgraph_index = _o->subgraph_index; return tflite::CreateSignatureDef( _fbb, _inputs, _outputs, - _method_name, - _key); -} - -inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ModelT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const { + _signature_key, + _subgraph_index); +} + +inline ModelT::ModelT(const ModelT &o) + : version(o.version), + description(o.description), + metadata_buffer(o.metadata_buffer) { + operator_codes.reserve(o.operator_codes.size()); + for (const auto &operator_codes_ : o.operator_codes) { operator_codes.emplace_back((operator_codes_) ? new tflite::OperatorCodeT(*operator_codes_) : nullptr); } + subgraphs.reserve(o.subgraphs.size()); + for (const auto &subgraphs_ : o.subgraphs) { subgraphs.emplace_back((subgraphs_) ? new tflite::SubGraphT(*subgraphs_) : nullptr); } + buffers.reserve(o.buffers.size()); + for (const auto &buffers_ : o.buffers) { buffers.emplace_back((buffers_) ? new tflite::BufferT(*buffers_) : nullptr); } + metadata.reserve(o.metadata.size()); + for (const auto &metadata_ : o.metadata) { metadata.emplace_back((metadata_) ? new tflite::MetadataT(*metadata_) : nullptr); } + signature_defs.reserve(o.signature_defs.size()); + for (const auto &signature_defs_ : o.signature_defs) { signature_defs.emplace_back((signature_defs_) ? 
new tflite::SignatureDefT(*signature_defs_) : nullptr); } +} + +inline ModelT &ModelT::operator=(ModelT o) FLATBUFFERS_NOEXCEPT { + std::swap(version, o.version); + std::swap(operator_codes, o.operator_codes); + std::swap(subgraphs, o.subgraphs); + std::swap(description, o.description); + std::swap(buffers, o.buffers); + std::swap(metadata_buffer, o.metadata_buffer); + std::swap(metadata, o.metadata); + std::swap(signature_defs, o.signature_defs); + return *this; +} + +inline ModelT *Model::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ModelT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Model::UnPackTo(ModelT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = version(); _o->version = _e; } - { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } + { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operator_codes[_i]) { _e->Get(_i)->UnPackTo(_o->operator_codes[_i].get(), _resolver); } else { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->operator_codes.resize(0); } } + { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraphs.resize(0); } } { auto _e = 
description(); if (_e) _o->description = _e->str(); } - { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } } - { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->signature_defs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } + { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->buffers.resize(0); } } + { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } else { _o->metadata_buffer.resize(0); } } + { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metadata[_i]) { _e->Get(_i)->UnPackTo(_o->metadata[_i].get(), _resolver); } else { _o->metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->metadata.resize(0); } } + { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->signature_defs[_i]) { _e->Get(_i)->UnPackTo(_o->signature_defs[_i].get(), _resolver); } 
else { _o->signature_defs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->signature_defs.resize(0); } } } -inline flatbuffers::Offset Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset Model::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { return CreateModel(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline ::flatbuffers::Offset CreateModel(::flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _version = _o->version; - auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _subgraphs = _o->subgraphs.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); - auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _buffers = _o->buffers.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; - auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _metadata = _o->metadata.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _signature_defs = _o->signature_defs.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; return tflite::CreateModel( _fbb, _version, @@ -14653,2446 +21808,3446 @@ inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_f _signature_defs); } -inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { +inline bool VerifyQuantizationDetails(::flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { + switch (type) { + case QuantizationDetails_NONE: { + return true; + } + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case QuantizationDetails_BlockwiseQuantization: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifyQuantizationDetailsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyQuantizationDetails( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const ::flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case QuantizationDetails_BlockwiseQuantization: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline ::flatbuffers::Offset 
QuantizationDetailsUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); + } + case QuantizationDetails_BlockwiseQuantization: { + auto ptr = reinterpret_cast(value); + return CreateBlockwiseQuantization(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) : type(u.type), value(nullptr) { + switch (type) { + case QuantizationDetails_CustomQuantization: { + value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); + break; + } + case QuantizationDetails_BlockwiseQuantization: { + value = new tflite::BlockwiseQuantizationT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void QuantizationDetailsUnion::Reset() { + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case QuantizationDetails_BlockwiseQuantization: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = QuantizationDetails_NONE; +} + +inline bool VerifySparseIndexVector(::flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { + switch (type) { + case SparseIndexVector_NONE: { + return true; + } + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifySparseIndexVectorVector(::flatbuffers::Verifier &verifier, const 
::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifySparseIndexVector( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const ::flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline ::flatbuffers::Offset SparseIndexVectorUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(value); + return CreateInt32Vector(_fbb, ptr, _rehasher).Union(); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(value); + return CreateUint16Vector(_fbb, ptr, _rehasher).Union(); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(value); + return CreateUint8Vector(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) : type(u.type), value(nullptr) { + switch (type) { + case SparseIndexVector_Int32Vector: { + value = new tflite::Int32VectorT(*reinterpret_cast(u.value)); + break; + } + case SparseIndexVector_Uint16Vector: { + value = new tflite::Uint16VectorT(*reinterpret_cast(u.value)); + break; + } + case SparseIndexVector_Uint8Vector: 
{ + value = new tflite::Uint8VectorT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void SparseIndexVectorUnion::Reset() { + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = SparseIndexVector_NONE; +} + +inline bool VerifyBuiltinOptions(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { switch (type) { - case QuantizationDetails_NONE: { + case BuiltinOptions_NONE: { return true; } - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReducerOptions: 
{ + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - default: return true; - } -} - -inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyQuantizationDetails( - verifier, values->Get(i), types->GetEnum(i))) { - return false; + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - } - return true; -} - -inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: return nullptr; - } -} - -inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: return 0; - } -} - -inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); - break; + case BuiltinOptions_SquareOptions: 
{ + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: - break; - } -} - -inline void QuantizationDetailsUnion::Reset() { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: break; - } - value = nullptr; - type = QuantizationDetails_NONE; -} - -inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { - switch (type) { - case SparseIndexVector_NONE: { - return true; + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(obj); + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } - default: return true; - } -} - -inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifySparseIndexVector( - verifier, values->Get(i), types->GetEnum(i))) { - return false; + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RangeOptions: { + auto 
ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixSetDiagOptions: { + 
auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DensifyOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchMatMulOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CumsumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOnceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BroadcastToOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - } - return true; -} - -inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint8Vector: { 
- auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: return nullptr; - } -} - -inline flatbuffers::Offset SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - return CreateInt32Vector(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint16Vector(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint8Vector(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - default: return 0; - } -} - -inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case SparseIndexVector_Int32Vector: { - value = new tflite::Int32VectorT(*reinterpret_cast(u.value)); - break; + case BuiltinOptions_BitcastOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint16Vector: { - value = new tflite::Uint16VectorT(*reinterpret_cast(u.value)); - break; + case BuiltinOptions_BitwiseXorOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case SparseIndexVector_Uint8Vector: { - value = new tflite::Uint8VectorT(*reinterpret_cast(u.value)); - break; + case BuiltinOptions_RightShiftOptions: { + auto ptr = reinterpret_cast(obj); 
+ return verifier.VerifyTable(ptr); } - default: - break; + default: return true; } } -inline void SparseIndexVectorUnion::Reset() { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; +inline bool VerifyBuiltinOptionsVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyBuiltinOptions( + verifier, values->Get(i), types->GetEnum(i))) { + return false; } - default: break; } - value = nullptr; - type = SparseIndexVector_NONE; + return true; } -inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { +inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const ::flatbuffers::resolver_function_t *resolver) { + (void)resolver; switch (type) { - case BuiltinOptions_NONE: { - return true; - } case BuiltinOptions_Conv2DOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_DepthwiseConv2DOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ConcatEmbeddingsOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LSHProjectionOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_Pool2DOptions: { auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SVDFOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_RNNOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_FullyConnectedOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SoftmaxOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ConcatenationOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_AddOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_L2NormOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LocalResponseNormalizationOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LSTMOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ResizeBilinearOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_CallOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ReshapeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SkipGramOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SpaceToDepthOptions: { auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_EmbeddingLookupSparseOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_MulOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_PadOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_GatherOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_BatchToSpaceNDOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SpaceToBatchNDOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_TransposeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ReducerOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SubOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_DivOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SqueezeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SequenceRNNOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_StridedSliceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ExpOptions: { auto 
ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_TopKV2Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SplitOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LogSoftmaxOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_CastOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_DequantizeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_MaximumMinimumOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ArgMaxOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LessOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_NegOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_PadV2Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_GreaterOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_GreaterEqualOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LessEqualOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SelectOptions: { auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SliceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_TransposeConvOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SparseToDenseOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_TileOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ExpandDimsOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_EqualOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_NotEqualOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ShapeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_PowOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ArgMinOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_FakeQuantOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_PackOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LogicalOrOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_OneHotOptions: { auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LogicalAndOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_LogicalNotOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_UnpackOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_FloorDivOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SquareOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ZerosLikeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_FillOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_BidirectionalSequenceLSTMOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_BidirectionalSequenceRNNOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_FloorModOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_RangeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ResizeNearestNeighborOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return 
ptr->UnPack(resolver); } case BuiltinOptions_LeakyReluOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SquaredDifferenceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_MirrorPadOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_AbsOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SplitVOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_UniqueOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ReverseV2Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_AddNOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_GatherNdOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_CosOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_WhereOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_RankOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ReverseSequenceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_MatrixDiagOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return 
ptr->UnPack(resolver); } case BuiltinOptions_QuantizeOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_MatrixSetDiagOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_HardSwishOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_IfOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_WhileOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_DepthToSpaceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_NonMaxSuppressionV4Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_NonMaxSuppressionV5Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_ScatterNdOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SelectV2Options: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_DensifyOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_SegmentSumOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_BatchMatMulOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_CumsumOptions: { auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_CallOnceOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_BroadcastToOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } case BuiltinOptions_Rfft2dOptions: { auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); + return ptr->UnPack(resolver); } - default: return true; - } -} - -inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyBuiltinOptions( - verifier, values->Get(i), types->GetEnum(i))) { - return false; + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case 
BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BitcastOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BitwiseXorOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RightShiftOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; } - return true; } -inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { +inline ::flatbuffers::Offset BuiltinOptionsUnion::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; switch (type) { case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); } case 
BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return 
ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateAddOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateCallOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return 
CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateMulOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreatePadOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSubOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateDivOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SqueezeOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateExpOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateCastOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return 
CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLessOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(value); + return CreateNegOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_TransposeConvOptions: { - 
auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateTileOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreatePowOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return 
CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreatePackOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); 
- return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateFillOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_MirrorPadOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateCosOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); } case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + auto ptr = reinterpret_cast(value); + return CreateRankOptions(_fbb, ptr, _rehasher).Union(); + } + case 
BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(value); + return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(value); + return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(value); + return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(value); + return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(value); + return CreateIfOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(value); + return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(value); + return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(value); + return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SelectV2Options: { + auto ptr = reinterpret_cast(value); + return CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DensifyOptions: { + auto ptr = reinterpret_cast(value); + return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SegmentSumOptions: { + auto ptr = 
reinterpret_cast(value); + return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BatchMatMulOptions: { + auto ptr = reinterpret_cast(value); + return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CumsumOptions: { + auto ptr = reinterpret_cast(value); + return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CallOnceOptions: { + auto ptr = reinterpret_cast(value); + return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BroadcastToOptions: { + auto ptr = reinterpret_cast(value); + return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(value); + return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(value); + return CreateConv3DOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableSizeOptions(_fbb, ptr, 
_rehasher).Union(); } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(value); + return CreateVarHandleOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(value); + return CreateReadVariableOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(value); + return CreateAssignVariableOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(value); + return CreateRandomOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateBucketizeOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(value); + return CreateGeluOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateDynamicUpdateSliceOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentProdOptions: { + 
auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentProdOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentMaxOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentMinOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentSumOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(value); + return CreateATan2Options(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(value); + return CreateSignOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_BitcastOptions: { + auto ptr = reinterpret_cast(value); + return CreateBitcastOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); + case BuiltinOptions_BitwiseXorOptions: { + auto ptr = reinterpret_cast(value); + return CreateBitwiseXorOptions(_fbb, ptr, _rehasher).Union(); } - default: return nullptr; + case BuiltinOptions_RightShiftOptions: { + 
auto ptr = reinterpret_cast(value); + return CreateRightShiftOptions(_fbb, ptr, _rehasher).Union(); + } + default: return 0; } } -inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { +inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) : type(u.type), value(nullptr) { switch (type) { case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_FullyConnectedOptions: { 
- auto ptr = reinterpret_cast(value); - return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); + break; } case 
BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SpaceToDepthOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - return CreateMulOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::MulOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - return CreatePadOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::PadOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::GatherOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); + 
break; } case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::TransposeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ReducerOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - return CreateSubOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SubOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - return CreateDivOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::DivOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SqueezeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::StridedSliceOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ExpOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::TopKV2OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SplitOptions: { - auto ptr = 
reinterpret_cast(value); - return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SplitOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - return CreateCastOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::CastOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::DequantizeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ArgMaxOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LessOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - return CreateNegOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::NegOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::PadV2OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterOptions(_fbb, ptr, 
_rehasher).Union(); + value = new tflite::GreaterOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::GreaterEqualOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LessEqualOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SelectOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SliceOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::TransposeConvOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SparseToDenseOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - return CreateTileOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::TileOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ExpandDimsOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); 
+ value = new tflite::EqualOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::NotEqualOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ShapeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - return CreatePowOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::PowOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ArgMinOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::FakeQuantOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - return CreatePackOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::PackOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LogicalOrOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::OneHotOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LogicalAndOptionsT(*reinterpret_cast(u.value)); + break; 
} case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LogicalNotOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::UnpackOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::FloorDivOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SquareOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ZerosLikeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - return CreateFillOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::FillOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, 
_rehasher).Union(); + value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::FloorModOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::RangeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::LeakyReluOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::MirrorPadOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::AbsOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SplitVOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - return 
CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::UniqueOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::ReverseV2OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::AddNOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::GatherNdOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - return CreateCosOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::CosOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::WhereOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - return CreateRankOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::RankOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::MatrixDiagOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); + 
value = new tflite::QuantizeOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::HardSwishOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - return CreateIfOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::IfOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::WhileOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::ScatterNdOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - return 
CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); + value = new tflite::SelectV2OptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::DensifyOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::SegmentSumOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::BatchMatMulOptionsT(*reinterpret_cast(u.value)); + break; } case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); + value = new tflite::CumsumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CallOnceOptions: { + value = new tflite::CallOnceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BroadcastToOptions: { + value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Rfft2dOptions: { + value = new tflite::Rfft2dOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Conv3DOptions: { + value = new tflite::Conv3DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableOptions: { + value = new tflite::HashtableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableFindOptions: { + value = new tflite::HashtableFindOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableImportOptions: { + value = new tflite::HashtableImportOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableSizeOptions: { + value = new 
tflite::HashtableSizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_VarHandleOptions: { + value = new tflite::VarHandleOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReadVariableOptions: { + value = new tflite::ReadVariableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AssignVariableOptions: { + value = new tflite::AssignVariableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RandomOptions: { + value = new tflite::RandomOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BucketizeOptions: { + value = new tflite::BucketizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GeluOptions: { + value = new tflite::GeluOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + value = new tflite::DynamicUpdateSliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + value = new tflite::UnsortedSegmentProdOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + value = new tflite::UnsortedSegmentMaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentMinOptions: { + value = new tflite::UnsortedSegmentMinOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + value = new tflite::UnsortedSegmentSumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ATan2Options: { + value = new tflite::ATan2OptionsT(*reinterpret_cast(u.value)); + break; } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_SignOptions: { + value = new tflite::SignOptionsT(*reinterpret_cast(u.value)); + break; } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); - return 
CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_BitcastOptions: { + value = new tflite::BitcastOptionsT(*reinterpret_cast(u.value)); + break; } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); + case BuiltinOptions_BitwiseXorOptions: { + value = new tflite::BitwiseXorOptionsT(*reinterpret_cast(u.value)); + break; } - default: return 0; + case BuiltinOptions_RightShiftOptions: { + value = new tflite::RightShiftOptionsT(*reinterpret_cast(u.value)); + break; + } + default: + break; } } -inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { +inline void BuiltinOptionsUnion::Reset() { switch (type) { case BuiltinOptions_Conv2DOptions: { - value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_DepthwiseConv2DOptions: { - value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ConcatEmbeddingsOptions: { - value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LSHProjectionOptions: { - value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_Pool2DOptions: { - value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SVDFOptions: { - value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_RNNOptions: { - value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case 
BuiltinOptions_FullyConnectedOptions: { - value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SoftmaxOptions: { - value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ConcatenationOptions: { - value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_AddOptions: { - value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_L2NormOptions: { - value = new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LocalResponseNormalizationOptions: { - value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LSTMOptions: { - value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ResizeBilinearOptions: { - value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_CallOptions: { - value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ReshapeOptions: { - value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SkipGramOptions: { - value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SpaceToDepthOptions: { - value = new tflite::SpaceToDepthOptionsT(*reinterpret_cast(u.value)); + 
auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_EmbeddingLookupSparseOptions: { - value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_MulOptions: { - value = new tflite::MulOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_PadOptions: { - value = new tflite::PadOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_GatherOptions: { - value = new tflite::GatherOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_BatchToSpaceNDOptions: { - value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SpaceToBatchNDOptions: { - value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_TransposeOptions: { - value = new tflite::TransposeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ReducerOptions: { - value = new tflite::ReducerOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SubOptions: { - value = new tflite::SubOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_DivOptions: { - value = new tflite::DivOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SqueezeOptions: { - value = new tflite::SqueezeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SequenceRNNOptions: { - value = new 
tflite::SequenceRNNOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_StridedSliceOptions: { - value = new tflite::StridedSliceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ExpOptions: { - value = new tflite::ExpOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_TopKV2Options: { - value = new tflite::TopKV2OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SplitOptions: { - value = new tflite::SplitOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LogSoftmaxOptions: { - value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_CastOptions: { - value = new tflite::CastOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_DequantizeOptions: { - value = new tflite::DequantizeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_MaximumMinimumOptions: { - value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ArgMaxOptions: { - value = new tflite::ArgMaxOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LessOptions: { - value = new tflite::LessOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_NegOptions: { - value = new tflite::NegOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_PadV2Options: { - value = new 
tflite::PadV2OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_GreaterOptions: { - value = new tflite::GreaterOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_GreaterEqualOptions: { - value = new tflite::GreaterEqualOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LessEqualOptions: { - value = new tflite::LessEqualOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SelectOptions: { - value = new tflite::SelectOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SliceOptions: { - value = new tflite::SliceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_TransposeConvOptions: { - value = new tflite::TransposeConvOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SparseToDenseOptions: { - value = new tflite::SparseToDenseOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_TileOptions: { - value = new tflite::TileOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ExpandDimsOptions: { - value = new tflite::ExpandDimsOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_EqualOptions: { - value = new tflite::EqualOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_NotEqualOptions: { - value = new tflite::NotEqualOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case 
BuiltinOptions_ShapeOptions: { - value = new tflite::ShapeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_PowOptions: { - value = new tflite::PowOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ArgMinOptions: { - value = new tflite::ArgMinOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_FakeQuantOptions: { - value = new tflite::FakeQuantOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_PackOptions: { - value = new tflite::PackOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LogicalOrOptions: { - value = new tflite::LogicalOrOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_OneHotOptions: { - value = new tflite::OneHotOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LogicalAndOptions: { - value = new tflite::LogicalAndOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LogicalNotOptions: { - value = new tflite::LogicalNotOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_UnpackOptions: { - value = new tflite::UnpackOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_FloorDivOptions: { - value = new tflite::FloorDivOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SquareOptions: { - value = new tflite::SquareOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case 
BuiltinOptions_ZerosLikeOptions: { - value = new tflite::ZerosLikeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_FillOptions: { - value = new tflite::FillOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_BidirectionalSequenceRNNOptions: { - value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_FloorModOptions: { - value = new tflite::FloorModOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_RangeOptions: { - value = new tflite::RangeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ResizeNearestNeighborOptions: { - value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_LeakyReluOptions: { - value = new tflite::LeakyReluOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SquaredDifferenceOptions: { - value = new tflite::SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_MirrorPadOptions: { - value = new tflite::MirrorPadOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case 
BuiltinOptions_AbsOptions: { - value = new tflite::AbsOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SplitVOptions: { - value = new tflite::SplitVOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_UniqueOptions: { - value = new tflite::UniqueOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ReverseV2Options: { - value = new tflite::ReverseV2OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_AddNOptions: { - value = new tflite::AddNOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_GatherNdOptions: { - value = new tflite::GatherNdOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_CosOptions: { - value = new tflite::CosOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_WhereOptions: { - value = new tflite::WhereOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_RankOptions: { - value = new tflite::RankOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ReverseSequenceOptions: { - value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_MatrixDiagOptions: { - value = new tflite::MatrixDiagOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_QuantizeOptions: { - value = new tflite::QuantizeOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case 
BuiltinOptions_MatrixSetDiagOptions: { - value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_HardSwishOptions: { - value = new tflite::HardSwishOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_IfOptions: { - value = new tflite::IfOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_WhileOptions: { - value = new tflite::WhileOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_DepthToSpaceOptions: { - value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_NonMaxSuppressionV4Options: { - value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_NonMaxSuppressionV5Options: { - value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_ScatterNdOptions: { - value = new tflite::ScatterNdOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SelectV2Options: { - value = new tflite::SelectV2OptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_DensifyOptions: { - value = new tflite::DensifyOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_SegmentSumOptions: { - value = new tflite::SegmentSumOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_BatchMatMulOptions: { - value = new 
tflite::BatchMatMulOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_CumsumOptions: { - value = new tflite::CumsumOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_CallOnceOptions: { - value = new tflite::CallOnceOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_BroadcastToOptions: { - value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); + auto ptr = reinterpret_cast(value); + delete ptr; break; } case BuiltinOptions_Rfft2dOptions: { - value = new tflite::Rfft2dOptionsT( - *reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void BuiltinOptionsUnion::Reset() { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); + case 
BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SkipGramOptions: { 
- auto ptr = reinterpret_cast(value); + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_BitcastOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_BitwiseXorOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions_RightShiftOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + default: break; + } + value = nullptr; + type = BuiltinOptions_NONE; +} + +inline bool VerifyBuiltinOptions2(::flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions2 type) { + switch (type) { + case BuiltinOptions2_NONE: { + return true; } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloConcatenateOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloBroadcastInDimOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - delete 
ptr; - break; + case BuiltinOptions2_StablehloConvolutionOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloCustomCallOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloReduceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloScatterOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloCompareOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloDynamicSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloPadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloIotaOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloDotGeneralOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloReduceWindowOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloSortOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloWhileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloGatherOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloTransposeOptions: { 
+ auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_DilateOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloRngBitGeneratorOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_ReduceWindowOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StableHLOCompositeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloShiftLeftOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions2_StablehloCaseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifyBuiltinOptions2Vector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyBuiltinOptions2( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *BuiltinOptions2Union::UnPack(const void *obj, BuiltinOptions2 type, const ::flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case BuiltinOptions2_StablehloConcatenateOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloBroadcastInDimOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloConvolutionOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + 
} + case BuiltinOptions2_StablehloCustomCallOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloReduceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloScatterOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloCompareOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloDynamicSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloPadOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloIotaOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloDotGeneralOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloReduceWindowOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloSortOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloWhileOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloGatherOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_StablehloTransposeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions2_DilateOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloRngBitGeneratorOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = 
reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_ReduceWindowOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StableHLOCompositeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloShiftLeftOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloCaseOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + default: return nullptr; + } +} + +inline ::flatbuffers::Offset BuiltinOptions2Union::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ::flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case BuiltinOptions2_StablehloConcatenateOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloConcatenateOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloBroadcastInDimOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloBroadcastInDimOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloSliceOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case 
BuiltinOptions2_StablehloConvolutionOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloConvolutionOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloCustomCallOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloCustomCallOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloReduceOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloReduceOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloScatterOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloScatterOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloCompareOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloCompareOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloDynamicSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloDynamicSliceOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloPadOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloPadOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloIotaOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloIotaOptions(_fbb, ptr, _rehasher).Union(); } - case 
BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloDotGeneralOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloDotGeneralOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloReduceWindowOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloReduceWindowOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloSortOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloSortOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloWhileOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloWhileOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloGatherOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloGatherOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloTransposeOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloTransposeOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_DilateOptions: { + auto ptr = reinterpret_cast(value); + return CreateDilateOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloRngBitGeneratorOptions: { + auto ptr = 
reinterpret_cast(value); + return CreateStablehloRngBitGeneratorOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_ReduceWindowOptions: { + auto ptr = reinterpret_cast(value); + return CreateReduceWindowOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StableHLOCompositeOptions: { + auto ptr = reinterpret_cast(value); + return CreateStableHLOCompositeOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloShiftLeftOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloShiftLeftOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; + case BuiltinOptions2_StablehloCaseOptions: { + auto ptr = reinterpret_cast(value); + return CreateStablehloCaseOptions(_fbb, ptr, _rehasher).Union(); } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + default: return 0; + } +} + +inline BuiltinOptions2Union::BuiltinOptions2Union(const BuiltinOptions2Union &u) : type(u.type), value(nullptr) { + switch (type) { + case BuiltinOptions2_StablehloConcatenateOptions: { + value = new tflite::StablehloConcatenateOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloBroadcastInDimOptions: { + value = new tflite::StablehloBroadcastInDimOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloSliceOptions: { + value = new tflite::StablehloSliceOptionsT(*reinterpret_cast(u.value)); break; } - 
case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloConvolutionOptions: { + value = new tflite::StablehloConvolutionOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloCustomCallOptions: { + value = new tflite::StablehloCustomCallOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloReduceOptions: { + value = new tflite::StablehloReduceOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloScatterOptions: { + value = new tflite::StablehloScatterOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloCompareOptions: { + value = new tflite::StablehloCompareOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloDynamicSliceOptions: { + value = new tflite::StablehloDynamicSliceOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloPadOptions: { + value = new tflite::StablehloPadOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloIotaOptions: { + value = new tflite::StablehloIotaOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case 
BuiltinOptions2_StablehloDotGeneralOptions: { + value = new tflite::StablehloDotGeneralOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloReduceWindowOptions: { + value = new tflite::StablehloReduceWindowOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloSortOptions: { + value = new tflite::StablehloSortOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloWhileOptions: { + value = new tflite::StablehloWhileOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloGatherOptions: { + value = new tflite::StablehloGatherOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloTransposeOptions: { + value = new tflite::StablehloTransposeOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_DilateOptions: { + value = new tflite::DilateOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloRngBitGeneratorOptions: { + value = new tflite::StablehloRngBitGeneratorOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_ReduceWindowOptions: { + value = new tflite::ReduceWindowOptionsT(*reinterpret_cast(u.value)); break; } - case 
BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StableHLOCompositeOptions: { + value = new tflite::StableHLOCompositeOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloShiftLeftOptions: { + value = new tflite::StablehloShiftLeftOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; + case BuiltinOptions2_StablehloCaseOptions: { + value = new tflite::StablehloCaseOptionsT(*reinterpret_cast(u.value)); break; } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; + default: break; - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); + } +} + +inline void BuiltinOptions2Union::Reset() { + switch (type) { + case BuiltinOptions2_StablehloConcatenateOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloBroadcastInDimOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloSliceOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloConvolutionOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloCustomCallOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloReduceOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } 
- case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloScatterOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloCompareOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloDynamicSliceOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloPadOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloIotaOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloDotGeneralOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloReduceWindowOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloSortOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloWhileOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloGatherOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); + case 
BuiltinOptions2_StablehloTransposeOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_DilateOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloRngBitGeneratorOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_ReduceWindowOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StableHLOCompositeOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloShiftLeftOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); + case BuiltinOptions2_StablehloCaseOptions: { + auto ptr = reinterpret_cast(value); delete ptr; break; } default: break; } value = nullptr; - type = BuiltinOptions_NONE; + type = BuiltinOptions2_NONE; } inline const tflite::Model *GetModel(const void *buf) { - return flatbuffers::GetRoot(buf); + return ::flatbuffers::GetRoot(buf); } inline const tflite::Model *GetSizePrefixedModel(const void *buf) { - return flatbuffers::GetSizePrefixedRoot(buf); + return ::flatbuffers::GetSizePrefixedRoot(buf); } inline const char *ModelIdentifier() { @@ -17100,17 +25255,22 @@ inline const char *ModelIdentifier() { } inline bool ModelBufferHasIdentifier(const void *buf) { - return flatbuffers::BufferHasIdentifier( + return ::flatbuffers::BufferHasIdentifier( buf, ModelIdentifier()); } +inline bool SizePrefixedModelBufferHasIdentifier(const void *buf) { + return 
::flatbuffers::BufferHasIdentifier( + buf, ModelIdentifier(), true); +} + inline bool VerifyModelBuffer( - flatbuffers::Verifier &verifier) { + ::flatbuffers::Verifier &verifier) { return verifier.VerifyBuffer(ModelIdentifier()); } inline bool VerifySizePrefixedModelBuffer( - flatbuffers::Verifier &verifier) { + ::flatbuffers::Verifier &verifier) { return verifier.VerifySizePrefixedBuffer(ModelIdentifier()); } @@ -17119,26 +25279,26 @@ inline const char *ModelExtension() { } inline void FinishModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { fbb.Finish(root, ModelIdentifier()); } inline void FinishSizePrefixedModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { fbb.FinishSizePrefixed(root, ModelIdentifier()); } inline std::unique_ptr UnPackModel( const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { + const ::flatbuffers::resolver_function_t *res = nullptr) { return std::unique_ptr(GetModel(buf)->UnPack(res)); } inline std::unique_ptr UnPackSizePrefixedModel( const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { + const ::flatbuffers::resolver_function_t *res = nullptr) { return std::unique_ptr(GetSizePrefixedModel(buf)->UnPack(res)); } diff --git a/src/tensorflow/lite/schema/schema_utils.h b/src/tensorflow/lite/schema/schema_utils.h index 453276b..871c2f3 100644 --- a/src/tensorflow/lite/schema/schema_utils.h +++ b/src/tensorflow/lite/schema/schema_utils.h @@ -15,19 +15,6 @@ limitations under the License. #ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ #define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ -#include "flatbuffers/flatbuffers.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// The following methods are introduced to resolve op builtin code shortage -// problem. 
The new builtin opreator will be assigned to the extended builtin -// code field in the flatbuffer schema. Those methods helps to hide builtin code -// details. -BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); - -BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); - -} // namespace tflite +#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" #endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/src/tensorflow/lite/version.h b/src/tensorflow/lite/version.h deleted file mode 100644 index f667447..0000000 --- a/src/tensorflow/lite/version.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_VERSION_H_ -#define TENSORFLOW_LITE_VERSION_H_ - -#include "tensorflow/core/public/version.h" - -// The version number of the Schema. Ideally all changes will be backward -// compatible. If that ever changes, we must ensure that version is the first -// entry in the new tflite root so that we can see that version is not 1. -#define TFLITE_SCHEMA_VERSION (3) - -// TensorFlow Lite Runtime version. -// This value is currently shared with that of TensorFlow. 
-#define TFLITE_VERSION_STRING TF_VERSION_STRING - -#endif // TENSORFLOW_LITE_VERSION_H_ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h index 90de9db..446d21a 100644 --- a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V5.3.1 - * @date 26. March 2020 + * @version V6.0.0 + * @date 27. July 2024 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,33 +22,36 @@ * limitations under the License. */ -/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ - #ifndef __CMSIS_ARMCLANG_H #define __CMSIS_ARMCLANG_H #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif +#if (__ARM_ACLE >= 200) + #include +#else + #error Compiler must support ACLE V2.0 +#endif /* (__ARM_ACLE >= 200) */ /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm #endif #ifndef __INLINE - #define __INLINE __inline + #define __INLINE inline #endif #ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline + #define __STATIC_INLINE static inline #endif #ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline #endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif 
#ifndef __USED #define __USED __attribute__((used)) #endif @@ -64,18 +67,9 @@ #ifndef __PACKED_UNION #define __PACKED_UNION union __attribute__((packed, aligned(1))) #endif -#ifndef __UNALIGNED_UINT32 /* deprecated */ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ - struct __attribute__((packed)) T_UINT32 { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) -#endif #ifndef __UNALIGNED_UINT16_WRITE #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) @@ -83,7 +77,6 @@ #ifndef __UNALIGNED_UINT16_READ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) @@ -91,7 +84,6 @@ #ifndef __UNALIGNED_UINT32_WRITE #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; #pragma clang diagnostic pop #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) @@ -99,7 +91,6 @@ #ifndef __UNALIGNED_UINT32_READ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; #pragma clang diagnostic pop #define 
__UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) @@ -113,699 +104,13 @@ #ifndef __COMPILER_BARRIER #define __COMPILER_BARRIER() __ASM volatile("":::"memory") #endif - -/* ######################### Startup and Lowlevel Init ######################## */ - -#ifndef __PROGRAM_START -#define __PROGRAM_START __main -#endif - -#ifndef __INITIAL_SP -#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit -#endif - -#ifndef __STACK_LIMIT -#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base -#endif - -#ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors -#endif - -#ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) -#endif - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __enable_irq(); see arm_compat.h */ - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __disable_irq(); see arm_compat.h */ - - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. 
- \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".bss.noinit"))) #endif - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) #endif - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). 
- \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
- \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} - - -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif - - -/** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. 
- \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) -{ - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} -#endif - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. 
- */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ - - -/** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) -{ - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif - - -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. 
- \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); -} - - -/** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
- \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} -#endif - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - -/** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif -} - -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
- \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif -} -#endif - - -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
- \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} -#endif - - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
- \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif -} -#endif - - -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
- \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif - -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) -#endif - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) -#endif - - -/*@} end of CMSIS_Core_RegAccFunctions */ - - /* ########################## Core Instruction Access ######################### */ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface Access to dedicated instructions @@ -829,13 +134,14 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \brief No Operation \details No Operation does nothing. This instruction can be used for code alignment purposes. 
*/ -#define __NOP __builtin_arm_nop +#define __NOP() __nop() + /** \brief Wait For Interrupt \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -#define __WFI __builtin_arm_wfi +#define __WFI() __wfi() /** @@ -843,14 +149,14 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \details Wait For Event is a hint instruction that permits the processor to enter a low-power state until one of a number of events occurs. */ -#define __WFE __builtin_arm_wfe +#define __WFE() __wfe() /** \brief Send Event \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -#define __SEV __builtin_arm_sev +#define __SEV() __sev() /** @@ -859,14 +165,15 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -#define __ISB() __builtin_arm_isb(0xF) +#define __ISB() __isb(0xF) + /** \brief Data Synchronization Barrier \details Acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -#define __DSB() __builtin_arm_dsb(0xF) +#define __DSB() __dsb(0xF) /** @@ -874,7 +181,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \details Ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. 
*/ -#define __DMB() __builtin_arm_dmb(0xF) +#define __DMB() __dmb(0xF) /** @@ -883,7 +190,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \param [in] value Value to reverse \return Reversed value */ -#define __REV(value) __builtin_bswap32(value) +#define __REV(value) __rev(value) /** @@ -892,7 +199,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \param [in] value Value to reverse \return Reversed value */ -#define __REV16(value) __ROR(__REV(value), 16) +#define __REV16(value) __rev16(value) /** @@ -901,7 +208,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \param [in] value Value to reverse \return Reversed value */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) +#define __REVSH(value) __revsh(value) /** @@ -911,15 +218,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) \param [in] op2 Number of Bits to rotate \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} +#define __ROR(op1, op2) __ror(op1, op2) /** @@ -929,7 +228,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value is ignored by the processor. If required, a debugger can use it to store additional information about the breakpoint. 
*/ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** @@ -938,7 +237,8 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value Value to reverse \return Reversed value */ -#define __RBIT __builtin_arm_rbit +#define __RBIT(value) __rbit(value) + /** \brief Count leading zeros @@ -946,56 +246,99 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value Value to count the leading zeros \return number of leading zeros in value */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +#define __CLZ(value) __clz(value) + + +#if ((__ARM_FEATURE_SAT >= 1) && \ + (__ARM_ARCH_ISA_THUMB >= 2) ) +/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". 
- ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) + if ((sat >= 1U) && (sat <= 32U)) { - return 32U; + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } } - return __builtin_clz(value); + return (val); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ +#if (__ARM_FEATURE_LDREX >= 1) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex +#define __CLREX __builtin_arm_clrex /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. 
+ \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \return value of type uint8_t at (*ptr) */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex +#define __LDREXB (uint8_t)__builtin_arm_ldrex /** @@ -1007,6 +350,17 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) \return 1 Function failed */ #define __STREXB (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex /** @@ -1018,6 +372,17 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) \return 1 Function failed */ #define __STREXH (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex /** @@ -1029,46 +394,10 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) \return 1 Function failed */ #define __STREXW (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 4) */ -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. 
- */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT __builtin_arm_ssat - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT __builtin_arm_usat - - +#if (__ARM_ARCH_ISA_THUMB >= 2) /** \brief Rotate Right with Extend (32 bit) \details Moves each bit of a bitstring right by one bit. 
@@ -1080,8 +409,8 @@ __STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return(result); + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); } @@ -1096,7 +425,7 @@ __STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) uint32_t result; __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + return ((uint8_t)result); /* Add explicit type cast here */ } @@ -1111,7 +440,7 @@ __STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) uint32_t result; __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ + return ((uint16_t)result); /* Add explicit type cast here */ } @@ -1126,109 +455,12 @@ __STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) uint32_t result; __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} - -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; -} - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + return (result); } +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - +#if (__ARM_ARCH >= 8) /** \brief Load-Acquire (8 bit) \details Executes a LDAB instruction for 8 bit value. 
@@ -1240,7 +472,7 @@ __STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) uint32_t result; __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + return ((uint8_t)result); /* Add explicit type cast here */ } @@ -1255,7 +487,7 @@ __STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) uint32_t result; __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + return ((uint16_t)result); /* Add explicit type cast here */ } @@ -1270,7 +502,7 @@ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) uint32_t result; __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); + return (result); } @@ -1316,7 +548,7 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \param [in] ptr Pointer to data \return value of type uint8_t at (*ptr) */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex +#define __LDAEXB (uint8_t)__builtin_arm_ldaex /** @@ -1325,7 +557,7 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \param [in] ptr Pointer to data \return value of type uint16_t at (*ptr) */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex +#define __LDAEXH (uint16_t)__builtin_arm_ldaex /** @@ -1334,7 +566,7 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \param [in] ptr Pointer to data \return value of type uint32_t at (*ptr) */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex +#define __LDAEX (uint32_t)__builtin_arm_ldaex /** @@ -1345,7 +577,7 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \return 0 Function succeeded \return 1 Function failed */ -#define __STLEXB (uint32_t)__builtin_arm_stlex +#define __STLEXB (uint32_t)__builtin_arm_stlex /** @@ -1356,7 +588,7 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \return 0 Function succeeded \return 1 Function failed */ -#define __STLEXH (uint32_t)__builtin_arm_stlex +#define 
__STLEXH (uint32_t)__builtin_arm_stlex /** @@ -1367,101 +599,109 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) \return 0 Function succeeded \return 1 Function failed */ -#define __STLEX (uint32_t)__builtin_arm_stlex +#define __STLEX (uint32_t)__builtin_arm_stlex -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ +#endif /* (__ARM_ARCH >= 8) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
+ */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} +#endif + -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) - -#define __SADD8 __builtin_arm_sadd8 -#define __QADD8 __builtin_arm_qadd8 -#define __SHADD8 __builtin_arm_shadd8 -#define __UADD8 __builtin_arm_uadd8 -#define __UQADD8 __builtin_arm_uqadd8 -#define __UHADD8 __builtin_arm_uhadd8 -#define __SSUB8 __builtin_arm_ssub8 -#define __QSUB8 __builtin_arm_qsub8 -#define __SHSUB8 __builtin_arm_shsub8 -#define __USUB8 __builtin_arm_usub8 -#define __UQSUB8 __builtin_arm_uqsub8 -#define __UHSUB8 __builtin_arm_uhsub8 -#define __SADD16 __builtin_arm_sadd16 -#define __QADD16 __builtin_arm_qadd16 -#define __SHADD16 __builtin_arm_shadd16 -#define __UADD16 __builtin_arm_uadd16 -#define __UQADD16 __builtin_arm_uqadd16 -#define __UHADD16 __builtin_arm_uhadd16 -#define __SSUB16 __builtin_arm_ssub16 -#define __QSUB16 __builtin_arm_qsub16 -#define __SHSUB16 __builtin_arm_shsub16 -#define __USUB16 __builtin_arm_usub16 -#define __UQSUB16 __builtin_arm_uqsub16 -#define __UHSUB16 __builtin_arm_uhsub16 -#define __SASX __builtin_arm_sasx -#define __QASX __builtin_arm_qasx -#define __SHASX __builtin_arm_shasx -#define __UASX __builtin_arm_uasx -#define __UQASX __builtin_arm_uqasx -#define __UHASX __builtin_arm_uhasx -#define __SSAX __builtin_arm_ssax -#define __QSAX __builtin_arm_qsax -#define __SHSAX __builtin_arm_shsax -#define __USAX __builtin_arm_usax -#define __UQSAX __builtin_arm_uqsax -#define __UHSAX __builtin_arm_uhsax -#define __USAD8 __builtin_arm_usad8 -#define __USADA8 __builtin_arm_usada8 -#define __SSAT16 __builtin_arm_ssat16 -#define __USAT16 __builtin_arm_usat16 -#define __UXTB16 
__builtin_arm_uxtb16 -#define __UXTAB16 __builtin_arm_uxtab16 -#define __SXTB16 __builtin_arm_sxtb16 -#define __SXTAB16 __builtin_arm_sxtab16 -#define __SMUAD __builtin_arm_smuad -#define __SMUADX __builtin_arm_smuadx -#define __SMLAD __builtin_arm_smlad -#define __SMLADX __builtin_arm_smladx -#define __SMLALD __builtin_arm_smlald -#define __SMLALDX __builtin_arm_smlaldx -#define __SMUSD __builtin_arm_smusd -#define __SMUSDX __builtin_arm_smusdx -#define __SMLSD __builtin_arm_smlsd -#define __SMLSDX __builtin_arm_smlsdx -#define __SMLSLD __builtin_arm_smlsld -#define __SMLSLDX __builtin_arm_smlsldx -#define __SEL __builtin_arm_sel -#define __QADD __builtin_arm_qadd -#define __QSUB __builtin_arm_qsub - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { - int32_t result; +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return(result); +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif } -#endif /* (__ARM_FEATURE_DSP == 1) */ -/*@} end of group CMSIS_SIMD_intrinsics */ +/** @} end of CMSIS_Core_RegAccFunctions */ +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "./a-profile/cmsis_armclang_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "./r-profile/cmsis_armclang_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "./m-profile/cmsis_armclang_m.h" +#else + #error "Unknown Arm architecture profile" +#endif #endif /* __CMSIS_ARMCLANG_H */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_clang.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_clang.h new file mode 100644 index 0000000..872e16c --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_clang.h @@ -0,0 +1,708 @@ +/**************************************************************************//** + * @file cmsis_clang.h + * @brief CMSIS compiler LLVM/Clang header file + * @version V6.0.0 + * @date 27. July 2024 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_CLANG_H +#define __CMSIS_CLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#if (__ARM_ACLE >= 200) + #include +#else + #error Compiler must support ACLE V2.0 +#endif /* (__ARM_ACLE >= 200) */ + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT 
T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) +#endif +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __nop() + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
+ */ +#define __WFI() __wfi() + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __wfe() + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __sev() + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __rev(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __rev16(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
+ \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) __revsh(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR(op1, op2) __ror(op1, op2) + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT(value) __rbit(value) + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ(value) __clz(value) + + +#if ((__ARM_FEATURE_SAT >= 1) && \ + (__ARM_ARCH_ISA_THUMB >= 2) ) +/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); +} + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 1) +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); +} +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ + + +#if (__ARM_ARCH >= 8) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. 
+ Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} +#endif + + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + +/** @} end of CMSIS_Core_RegAccFunctions */ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "./a-profile/cmsis_clang_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "./r-profile/cmsis_clang_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "./m-profile/cmsis_clang_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_CLANG_H */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h index adbf296..31c3a32 100644 --- a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h @@ -1,11 +1,5 @@ -/**************************************************************************//** - * @file cmsis_compiler.h - * @brief CMSIS compiler generic header file - * @version V5.1.0 - * @date 09. October 2018 - ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,47 +16,51 @@ * limitations under the License. 
*/ +/* + * CMSIS Compiler Generic Header File + */ + #ifndef __CMSIS_COMPILER_H #define __CMSIS_COMPILER_H #include /* - * Arm Compiler 4/5 + * Arm Compiler above 6.10.1 (armclang) */ -#if defined ( __CC_ARM ) - #include "cmsis_armcc.h" - +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) + #include "third_party/cmsis/CMSIS/Core/Include/cmsis_armclang.h" /* - * Arm Compiler 6.6 LTM (armclang) + * TI Arm Clang Compiler (tiarmclang) */ -#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100) - #include "cmsis_armclang_ltm.h" +#elif defined (__ti__) + #include "cmsis_tiarmclang.h" - /* - * Arm Compiler above 6.10.1 (armclang) + +/* + * LLVM/Clang Compiler */ -#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) - #include "cmsis_armclang.h" +#elif defined ( __clang__ ) + #include "third_party/cmsis/CMSIS/Core/Include/cmsis_clang.h" /* * GNU Compiler */ #elif defined ( __GNUC__ ) - #include "cmsis_gcc.h" + #include "third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h" /* * IAR Compiler */ #elif defined ( __ICCARM__ ) - #include + #include "third_party/cmsis/CMSIS/Core/Include/cmsis_iccarm.h" /* - * TI Arm Compiler + * TI Arm Compiler (armcl) */ #elif defined ( __TI_ARM__ ) #include @@ -97,10 +95,6 @@ #ifndef __PACKED_UNION #define __PACKED_UNION union __attribute__((packed)) #endif - #ifndef __UNALIGNED_UINT32 /* deprecated */ - struct __attribute__((packed)) T_UINT32 { uint32_t v; }; - #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) - #endif #ifndef __UNALIGNED_UINT16_WRITE __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) @@ -127,7 +121,12 @@ #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. 
#define __COMPILER_BARRIER() (void)0 #endif - + #ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) + #endif + #ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) + #endif /* * TASKING Compiler @@ -169,10 +168,6 @@ #ifndef __PACKED_UNION #define __PACKED_UNION union __packed__ #endif - #ifndef __UNALIGNED_UINT32 /* deprecated */ - struct __packed__ T_UINT32 { uint32_t v; }; - #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) - #endif #ifndef __UNALIGNED_UINT16_WRITE __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) @@ -190,7 +185,7 @@ #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) #endif #ifndef __ALIGNED - #define __ALIGNED(x) __align(x) + #define __ALIGNED(x) __align(x) #endif #ifndef __RESTRICT #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. @@ -200,7 +195,12 @@ #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. #define __COMPILER_BARRIER() (void)0 #endif - + #ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) + #endif + #ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) + #endif /* * COSMIC Compiler @@ -240,10 +240,6 @@ #ifndef __PACKED_UNION #define __PACKED_UNION @packed union #endif - #ifndef __UNALIGNED_UINT32 /* deprecated */ - @packed struct T_UINT32 { uint32_t v; }; - #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) - #endif #ifndef __UNALIGNED_UINT16_WRITE __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) @@ -272,7 +268,12 @@ #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. 
#define __COMPILER_BARRIER() (void)0 #endif - + #ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) + #endif + #ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) + #endif #else #error Unknown compiler. diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h index 11866c8..4771466 100644 --- a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_gcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler GCC header file - * @version V5.3.0 - * @date 26. March 2020 + * @version V6.0.0 + * @date 27. July 2024 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -25,11 +25,9 @@ #ifndef __CMSIS_GCC_H #define __CMSIS_GCC_H -/* ignore some GCC warnings */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wsign-conversion" -#pragma GCC diagnostic ignored "-Wconversion" -#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC system_header /* treat file as system include file */ + +#include /* Fallback for __has_builtin */ #ifndef __has_builtin @@ -46,12 +44,15 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif #ifndef __USED #define __USED __attribute__((used)) #endif @@ -67,14 +68,6 @@ #ifndef __PACKED_UNION #define __PACKED_UNION union __attribute__((packed, aligned(1))) #endif 
-#ifndef __UNALIGNED_UINT32 /* deprecated */ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - struct __attribute__((packed)) T_UINT32 { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) -#endif #ifndef __UNALIGNED_UINT16_WRITE #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpacked" @@ -116,713 +109,730 @@ #ifndef __COMPILER_BARRIER #define __COMPILER_BARRIER() __ASM volatile("":::"memory") #endif - -/* ######################### Startup and Lowlevel Init ######################## */ - -#ifndef __PROGRAM_START - -/** - \brief Initializes data and bss sections - \details This default implementations initialized all data and additional bss - sections relying on .copy.table and .zero.table specified properly - in the used linker script. - - */ -__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) -{ - extern void _start(void) __NO_RETURN; - - typedef struct { - uint32_t const* src; - uint32_t* dest; - uint32_t wlen; - } __copy_table_t; - - typedef struct { - uint32_t* dest; - uint32_t wlen; - } __zero_table_t; - - extern const __copy_table_t __copy_table_start__; - extern const __copy_table_t __copy_table_end__; - extern const __zero_table_t __zero_table_start__; - extern const __zero_table_t __zero_table_end__; - - for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = pTable->src[i]; - } - } - - for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = 0u; - } - } - - _start(); -} - -#define __PROGRAM_START __cmsis_start +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) #endif - -#ifndef __INITIAL_SP -#define __INITIAL_SP __StackTop +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) 
#endif -#ifndef __STACK_LIMIT -#define __STACK_LIMIT __StackLimit -#endif +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ -#ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) #endif -#ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) -#endif +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ +#define __WFI() __ASM volatile ("wfi":::"memory") + /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. 
*/ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} +#define __WFE() __ASM volatile ("wfe":::"memory") /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} +#define __SEV() __ASM volatile ("sev") /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +__STATIC_FORCEINLINE void __ISB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); + __ASM volatile ("isb 0xF":::"memory"); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +__STATIC_FORCEINLINE void __DSB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); + __ASM volatile ("dsb 0xF":::"memory"); } -#endif /** - \brief Set Control Register - \details Writes the given value to the Control Register. 
- \param [in] control Control Register value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +__STATIC_FORCEINLINE void __DMB(void) { - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ASM volatile ("dmb 0xF":::"memory"); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) { - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + return __builtin_bswap32(value); } -#endif /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return (result); } /** - \brief Get APSR Register - \details Returns the content of the APSR Register. 
- \return APSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) { - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); + return (int16_t)__builtin_bswap16(value); } /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. 
*/ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); -} +#define __BKPT(value) __ASM volatile ("bkpt "#value) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} +#if (__ARM_ARCH_ISA_THUMB >= 2) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ #endif + return (result); +} /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. 
+ This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if (__ARM_FEATURE_SAT >= 1) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __SSAT(value, sat) __ssat(value, sat) /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} - +#define __USAT(value, sat) __usat(value, sat) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#else /* (__ARM_FEATURE_SAT >= 1) */ /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief Signed Saturate + \details Saturates a signed value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); } -#endif /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); } +#endif /* (__ARM_FEATURE_SAT >= 1) */ -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if (__ARM_FEATURE_LDREX >= 1) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. 
*/ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +__STATIC_FORCEINLINE void __CLREX(void) { - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + __ASM volatile ("clrex" ::: "memory"); } -#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) { uint32_t result; - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); } -#endif +#endif /* (__ARM_FEATURE_LDREX >= 1) */ +#if (__ARM_FEATURE_LDREX >= 2) /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. 
- \return Priority Mask value + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) { uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint16_t)result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); } -#endif +#endif /* (__ARM_FEATURE_LDREX >= 2) */ +#if (__ARM_FEATURE_LDREX >= 4) /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return (result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) { - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return (result); } -#endif +#endif /* (__ARM_FEATURE_LDREX >= 4) */ -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +#if (__ARM_ARCH_ISA_THUMB >= 2) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
+ \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { - __ASM volatile ("cpsie f" : : : "memory"); + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); } /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - __ASM volatile ("cpsid f" : : : "memory"); + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ } /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. 
- \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ +#if (__ARM_ARCH >= 8) /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ } -#endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. 
- - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. 
- - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); } +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. 
*/ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE void __disable_irq(void) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif + __ASM volatile ("cpsid i" : : : "memory"); } -#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +#if (__ARM_ARCH_ISA_THUMB >= 2) + /** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ + __STATIC_FORCEINLINE void __enable_fault_irq(void) + { + __ASM volatile ("cpsie f" : : : "memory"); + } + + + /** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
+ */ + __STATIC_FORCEINLINE void __disable_fault_irq(void) + { + __ASM volatile ("cpsid f" : : : "memory"); + } +#endif /** @@ -832,21 +842,10 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) */ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_get_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - return __builtin_arm_get_fpscr(); -#else - uint32_t result; - - __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); - return(result); -#endif +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) + return (__builtin_arm_get_fpscr()); #else - return(0U); + return (0U); #endif } @@ -858,1314 +857,150 @@ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void) */ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_set_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ +#if (defined(__ARM_FP) && (__ARM_FP >= 1)) __builtin_arm_set_fpscr(fpscr); -#else - __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); -#endif #else (void)fpscr; #endif } -/*@} end of CMSIS_Core_RegAccFunctions */ +/** @} end of CMSIS_Core_RegAccFunctions */ -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to 
dedicated SIMD instructions @{ */ -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + #define __SADD8 __sadd8 + #define __QADD8 __qadd8 + #define __SHADD8 __shadd8 + #define __UADD8 __uadd8 + #define __UQADD8 __uqadd8 + #define __UHADD8 __uhadd8 + #define __SSUB8 __ssub8 + #define __QSUB8 __qsub8 + #define __SHSUB8 __shsub8 + #define __USUB8 __usub8 + #define __UQSUB8 __uqsub8 + #define __UHSUB8 __uhsub8 + #define __SADD16 __sadd16 + #define __QADD16 __qadd16 + #define __SHADD16 __shadd16 + #define __UADD16 __uadd16 + #define __UQADD16 __uqadd16 + #define __UHADD16 __uhadd16 + #define __SSUB16 __ssub16 + #define __QSUB16 __qsub16 + #define __SHSUB16 __shsub16 + #define __USUB16 __usub16 + #define __UQSUB16 __uqsub16 + #define __UHSUB16 __uhsub16 + #define __SASX __sasx + #define __QASX __qasx + #define __SHASX __shasx + #define __UASX __uasx + #define __UQASX __uqasx + #define __UHASX __uhasx + #define __SSAX __ssax + #define __QSAX __qsax + #define __SHSAX __shsax + #define __USAX __usax + #define __UQSAX __uqsax + #define __UHSAX __uhsax + #define __USAD8 __usad8 + #define __USADA8 __usada8 + #define __SSAT16 __ssat16 + #define __USAT16 __usat16 + #define __UXTB16 __uxtb16 + #define __UXTAB16 __uxtab16 + #define __SXTB16 __sxtb16 + #define __SXTAB16 __sxtab16 + #define __SMUAD __smuad + #define __SMUADX __smuadx + #define __SMLAD __smlad + #define __SMLADX __smladx + #define __SMLALD __smlald + #define __SMLALDX __smlaldx + #define __SMUSD __smusd + #define __SMUSDX __smusdx + #define __SMLSD __smlsd + #define __SMLSDX __smlsdx + #define __SMLSLD __smlsld + #define __SMLSLDX __smlsldx + 
#define __SEL __sel + #define __QADD __qadd + #define __QSUB __qsub + + #define __PKHBT(ARG1,ARG2,ARG3) \ + __extension__ \ + ({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + + #define __PKHTB(ARG1,ARG2,ARG3) \ + __extension__ \ + ({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + + __STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) + { + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); + } + else + { + result = __SXTB16(__ROR(op1, rotate)); + } + return result; + } + + __STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) + { + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); + } + else + { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; + } + + __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) + { + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return (result); + } +#endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "a-profile/cmsis_gcc_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "r-profile/cmsis_gcc_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_gcc_m.h" #else -#define __CMSIS_GCC_OUT_REG(r) "=r" 
(r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) + #error "Unknown Arm architecture profile" #endif -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __ASM volatile ("nop") - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi":::"memory") - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __ASM volatile ("sev") - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -__STATIC_FORCEINLINE void __ISB(void) -{ - __ASM volatile ("isb 0xF":::"memory"); -} - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -__STATIC_FORCEINLINE void __DMB(void) -{ - __ASM volatile ("dmb 0xF":::"memory"); -} - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. 
- \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else - uint32_t result; - - __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) -{ - uint32_t result; - - __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif -} - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. 
- \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ -#endif - return result; -} - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. 
- */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); -} - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); -} - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. 
- */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(ARG1, ARG2) \ - __extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - - -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return(result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. 
- \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} - -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; -} - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; -} - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); -} - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); -} - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); -} - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); -} - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); -} - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); -} - -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ - -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ - - -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) - -__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -__STATIC_FORCEINLINE 
uint32_t __SSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqadd16 %0, %1, %2" : 
"=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, 
uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); 
- return(result); -} - -#define __SSAT16(ARG1, ARG2) \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - -#define __USAT16(ARG1, ARG2) \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - -__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) -{ - uint32_t result; - - __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) -{ - uint32_t result; - - __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) -{ - uint32_t result; - - __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); - - return result; -} - -__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; 
- - __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - - __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) 
: "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); -#endif - - return(llr.w64); -} - -__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - - -#define __PKHBT(ARG1,ARG2,ARG3) \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -({ \ - uint32_t __RES, 
__ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/*@} end of group CMSIS_SIMD_intrinsics */ - - -#pragma GCC diagnostic pop - #endif /* __CMSIS_GCC_H */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_iccarm.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_iccarm.h new file mode 100644 index 0000000..1628446 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_iccarm.h @@ -0,0 +1,571 @@ +/* + * Copyright (c) 2017-2024 IAR Systems + * Copyright (c) 2017-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * CMSIS-Core(Generic) Compiler ICCARM (IAR Compiler for Arm) Header File + */ + +#ifndef __CMSIS_ICCARM_H +#define __CMSIS_ICCARM_H + +#pragma system_include + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef __NO_RETURN + #if defined(__cplusplus) && __cplusplus >= 201103L + #define __NO_RETURN [[noreturn]] + #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L + #define __NO_RETURN _Noreturn + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#undef __WEAK /* undo the definition from DLib_Defaults.h */ +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + +#ifndef __PACKED + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_UNION __packed union + 
#endif +#endif + +#ifndef __UNALIGNED_UINT16_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint16_t __iar_uint16_read(void const *ptr) +{ + return *(__packed uint16_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) +{ + *(__packed uint16_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint32_t __iar_uint32_read(void const *ptr) +{ + return *(__packed uint32_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) +{ + *(__packed uint32_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. 
+ #define __ALIGNED(x) + #endif +#endif + +#ifndef __RESTRICT + #if __ICCARM_V8 + #define __RESTRICT __restrict + #else + /* Needs IAR language extensions */ + #define __RESTRICT restrict + #endif +#endif + +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".noinit"))) +#endif + +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +#if defined(__CLZ) + #undef __CLZ +#endif +#if defined(__REVSH) + #undef __REVSH +#endif +#if defined(__RBIT) + #undef __RBIT +#endif +#if defined(__SSAT) + #undef __SSAT +#endif +#if defined(__USAT) + #undef __USAT +#endif + +#include "iccarm_builtin.h" + +#define __disable_irq __iar_builtin_disable_interrupt +#define __enable_irq __iar_builtin_enable_interrupt +#define __arm_rsr __iar_builtin_rsr +#define __arm_wsr __iar_builtin_wsr + +#define __NOP __iar_builtin_no_operation +#define __WFI __iar_builtin_WFI +#define __WFE __iar_builtin_WFE +#define __ISB __iar_builtin_ISB +#define __SEV __iar_builtin_SEV +#define __DSB __iar_builtin_DSB +#define __DMB __iar_builtin_DMB +#define __REV __iar_builtin_REV +#define __REV16 __iar_builtin_REV16 +#define __ROR __iar_builtin_ROR +#define __RBIT __iar_builtin_RBIT +#define __CLZ __iar_builtin_CLZ + +__IAR_FT int16_t __REVSH(int16_t val) +{ + return (int16_t) __iar_builtin_REVSH(val); +} + + +#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) + +#if (__ARM_FEATURE_LDREX >= 1) + +/* + * __iar_builtin_CLREX can be reordered w.r.t. STREX during high optimizations. + * As a workaround we use inline assembly and a memory barrier. 
+ * (IAR issue EWARM-11901) + * Fixed in EWARM 9.50.i2 + */ + +__IAR_FT void __CLREX() { + __ASM volatile("CLREX" ::: "memory"); +} + + +#define __LDREXB __iar_builtin_LDREXB +#define __STREXB __iar_builtin_STREXB + +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + +#if (__ARM_FEATURE_LDREX >= 2) + +#define __LDREXH __iar_builtin_LDREXH +#define __STREXH __iar_builtin_STREXH + +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + +#if (__ARM_FEATURE_LDREX >= 4) + +#define __LDREXW __iar_builtin_LDREX +#define __STREXW __iar_builtin_STREX + +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + +#if ((__ARM_FEATURE_SAT >= 1) && \ + (__ARM_ARCH_ISA_THUMB >= 2) ) +/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __iar_builtin_SSAT + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value +*/ +#define __USAT __iar_builtin_USAT + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); +} + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ + +#if (__ARM_ARCH_ISA_THUMB >= 2) + +#define __RRX __iar_builtin_RRX + +__IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) +{ + uint32_t res; + __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); +} + +__IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) +{ + uint32_t res; + __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); +} + +__IAR_FT uint32_t __LDRT(volatile uint32_t *addr) +{ + uint32_t res; + __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; +} + +__IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) +{ + __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); +} + +__IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) +{ + __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); +} + +__IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) +{ + __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); +} +#endif + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 
__iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 __iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + + /* Note, these are 
suboptimal but I lack compiler features to express this */ + + #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +#endif /* (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) */ + +#if (defined (__ARM_FP) && (__ARM_FP >= 1)) + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) +#else + #define __get_FPSCR() ( 0 ) + #define __set_FPSCR(VALUE) ((void)VALUE) +#endif + +#if (defined(__ARM_ARCH_ISA_THUMB) && __ARM_ARCH_ISA_THUMB >= 2) +// This is not really fault_irq on Cortex-not-M. However +// there seems to be code that assumes this. + __IAR_FT void __disable_fault_irq() + { + __ASM volatile ("CPSID F" ::: "memory"); + } + + __IAR_FT void __enable_fault_irq() + { + __ASM volatile ("CPSIE F" ::: "memory"); + } +#endif + +#if (__ARM_ARCH >= 8) + + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return 
((uint8_t)res); + } + + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=&r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + +#endif /* __ARM_ARCH >= 8 */ + + #if __ARM_ARCH_PROFILE == 'A' + #include "a-profile/cmsis_iccarm_a.h" + #elif __ARM_ARCH_PROFILE == 'R' + #include "r-profile/cmsis_iccarm_r.h" + #elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_iccarm_m.h" + #else + #error "Unknown Arm architecture profile" + #endif + + +#endif diff --git a/src/third_party/cmsis/CMSIS/Core/Include/cmsis_version.h b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_version.h new file mode 100644 index 0000000..849a8a4 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/cmsis_version.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Core Version Definitions + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_VERSION_H +#define __CMSIS_VERSION_H + +/* CMSIS-Core(M) Version definitions */ +#define __CM_CMSIS_VERSION_MAIN ( 6U) /*!< \brief [31:16] CMSIS-Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 1U) /*!< \brief [15:0] CMSIS-Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< \brief CMSIS Core(M) version number */ + +/* CMSIS-Core(A) Version definitions */ +#define __CA_CMSIS_VERSION_MAIN ( 6U) /*!< \brief [31:16] CMSIS-Core(A) main version */ +#define __CA_CMSIS_VERSION_SUB ( 1U) /*!< \brief [15:0] CMSIS-Core(A) sub version */ +#define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \ + __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A) version number */ + +#endif diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_ca.h b/src/third_party/cmsis/CMSIS/Core/Include/core_ca.h new file mode 100644 index 0000000..5c50a79 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_ca.h @@ -0,0 +1,3000 @@ +/* + * Copyright (c) 2009-2023 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-A Core Peripheral Access Layer Header File + */ + +#ifndef __CORE_CA_H_GENERIC +#define __CORE_CA_H_GENERIC + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifdef __cplusplus + extern "C" { +#endif + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CA definitions */ + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check 
__FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TMS470__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CA_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CA_H_DEPENDANT +#define __CORE_CA_H_DEPENDANT + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifdef __cplusplus + extern "C" { +#endif + + /* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CA_REV + #define __CA_REV 0x0000U /*!< \brief Contains the core revision for a Cortex-A class device */ + #warning "__CA_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __GIC_PRESENT + #define __GIC_PRESENT 1U + #warning "__GIC_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __TIM_PRESENT + #define __TIM_PRESENT 1U + #warning "__TIM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __L2C_PRESENT + #define __L2C_PRESENT 0U + #warning "__L2C_PRESENT not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +#ifdef __cplusplus + #define __I volatile /*!< \brief Defines 'read only' permissions */ +#else + #define __I volatile const /*!< \brief Defines 'read only' permissions */ +#endif +#define __O volatile /*!< \brief Defines 'write only' permissions */ +#define __IO volatile /*!< \brief Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */ +#define __OM volatile /*!< \brief Defines 'write only' structure member permissions */ +#define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */ +#define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas + + /******************************************************************************* + * Register Abstraction + Core Register contain: + - CPSR + - CP15 Registers + - L2C-310 Cache Controller + - Generic Interrupt Controller Distributor + - Generic Interrupt Controller Interface + ******************************************************************************/ + +/* Core Register CPSR */ +typedef union +{ + struct + { + uint32_t M:5; /*!< \brief bit: 0.. 
4 Mode field */ + uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */ + uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */ + uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */ + uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */ + uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */ + uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */ + RESERVED(0:4, uint32_t) + uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */ + uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */ + uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPSR_Type; + + + +/* CPSR Register Definitions */ +#define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */ +#define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */ + +#define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */ +#define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */ + +#define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */ +#define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */ + +#define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */ +#define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */ + +#define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */ +#define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */ + +#define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */ +#define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */ + +#define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */ +#define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask 
*/ + +#define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */ +#define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */ + +#define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */ +#define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */ + +#define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */ +#define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */ + +#define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */ +#define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */ + +#define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */ +#define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */ + +#define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */ +#define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */ + +#define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */ +#define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */ + +#define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */ +#define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */ + +#define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */ +#define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */ +#define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */ +#define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */ +#define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */ +#define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */ +#define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */ +#define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */ +#define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */ + +/* CP15 Register SCTLR */ +typedef union +{ + struct + { + uint32_t M:1; /*!< \brief bit: 0 MMU enable */ + uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */ + uint32_t C:1; /*!< \brief bit: 2 Cache enable */ + RESERVED(0:2, uint32_t) + uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */ + RESERVED(1:1, uint32_t) + 
uint32_t B:1; /*!< \brief bit: 7 Endianness model */ + RESERVED(2:2, uint32_t) + uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */ + uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */ + uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */ + uint32_t V:1; /*!< \brief bit: 13 Vectors bit */ + uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */ + RESERVED(3:2, uint32_t) + uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */ + RESERVED(4:1, uint32_t) + uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */ + uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */ + uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */ + uint32_t U:1; /*!< \brief bit: 22 Alignment model */ + RESERVED(5:1, uint32_t) + uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */ + uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */ + RESERVED(6:1, uint32_t) + uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */ + uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. 
*/ + uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */ + uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */ + RESERVED(7:1, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} SCTLR_Type; + +#define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */ +#define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */ + +#define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */ +#define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */ + +#define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */ +#define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */ + +#define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */ +#define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */ + +#define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */ +#define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */ + +#define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */ +#define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */ + +#define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */ +#define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */ + +#define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */ +#define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */ + +#define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */ +#define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */ + +#define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */ +#define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */ + +#define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */ +#define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */ + +#define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */ +#define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */ + +#define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */ 
+#define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */ + +#define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */ +#define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */ + +#define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */ +#define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */ + +#define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */ +#define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */ + +#define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */ +#define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */ + +#define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */ +#define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */ + +#define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */ +#define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */ + +#define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */ +#define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */ + +#define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */ +#define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */ + +/* CP15 Register ACTLR */ +typedef union +{ +#if __CORTEX_A == 5 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A5 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:5, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */ + RESERVED(1:2, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */ + uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t BP:2; /*!< \brief bit:16..15 Branch prediction policy */ + uint32_t 
RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */ + uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */ + RESERVED(3:9, uint32_t) + uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 7 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A7 */ + struct + { + RESERVED(0:6, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + RESERVED(1:3, uint32_t) + uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */ + uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */ + uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */ + uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */ + uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */ + RESERVED(3:12, uint32_t) + uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */ + RESERVED(7:3, uint32_t) + } b; +#endif +#if __CORTEX_A == 9 || defined(DOXYGEN) + /** \brief Structure used for bit access on Cortex-A9 */ + struct + { + uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */ + RESERVED(0:1, uint32_t) + uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */ + uint32_t WFLZM:1; /*!< \brief bit: 3 Cache and TLB maintenance broadcast */ + RESERVED(1:2, uint32_t) + uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */ + uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */ + uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */ + uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */ + RESERVED(7:22, uint32_t) + } b; +#endif + uint32_t w; /*!< \brief Type used for word access */ +} ACTLR_Type; + +#define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */ +#define ACTLR_DDI_Msk (1UL 
<< ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */ + +#define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */ +#define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */ + +#define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */ +#define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */ + +#define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */ +#define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */ + +#define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */ +#define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */ + +#define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */ +#define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */ + +#define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */ +#define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */ + +#define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */ +#define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */ + +#define ACTLR_L1RADIS_Pos 12U /*!< \brief ACTLR: L1RADIS Position */ +#define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< \brief ACTLR: L1RADIS Mask */ + +#define ACTLR_DWBST_Pos 11U /*!< \brief ACTLR: DWBST Position */ +#define ACTLR_DWBST_Msk (1UL << ACTLR_DWBST_Pos) /*!< \brief ACTLR: DWBST Mask */ + +#define ACTLR_L2RADIS_Pos 11U /*!< \brief ACTLR: L2RADIS Position */ +#define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< \brief ACTLR: L2RADIS Mask */ + +#define ACTLR_DODMBS_Pos 10U /*!< \brief ACTLR: DODMBS Position */ +#define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< \brief ACTLR: DODMBS Mask */ + +#define ACTLR_PARITY_Pos 9U /*!< \brief ACTLR: PARITY Position */ +#define ACTLR_PARITY_Msk (1UL << ACTLR_PARITY_Pos) /*!< \brief ACTLR: PARITY Mask */ + +#define ACTLR_AOW_Pos 8U /*!< \brief ACTLR: AOW Position */ +#define ACTLR_AOW_Msk (1UL << ACTLR_AOW_Pos) /*!< \brief ACTLR: 
AOW Mask */ + +#define ACTLR_EXCL_Pos 7U /*!< \brief ACTLR: EXCL Position */ +#define ACTLR_EXCL_Msk (1UL << ACTLR_EXCL_Pos) /*!< \brief ACTLR: EXCL Mask */ + +#define ACTLR_SMP_Pos 6U /*!< \brief ACTLR: SMP Position */ +#define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< \brief ACTLR: SMP Mask */ + +#define ACTLR_WFLZM_Pos 3U /*!< \brief ACTLR: WFLZM Position */ +#define ACTLR_WFLZM_Msk (1UL << ACTLR_WFLZM_Pos) /*!< \brief ACTLR: WFLZM Mask */ + +#define ACTLR_L1PE_Pos 2U /*!< \brief ACTLR: L1PE Position */ +#define ACTLR_L1PE_Msk (1UL << ACTLR_L1PE_Pos) /*!< \brief ACTLR: L1PE Mask */ + +#define ACTLR_FW_Pos 0U /*!< \brief ACTLR: FW Position */ +#define ACTLR_FW_Msk (1UL << ACTLR_FW_Pos) /*!< \brief ACTLR: FW Mask */ + +/* CP15 Register CPACR */ +typedef union +{ + struct + { + uint32_t CP0:2; /*!< \brief bit: 0..1 Access rights for coprocessor 0 */ + uint32_t CP1:2; /*!< \brief bit: 2..3 Access rights for coprocessor 1 */ + uint32_t CP2:2; /*!< \brief bit: 4..5 Access rights for coprocessor 2 */ + uint32_t CP3:2; /*!< \brief bit: 6..7 Access rights for coprocessor 3 */ + uint32_t CP4:2; /*!< \brief bit: 8..9 Access rights for coprocessor 4 */ + uint32_t CP5:2; /*!< \brief bit:10..11 Access rights for coprocessor 5 */ + uint32_t CP6:2; /*!< \brief bit:12..13 Access rights for coprocessor 6 */ + uint32_t CP7:2; /*!< \brief bit:14..15 Access rights for coprocessor 7 */ + uint32_t CP8:2; /*!< \brief bit:16..17 Access rights for coprocessor 8 */ + uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */ + uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */ + uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */ + uint32_t CP12:2; /*!< \brief bit:24..25 Access rights for coprocessor 11 */ + uint32_t CP13:2; /*!< \brief bit:26..27 Access rights for coprocessor 11 */ + uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */ + RESERVED(0:1, uint32_t) + uint32_t D32DIS:1; /*!< \brief 
bit: 30 Disable use of registers D16-D31 of the VFP register file */ + uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */ + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CPACR_Type; + +#define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */ +#define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */ + +#define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */ +#define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */ + +#define CPACR_TRCDIS_Pos 28U /*!< \brief CPACR: D32DIS Position */ +#define CPACR_TRCDIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */ + +#define CPACR_CP_Pos_(n) (n*2U) /*!< \brief CPACR: CPn Position */ +#define CPACR_CP_Msk_(n) (3UL << CPACR_CP_Pos_(n)) /*!< \brief CPACR: CPn Mask */ + +#define CPACR_CP_NA 0U /*!< \brief CPACR CPn field: Access denied. */ +#define CPACR_CP_PL1 1U /*!< \brief CPACR CPn field: Accessible from PL1 only. */ +#define CPACR_CP_FA 3U /*!< \brief CPACR CPn field: Full access. */ + +/* CP15 Register DFSR */ +typedef union +{ + struct + { + uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */ + uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */ + RESERVED(0:1, uint32_t) + uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ + uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */ + uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */ + uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ + uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */ + RESERVED(1:18, uint32_t) + } s; /*!< \brief Structure used for bit access in short format */ + struct + { + uint32_t STATUS:5; /*!< \brief bit: 0.. 
5 Fault Status bits */ + RESERVED(0:3, uint32_t) + uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ + RESERVED(1:1, uint32_t) + uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */ + uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ + uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */ + RESERVED(2:18, uint32_t) + } l; /*!< \brief Structure used for bit access in long format */ + uint32_t w; /*!< \brief Type used for word access */ +} DFSR_Type; + +#define DFSR_CM_Pos 13U /*!< \brief DFSR: CM Position */ +#define DFSR_CM_Msk (1UL << DFSR_CM_Pos) /*!< \brief DFSR: CM Mask */ + +#define DFSR_Ext_Pos 12U /*!< \brief DFSR: Ext Position */ +#define DFSR_Ext_Msk (1UL << DFSR_Ext_Pos) /*!< \brief DFSR: Ext Mask */ + +#define DFSR_WnR_Pos 11U /*!< \brief DFSR: WnR Position */ +#define DFSR_WnR_Msk (1UL << DFSR_WnR_Pos) /*!< \brief DFSR: WnR Mask */ + +#define DFSR_FS1_Pos 10U /*!< \brief DFSR: FS1 Position */ +#define DFSR_FS1_Msk (1UL << DFSR_FS1_Pos) /*!< \brief DFSR: FS1 Mask */ + +#define DFSR_LPAE_Pos 9U /*!< \brief DFSR: LPAE Position */ +#define DFSR_LPAE_Msk (1UL << DFSR_LPAE_Pos) /*!< \brief DFSR: LPAE Mask */ + +#define DFSR_Domain_Pos 4U /*!< \brief DFSR: Domain Position */ +#define DFSR_Domain_Msk (0xFUL << DFSR_Domain_Pos) /*!< \brief DFSR: Domain Mask */ + +#define DFSR_FS0_Pos 0U /*!< \brief DFSR: FS0 Position */ +#define DFSR_FS0_Msk (0xFUL << DFSR_FS0_Pos) /*!< \brief DFSR: FS0 Mask */ + +#define DFSR_STATUS_Pos 0U /*!< \brief DFSR: STATUS Position */ +#define DFSR_STATUS_Msk (0x3FUL << DFSR_STATUS_Pos) /*!< \brief DFSR: STATUS Mask */ + +/* CP15 Register IFSR */ +typedef union +{ + struct + { + uint32_t FS0:4; /*!< \brief bit: 0.. 
3 Fault Status bits bit 0-3 */ + RESERVED(0:5, uint32_t) + uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ + uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */ + RESERVED(1:1, uint32_t) + uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ + RESERVED(2:19, uint32_t) + } s; /*!< \brief Structure used for bit access in short format */ + struct + { + uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */ + RESERVED(0:3, uint32_t) + uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */ + RESERVED(1:2, uint32_t) + uint32_t ExT:1; /*!< \brief bit: 12 External abort type */ + RESERVED(2:19, uint32_t) + } l; /*!< \brief Structure used for bit access in long format */ + uint32_t w; /*!< \brief Type used for word access */ +} IFSR_Type; + +#define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */ +#define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */ + +#define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */ +#define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */ + +#define IFSR_LPAE_Pos 9U /*!< \brief IFSR: LPAE Position */ +#define IFSR_LPAE_Msk (0x1UL << IFSR_LPAE_Pos) /*!< \brief IFSR: LPAE Mask */ + +#define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */ +#define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */ + +#define IFSR_STATUS_Pos 0U /*!< \brief IFSR: STATUS Position */ +#define IFSR_STATUS_Msk (0x3FUL << IFSR_STATUS_Pos) /*!< \brief IFSR: STATUS Mask */ + +/* CP15 Register ISR */ +typedef union +{ + struct + { + RESERVED(0:6, uint32_t) + uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */ + uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */ + uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */ + RESERVED(1:23, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} ISR_Type; + +#define ISR_A_Pos 13U /*!< \brief ISR: A Position */ +#define ISR_A_Msk (1UL << 
ISR_A_Pos) /*!< \brief ISR: A Mask */ + +#define ISR_I_Pos 12U /*!< \brief ISR: I Position */ +#define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */ + +#define ISR_F_Pos 11U /*!< \brief ISR: F Position */ +#define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */ + +/* DACR Register */ +#define DACR_D_Pos_(n) (2U*n) /*!< \brief DACR: Dn Position */ +#define DACR_D_Msk_(n) (3UL << DACR_D_Pos_(n)) /*!< \brief DACR: Dn Mask */ +#define DACR_Dn_NOACCESS 0U /*!< \brief DACR Dn field: No access */ +#define DACR_Dn_CLIENT 1U /*!< \brief DACR Dn field: Client */ +#define DACR_Dn_MANAGER 3U /*!< \brief DACR Dn field: Manager */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param [in] field Name of the register bit field. + \param [in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param [in] field Name of the register bit field. + \param [in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + + +/** + \brief Union type to access the L2C_310 Cache Controller. 
+*/ +#if (defined(__L2C_PRESENT) && (__L2C_PRESENT == 1U)) || \ + defined(DOXYGEN) +typedef struct +{ + __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */ + __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */ + RESERVED(0[0x3e], uint32_t) + __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */ + __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */ + RESERVED(1[0x3e], uint32_t) + __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */ + __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */ + __IOM uint32_t EVENT_COUNTER0_CONF; /*!< \brief Offset: 0x0208 (R/W) Event Counter 1 Configuration */ + RESERVED(2[0x2], uint32_t) + __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */ + __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */ + __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */ + __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */ + RESERVED(3[0x143], uint32_t) + __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */ + RESERVED(4[0xf], uint32_t) + __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */ + RESERVED(6[2], uint32_t) + __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */ + RESERVED(5[0xc], uint32_t) + __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */ + RESERVED(7[1], uint32_t) + __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */ + __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */ + RESERVED(8[0xc], uint32_t) + __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */ + RESERVED(9[1], uint32_t) + __IOM uint32_t 
CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */ + __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */ + RESERVED(10[0x40], uint32_t) + __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */ + __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */ + __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */ + __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */ + __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */ + __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */ + __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */ + __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */ + __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */ + __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */ + __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */ + __IOM uint32_t DATA_LOCK_6_WAY; /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 5 by Way */ + __IOM uint32_t INST_LOCK_6_WAY; /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 5 by Way */ + __IOM uint32_t DATA_LOCK_7_WAY; /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 6 by Way */ + __IOM uint32_t INST_LOCK_7_WAY; /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 6 by Way */ + RESERVED(11[0x4], uint32_t) + __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */ + __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */ + 
RESERVED(12[0xaa], uint32_t) + __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */ + __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */ + RESERVED(13[0xce], uint32_t) + __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */ +} L2C_310_TypeDef; + +#define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */ +#endif + +#if (defined(__GIC_PRESENT) && (__GIC_PRESENT == 1U)) || \ + defined(DOXYGEN) + +/** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */ + __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */ + RESERVED(0, uint32_t) + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */ + RESERVED(1[11], uint32_t) + __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */ + RESERVED(2, uint32_t) + __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */ + RESERVED(3, uint32_t) + __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */ + RESERVED(4, uint32_t) + __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */ + RESERVED(5[9], uint32_t) + __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */ + __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */ + __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */ + __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */ + __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) 
Interrupt Clear-Pending Registers */ + __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */ + __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */ + __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */ + RESERVED(6, uint32_t) + __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */ + RESERVED(7, uint32_t) + __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */ + __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */ + RESERVED(8[32], uint32_t) + __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */ + __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */ + RESERVED(9[3], uint32_t) + __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */ + __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */ + RESERVED(10[5236], uint32_t) + __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 0x6100(R/W) Interrupt Routing Registers */ +} GICDistributor_Type; + +#define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */ + +/* GICDistributor CTLR Register */ +#define GICDistributor_CTLR_EnableGrp0_Pos 0U /*!< GICDistributor CTLR: EnableGrp0 Position */ +#define GICDistributor_CTLR_EnableGrp0_Msk (0x1U /*<< GICDistributor_CTLR_EnableGrp0_Pos*/) /*!< GICDistributor CTLR: EnableGrp0 Mask */ +#define GICDistributor_CTLR_EnableGrp0(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CTLR_EnableGrp0_Pos*/)) & GICDistributor_CTLR_EnableGrp0_Msk) + +#define GICDistributor_CTLR_EnableGrp1_Pos 1U /*!< GICDistributor CTLR: EnableGrp1 Position */ +#define GICDistributor_CTLR_EnableGrp1_Msk (0x1U << 
GICDistributor_CTLR_EnableGrp1_Pos) /*!< GICDistributor CTLR: EnableGrp1 Mask */ +#define GICDistributor_CTLR_EnableGrp1(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EnableGrp1_Pos)) & GICDistributor_CTLR_EnableGrp1_Msk) + +#define GICDistributor_CTLR_ARE_Pos 4U /*!< GICDistributor CTLR: ARE Position */ +#define GICDistributor_CTLR_ARE_Msk (0x1U << GICDistributor_CTLR_ARE_Pos) /*!< GICDistributor CTLR: ARE Mask */ +#define GICDistributor_CTLR_ARE(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_ARE_Pos)) & GICDistributor_CTLR_ARE_Msk) + +#define GICDistributor_CTLR_DC_Pos 6U /*!< GICDistributor CTLR: DC Position */ +#define GICDistributor_CTLR_DC_Msk (0x1U << GICDistributor_CTLR_DC_Pos) /*!< GICDistributor CTLR: DC Mask */ +#define GICDistributor_CTLR_DC(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_DC_Pos)) & GICDistributor_CTLR_DC_Msk) + +#define GICDistributor_CTLR_EINWF_Pos 7U /*!< GICDistributor CTLR: EINWF Position */ +#define GICDistributor_CTLR_EINWF_Msk (0x1U << GICDistributor_CTLR_EINWF_Pos) /*!< GICDistributor CTLR: EINWF Mask */ +#define GICDistributor_CTLR_EINWF(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_EINWF_Pos)) & GICDistributor_CTLR_EINWF_Msk) + +#define GICDistributor_CTLR_RWP_Pos 31U /*!< GICDistributor CTLR: RWP Position */ +#define GICDistributor_CTLR_RWP_Msk (0x1U << GICDistributor_CTLR_RWP_Pos) /*!< GICDistributor CTLR: RWP Mask */ +#define GICDistributor_CTLR_RWP(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_CTLR_RWP_Pos)) & GICDistributor_CTLR_RWP_Msk) + +/* GICDistributor TYPER Register */ +#define GICDistributor_TYPER_ITLinesNumber_Pos 0U /*!< GICDistributor TYPER: ITLinesNumber Position */ +#define GICDistributor_TYPER_ITLinesNumber_Msk (0x1FU /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/) /*!< GICDistributor TYPER: ITLinesNumber Mask */ +#define GICDistributor_TYPER_ITLinesNumber(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_TYPER_ITLinesNumber_Pos*/)) & 
GICDistributor_CTLR_ITLinesNumber_Msk) + +#define GICDistributor_TYPER_CPUNumber_Pos 5U /*!< GICDistributor TYPER: CPUNumber Position */ +#define GICDistributor_TYPER_CPUNumber_Msk (0x7U << GICDistributor_TYPER_CPUNumber_Pos) /*!< GICDistributor TYPER: CPUNumber Mask */ +#define GICDistributor_TYPER_CPUNumber(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_CPUNumber_Pos)) & GICDistributor_TYPER_CPUNumber_Msk) + +#define GICDistributor_TYPER_SecurityExtn_Pos 10U /*!< GICDistributor TYPER: SecurityExtn Position */ +#define GICDistributor_TYPER_SecurityExtn_Msk (0x1U << GICDistributor_TYPER_SecurityExtn_Pos) /*!< GICDistributor TYPER: SecurityExtn Mask */ +#define GICDistributor_TYPER_SecurityExtn(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_SecurityExtn_Pos)) & GICDistributor_TYPER_SecurityExtn_Msk) + +#define GICDistributor_TYPER_LSPI_Pos 11U /*!< GICDistributor TYPER: LSPI Position */ +#define GICDistributor_TYPER_LSPI_Msk (0x1FU << GICDistributor_TYPER_LSPI_Pos) /*!< GICDistributor TYPER: LSPI Mask */ +#define GICDistributor_TYPER_LSPI(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_TYPER_LSPI_Pos)) & GICDistributor_TYPER_LSPI_Msk) + +/* GICDistributor IIDR Register */ +#define GICDistributor_IIDR_Implementer_Pos 0U /*!< GICDistributor IIDR: Implementer Position */ +#define GICDistributor_IIDR_Implementer_Msk (0xFFFU /*<< GICDistributor_IIDR_Implementer_Pos*/) /*!< GICDistributor IIDR: Implementer Mask */ +#define GICDistributor_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_IIDR_Implementer_Pos*/)) & GICDistributor_IIDR_Implementer_Msk) + +#define GICDistributor_IIDR_Revision_Pos 12U /*!< GICDistributor IIDR: Revision Position */ +#define GICDistributor_IIDR_Revision_Msk (0xFU << GICDistributor_IIDR_Revision_Pos) /*!< GICDistributor IIDR: Revision Mask */ +#define GICDistributor_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Revision_Pos)) & GICDistributor_IIDR_Revision_Msk) + +#define 
GICDistributor_IIDR_Variant_Pos 16U /*!< GICDistributor IIDR: Variant Position */ +#define GICDistributor_IIDR_Variant_Msk (0xFU << GICDistributor_IIDR_Variant_Pos) /*!< GICDistributor IIDR: Variant Mask */ +#define GICDistributor_IIDR_Variant(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_Variant_Pos)) & GICDistributor_IIDR_Variant_Msk) + +#define GICDistributor_IIDR_ProductID_Pos 24U /*!< GICDistributor IIDR: ProductID Position */ +#define GICDistributor_IIDR_ProductID_Msk (0xFFU << GICDistributor_IIDR_ProductID_Pos) /*!< GICDistributor IIDR: ProductID Mask */ +#define GICDistributor_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_IIDR_ProductID_Pos)) & GICDistributor_IIDR_ProductID_Msk) + +/* GICDistributor STATUSR Register */ +#define GICDistributor_STATUSR_RRD_Pos 0U /*!< GICDistributor STATUSR: RRD Position */ +#define GICDistributor_STATUSR_RRD_Msk (0x1U /*<< GICDistributor_STATUSR_RRD_Pos*/) /*!< GICDistributor STATUSR: RRD Mask */ +#define GICDistributor_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_STATUSR_RRD_Pos*/)) & GICDistributor_STATUSR_RRD_Msk) + +#define GICDistributor_STATUSR_WRD_Pos 1U /*!< GICDistributor STATUSR: WRD Position */ +#define GICDistributor_STATUSR_WRD_Msk (0x1U << GICDistributor_STATUSR_WRD_Pos) /*!< GICDistributor STATUSR: WRD Mask */ +#define GICDistributor_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WRD_Pos)) & GICDistributor_STATUSR_WRD_Msk) + +#define GICDistributor_STATUSR_RWOD_Pos 2U /*!< GICDistributor STATUSR: RWOD Position */ +#define GICDistributor_STATUSR_RWOD_Msk (0x1U << GICDistributor_STATUSR_RWOD_Pos) /*!< GICDistributor STATUSR: RWOD Mask */ +#define GICDistributor_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_RWOD_Pos)) & GICDistributor_STATUSR_RWOD_Msk) + +#define GICDistributor_STATUSR_WROD_Pos 3U /*!< GICDistributor STATUSR: WROD Position */ +#define GICDistributor_STATUSR_WROD_Msk (0x1U << 
GICDistributor_STATUSR_WROD_Pos) /*!< GICDistributor STATUSR: WROD Mask */ +#define GICDistributor_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_STATUSR_WROD_Pos)) & GICDistributor_STATUSR_WROD_Msk) + +/* GICDistributor SETSPI_NSR Register */ +#define GICDistributor_SETSPI_NSR_INTID_Pos 0U /*!< GICDistributor SETSPI_NSR: INTID Position */ +#define GICDistributor_SETSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/) /*!< GICDistributor SETSPI_NSR: INTID Mask */ +#define GICDistributor_SETSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_NSR_INTID_Pos*/)) & GICDistributor_SETSPI_NSR_INTID_Msk) + +/* GICDistributor CLRSPI_NSR Register */ +#define GICDistributor_CLRSPI_NSR_INTID_Pos 0U /*!< GICDistributor CLRSPI_NSR: INTID Position */ +#define GICDistributor_CLRSPI_NSR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/) /*!< GICDistributor CLRSPI_NSR: INTID Mask */ +#define GICDistributor_CLRSPI_NSR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_NSR_INTID_Pos*/)) & GICDistributor_CLRSPI_NSR_INTID_Msk) + +/* GICDistributor SETSPI_SR Register */ +#define GICDistributor_SETSPI_SR_INTID_Pos 0U /*!< GICDistributor SETSPI_SR: INTID Position */ +#define GICDistributor_SETSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_SETSPI_SR_INTID_Pos*/) /*!< GICDistributor SETSPI_SR: INTID Mask */ +#define GICDistributor_SETSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SETSPI_SR_INTID_Pos*/)) & GICDistributor_SETSPI_SR_INTID_Msk) + +/* GICDistributor CLRSPI_SR Register */ +#define GICDistributor_CLRSPI_SR_INTID_Pos 0U /*!< GICDistributor CLRSPI_SR: INTID Position */ +#define GICDistributor_CLRSPI_SR_INTID_Msk (0x3FFU /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/) /*!< GICDistributor CLRSPI_SR: INTID Mask */ +#define GICDistributor_CLRSPI_SR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_CLRSPI_SR_INTID_Pos*/)) & GICDistributor_CLRSPI_SR_INTID_Msk) + +/* GICDistributor ITARGETSR 
Register */ +#define GICDistributor_ITARGETSR_CPU0_Pos 0U /*!< GICDistributor ITARGETSR: CPU0 Position */ +#define GICDistributor_ITARGETSR_CPU0_Msk (0x1U /*<< GICDistributor_ITARGETSR_CPU0_Pos*/) /*!< GICDistributor ITARGETSR: CPU0 Mask */ +#define GICDistributor_ITARGETSR_CPU0(x) (((uint8_t)(((uint8_t)(x)) /*<< GICDistributor_ITARGETSR_CPU0_Pos*/)) & GICDistributor_ITARGETSR_CPU0_Msk) + +#define GICDistributor_ITARGETSR_CPU1_Pos 1U /*!< GICDistributor ITARGETSR: CPU1 Position */ +#define GICDistributor_ITARGETSR_CPU1_Msk (0x1U << GICDistributor_ITARGETSR_CPU1_Pos) /*!< GICDistributor ITARGETSR: CPU1 Mask */ +#define GICDistributor_ITARGETSR_CPU1(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU1_Pos)) & GICDistributor_ITARGETSR_CPU1_Msk) + +#define GICDistributor_ITARGETSR_CPU2_Pos 2U /*!< GICDistributor ITARGETSR: CPU2 Position */ +#define GICDistributor_ITARGETSR_CPU2_Msk (0x1U << GICDistributor_ITARGETSR_CPU2_Pos) /*!< GICDistributor ITARGETSR: CPU2 Mask */ +#define GICDistributor_ITARGETSR_CPU2(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU2_Pos)) & GICDistributor_ITARGETSR_CPU2_Msk) + +#define GICDistributor_ITARGETSR_CPU3_Pos 3U /*!< GICDistributor ITARGETSR: CPU3 Position */ +#define GICDistributor_ITARGETSR_CPU3_Msk (0x1U << GICDistributor_ITARGETSR_CPU3_Pos) /*!< GICDistributor ITARGETSR: CPU3 Mask */ +#define GICDistributor_ITARGETSR_CPU3(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU3_Pos)) & GICDistributor_ITARGETSR_CPU3_Msk) + +#define GICDistributor_ITARGETSR_CPU4_Pos 4U /*!< GICDistributor ITARGETSR: CPU4 Position */ +#define GICDistributor_ITARGETSR_CPU4_Msk (0x1U << GICDistributor_ITARGETSR_CPU4_Pos) /*!< GICDistributor ITARGETSR: CPU4 Mask */ +#define GICDistributor_ITARGETSR_CPU4(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU4_Pos)) & GICDistributor_ITARGETSR_CPU4_Msk) + +#define GICDistributor_ITARGETSR_CPU5_Pos 5U /*!< GICDistributor ITARGETSR: CPU5 Position */ +#define 
GICDistributor_ITARGETSR_CPU5_Msk (0x1U << GICDistributor_ITARGETSR_CPU5_Pos) /*!< GICDistributor ITARGETSR: CPU5 Mask */ +#define GICDistributor_ITARGETSR_CPU5(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU5_Pos)) & GICDistributor_ITARGETSR_CPU5_Msk) + +#define GICDistributor_ITARGETSR_CPU6_Pos 6U /*!< GICDistributor ITARGETSR: CPU6 Position */ +#define GICDistributor_ITARGETSR_CPU6_Msk (0x1U << GICDistributor_ITARGETSR_CPU6_Pos) /*!< GICDistributor ITARGETSR: CPU6 Mask */ +#define GICDistributor_ITARGETSR_CPU6(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU6_Pos)) & GICDistributor_ITARGETSR_CPU6_Msk) + +#define GICDistributor_ITARGETSR_CPU7_Pos 7U /*!< GICDistributor ITARGETSR: CPU7 Position */ +#define GICDistributor_ITARGETSR_CPU7_Msk (0x1U << GICDistributor_ITARGETSR_CPU7_Pos) /*!< GICDistributor ITARGETSR: CPU7 Mask */ +#define GICDistributor_ITARGETSR_CPU7(x) (((uint8_t)(((uint8_t)(x)) << GICDistributor_ITARGETSR_CPU7_Pos)) & GICDistributor_ITARGETSR_CPU7_Msk) + +/* GICDistributor SGIR Register */ +#define GICDistributor_SGIR_INTID_Pos 0U /*!< GICDistributor SGIR: INTID Position */ +#define GICDistributor_SGIR_INTID_Msk (0x7U /*<< GICDistributor_SGIR_INTID_Pos*/) /*!< GICDistributor SGIR: INTID Mask */ +#define GICDistributor_SGIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICDistributor_SGIR_INTID_Pos*/)) & GICDistributor_SGIR_INTID_Msk) + +#define GICDistributor_SGIR_NSATT_Pos 15U /*!< GICDistributor SGIR: NSATT Position */ +#define GICDistributor_SGIR_NSATT_Msk (0x1U << GICDistributor_SGIR_NSATT_Pos) /*!< GICDistributor SGIR: NSATT Mask */ +#define GICDistributor_SGIR_NSATT(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_NSATT_Pos)) & GICDistributor_SGIR_NSATT_Msk) + +#define GICDistributor_SGIR_CPUTargetList_Pos 16U /*!< GICDistributor SGIR: CPUTargetList Position */ +#define GICDistributor_SGIR_CPUTargetList_Msk (0xFFU << GICDistributor_SGIR_CPUTargetList_Pos) /*!< GICDistributor SGIR: CPUTargetList Mask */ +#define 
GICDistributor_SGIR_CPUTargetList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_CPUTargetList_Pos)) & GICDistributor_SGIR_CPUTargetList_Msk) + +#define GICDistributor_SGIR_TargetFilterList_Pos 24U /*!< GICDistributor SGIR: TargetFilterList Position */ +#define GICDistributor_SGIR_TargetFilterList_Msk (0x3U << GICDistributor_SGIR_TargetFilterList_Pos) /*!< GICDistributor SGIR: TargetFilterList Mask */ +#define GICDistributor_SGIR_TargetFilterList(x) (((uint32_t)(((uint32_t)(x)) << GICDistributor_SGIR_TargetFilterList_Pos)) & GICDistributor_SGIR_TargetFilterList_Msk) + +/* GICDistributor IROUTER Register */ +#define GICDistributor_IROUTER_Aff0_Pos 0UL /*!< GICDistributor IROUTER: Aff0 Position */ +#define GICDistributor_IROUTER_Aff0_Msk (0xFFUL /*<< GICDistributor_IROUTER_Aff0_Pos*/) /*!< GICDistributor IROUTER: Aff0 Mask */ +#define GICDistributor_IROUTER_Aff0(x) (((uint64_t)(((uint64_t)(x)) /*<< GICDistributor_IROUTER_Aff0_Pos*/)) & GICDistributor_IROUTER_Aff0_Msk) + +#define GICDistributor_IROUTER_Aff1_Pos 8UL /*!< GICDistributor IROUTER: Aff1 Position */ +#define GICDistributor_IROUTER_Aff1_Msk (0xFFUL << GICDistributor_IROUTER_Aff1_Pos) /*!< GICDistributor IROUTER: Aff1 Mask */ +#define GICDistributor_IROUTER_Aff1(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff1_Pos)) & GICDistributor_IROUTER_Aff1_Msk) + +#define GICDistributor_IROUTER_Aff2_Pos 16UL /*!< GICDistributor IROUTER: Aff2 Position */ +#define GICDistributor_IROUTER_Aff2_Msk (0xFFUL << GICDistributor_IROUTER_Aff2_Pos) /*!< GICDistributor IROUTER: Aff2 Mask */ +#define GICDistributor_IROUTER_Aff2(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff2_Pos)) & GICDistributor_IROUTER_Aff2_Msk) + +#define GICDistributor_IROUTER_IRM_Pos 31UL /*!< GICDistributor IROUTER: IRM Position */ +#define GICDistributor_IROUTER_IRM_Msk (0xFFUL << GICDistributor_IROUTER_IRM_Pos) /*!< GICDistributor IROUTER: IRM Mask */ +#define GICDistributor_IROUTER_IRM(x) (((uint64_t)(((uint64_t)(x)) << 
GICDistributor_IROUTER_IRM_Pos)) & GICDistributor_IROUTER_IRM_Msk) + +#define GICDistributor_IROUTER_Aff3_Pos 32UL /*!< GICDistributor IROUTER: Aff3 Position */ +#define GICDistributor_IROUTER_Aff3_Msk (0xFFUL << GICDistributor_IROUTER_Aff3_Pos) /*!< GICDistributor IROUTER: Aff3 Mask */ +#define GICDistributor_IROUTER_Aff3(x) (((uint64_t)(((uint64_t)(x)) << GICDistributor_IROUTER_Aff3_Pos)) & GICDistributor_IROUTER_Aff3_Msk) + + + +/** \brief Structure type to access the Generic Interrupt Controller Interface (GICC) +*/ +typedef struct +{ + __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */ + __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */ + __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */ + __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */ + __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */ + __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */ + __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */ + __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */ + __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */ + __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */ + __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */ + __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */ + RESERVED(1[40], uint32_t) + __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */ + __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */ + RESERVED(2[3], uint32_t) + __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */ + RESERVED(3[960], 
uint32_t) + __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */ +} GICInterface_Type; + +#define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */ + +/* GICInterface CTLR Register */ +#define GICInterface_CTLR_Enable_Pos 0U /*!< PTIM CTLR: Enable Position */ +#define GICInterface_CTLR_Enable_Msk (0x1U /*<< GICInterface_CTLR_Enable_Pos*/) /*!< PTIM CTLR: Enable Mask */ +#define GICInterface_CTLR_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_CTLR_Enable_Pos*/)) & GICInterface_CTLR_Enable_Msk) + +/* GICInterface PMR Register */ +#define GICInterface_PMR_Priority_Pos 0U /*!< PTIM PMR: Priority Position */ +#define GICInterface_PMR_Priority_Msk (0xFFU /*<< GICInterface_PMR_Priority_Pos*/) /*!< PTIM PMR: Priority Mask */ +#define GICInterface_PMR_Priority(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_PMR_Priority_Pos*/)) & GICInterface_PMR_Priority_Msk) + +/* GICInterface BPR Register */ +#define GICInterface_BPR_Binary_Point_Pos 0U /*!< PTIM BPR: Binary_Point Position */ +#define GICInterface_BPR_Binary_Point_Msk (0x7U /*<< GICInterface_BPR_Binary_Point_Pos*/) /*!< PTIM BPR: Binary_Point Mask */ +#define GICInterface_BPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_BPR_Binary_Point_Pos*/)) & GICInterface_BPR_Binary_Point_Msk) + +/* GICInterface IAR Register */ +#define GICInterface_IAR_INTID_Pos 0U /*!< PTIM IAR: INTID Position */ +#define GICInterface_IAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_IAR_INTID_Pos*/) /*!< PTIM IAR: INTID Mask */ +#define GICInterface_IAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IAR_INTID_Pos*/)) & GICInterface_IAR_INTID_Msk) + +/* GICInterface EOIR Register */ +#define GICInterface_EOIR_INTID_Pos 0U /*!< PTIM EOIR: INTID Position */ +#define GICInterface_EOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_EOIR_INTID_Pos*/) /*!< PTIM EOIR: INTID Mask */ +#define GICInterface_EOIR_INTID(x) 
(((uint32_t)(((uint32_t)(x)) /*<< GICInterface_EOIR_INTID_Pos*/)) & GICInterface_EOIR_INTID_Msk) + +/* GICInterface RPR Register */ +#define GICInterface_RPR_INTID_Pos 0U /*!< PTIM RPR: INTID Position */ +#define GICInterface_RPR_INTID_Msk (0xFFU /*<< GICInterface_RPR_INTID_Pos*/) /*!< PTIM RPR: INTID Mask */ +#define GICInterface_RPR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_RPR_INTID_Pos*/)) & GICInterface_RPR_INTID_Msk) + +/* GICInterface HPPIR Register */ +#define GICInterface_HPPIR_INTID_Pos 0U /*!< PTIM HPPIR: INTID Position */ +#define GICInterface_HPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_HPPIR_INTID_Pos*/) /*!< PTIM HPPIR: INTID Mask */ +#define GICInterface_HPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_HPPIR_INTID_Pos*/)) & GICInterface_HPPIR_INTID_Msk) + +/* GICInterface ABPR Register */ +#define GICInterface_ABPR_Binary_Point_Pos 0U /*!< PTIM ABPR: Binary_Point Position */ +#define GICInterface_ABPR_Binary_Point_Msk (0x7U /*<< GICInterface_ABPR_Binary_Point_Pos*/) /*!< PTIM ABPR: Binary_Point Mask */ +#define GICInterface_ABPR_Binary_Point(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_ABPR_Binary_Point_Pos*/)) & GICInterface_ABPR_Binary_Point_Msk) + +/* GICInterface AIAR Register */ +#define GICInterface_AIAR_INTID_Pos 0U /*!< PTIM AIAR: INTID Position */ +#define GICInterface_AIAR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AIAR_INTID_Pos*/) /*!< PTIM AIAR: INTID Mask */ +#define GICInterface_AIAR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AIAR_INTID_Pos*/)) & GICInterface_AIAR_INTID_Msk) + +/* GICInterface AEOIR Register */ +#define GICInterface_AEOIR_INTID_Pos 0U /*!< PTIM AEOIR: INTID Position */ +#define GICInterface_AEOIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AEOIR_INTID_Pos*/) /*!< PTIM AEOIR: INTID Mask */ +#define GICInterface_AEOIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AEOIR_INTID_Pos*/)) & GICInterface_AEOIR_INTID_Msk) + +/* GICInterface AHPPIR Register */ +#define 
GICInterface_AHPPIR_INTID_Pos 0U /*!< PTIM AHPPIR: INTID Position */ +#define GICInterface_AHPPIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_AHPPIR_INTID_Pos*/) /*!< PTIM AHPPIR: INTID Mask */ +#define GICInterface_AHPPIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_AHPPIR_INTID_Pos*/)) & GICInterface_AHPPIR_INTID_Msk) + +/* GICInterface STATUSR Register */ +#define GICInterface_STATUSR_RRD_Pos 0U /*!< GICInterface STATUSR: RRD Position */ +#define GICInterface_STATUSR_RRD_Msk (0x1U /*<< GICInterface_STATUSR_RRD_Pos*/) /*!< GICInterface STATUSR: RRD Mask */ +#define GICInterface_STATUSR_RRD(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_STATUSR_RRD_Pos*/)) & GICInterface_STATUSR_RRD_Msk) + +#define GICInterface_STATUSR_WRD_Pos 1U /*!< GICInterface STATUSR: WRD Position */ +#define GICInterface_STATUSR_WRD_Msk (0x1U << GICInterface_STATUSR_WRD_Pos) /*!< GICInterface STATUSR: WRD Mask */ +#define GICInterface_STATUSR_WRD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WRD_Pos)) & GICInterface_STATUSR_WRD_Msk) + +#define GICInterface_STATUSR_RWOD_Pos 2U /*!< GICInterface STATUSR: RWOD Position */ +#define GICInterface_STATUSR_RWOD_Msk (0x1U << GICInterface_STATUSR_RWOD_Pos) /*!< GICInterface STATUSR: RWOD Mask */ +#define GICInterface_STATUSR_RWOD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_RWOD_Pos)) & GICInterface_STATUSR_RWOD_Msk) + +#define GICInterface_STATUSR_WROD_Pos 3U /*!< GICInterface STATUSR: WROD Position */ +#define GICInterface_STATUSR_WROD_Msk (0x1U << GICInterface_STATUSR_WROD_Pos) /*!< GICInterface STATUSR: WROD Mask */ +#define GICInterface_STATUSR_WROD(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_STATUSR_WROD_Pos)) & GICInterface_STATUSR_WROD_Msk) + +#define GICInterface_STATUSR_ASV_Pos 4U /*!< GICInterface STATUSR: ASV Position */ +#define GICInterface_STATUSR_ASV_Msk (0x1U << GICInterface_STATUSR_ASV_Pos) /*!< GICInterface STATUSR: ASV Mask */ +#define GICInterface_STATUSR_ASV(x) (((uint32_t)(((uint32_t)(x)) << 
GICInterface_STATUSR_ASV_Pos)) & GICInterface_STATUSR_ASV_Msk) + +/* GICInterface IIDR Register */ +#define GICInterface_IIDR_Implementer_Pos 0U /*!< GICInterface IIDR: Implementer Position */ +#define GICInterface_IIDR_Implementer_Msk (0xFFFU /*<< GICInterface_IIDR_Implementer_Pos*/) /*!< GICInterface IIDR: Implementer Mask */ +#define GICInterface_IIDR_Implementer(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_IIDR_Implementer_Pos*/)) & GICInterface_IIDR_Implementer_Msk) + +#define GICInterface_IIDR_Revision_Pos 12U /*!< GICInterface IIDR: Revision Position */ +#define GICInterface_IIDR_Revision_Msk (0xFU << GICInterface_IIDR_Revision_Pos) /*!< GICInterface IIDR: Revision Mask */ +#define GICInterface_IIDR_Revision(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Revision_Pos)) & GICInterface_IIDR_Revision_Msk) + +#define GICInterface_IIDR_Arch_version_Pos 16U /*!< GICInterface IIDR: Arch_version Position */ +#define GICInterface_IIDR_Arch_version_Msk (0xFU << GICInterface_IIDR_Arch_version_Pos) /*!< GICInterface IIDR: Arch_version Mask */ +#define GICInterface_IIDR_Arch_version(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_Arch_version_Pos)) & GICInterface_IIDR_Arch_version_Msk) + +#define GICInterface_IIDR_ProductID_Pos 20U /*!< GICInterface IIDR: ProductID Position */ +#define GICInterface_IIDR_ProductID_Msk (0xFFFU << GICInterface_IIDR_ProductID_Pos) /*!< GICInterface IIDR: ProductID Mask */ +#define GICInterface_IIDR_ProductID(x) (((uint32_t)(((uint32_t)(x)) << GICInterface_IIDR_ProductID_Pos)) & GICInterface_IIDR_ProductID_Msk) + +/* GICInterface DIR Register */ +#define GICInterface_DIR_INTID_Pos 0U /*!< PTIM DIR: INTID Position */ +#define GICInterface_DIR_INTID_Msk (0xFFFFFFU /*<< GICInterface_DIR_INTID_Pos*/) /*!< PTIM DIR: INTID Mask */ +#define GICInterface_DIR_INTID(x) (((uint32_t)(((uint32_t)(x)) /*<< GICInterface_DIR_INTID_Pos*/)) & GICInterface_DIR_INTID_Msk) +#endif /* (__GIC_PRESENT == 1U) || defined(DOXYGEN) */ + +#if 
(defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \ + defined(DOXYGEN) +#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) +/** \brief Structure type to access the Private Timer +*/ +typedef struct +{ + __IOM uint32_t LOAD; //!< \brief Offset: 0x000 (R/W) Private Timer Load Register + __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register + __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register + __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register + RESERVED(0[4], uint32_t) + __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register + __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register + __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register + __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register + __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register + __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register +} Timer_Type; +#define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */ + +/* PTIM Control Register */ +#define PTIM_CONTROL_Enable_Pos 0U /*!< PTIM CONTROL: Enable Position */ +#define PTIM_CONTROL_Enable_Msk (0x1U /*<< PTIM_CONTROL_Enable_Pos*/) /*!< PTIM CONTROL: Enable Mask */ +#define PTIM_CONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_CONTROL_Enable_Pos*/)) & PTIM_CONTROL_Enable_Msk) + +#define PTIM_CONTROL_AutoReload_Pos 1U /*!< PTIM CONTROL: Auto Reload Position */ +#define PTIM_CONTROL_AutoReload_Msk (0x1U << PTIM_CONTROL_AutoReload_Pos) /*!< PTIM CONTROL: Auto Reload Mask */ +#define PTIM_CONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_AutoReload_Pos)) & PTIM_CONTROL_AutoReload_Msk) + +#define PTIM_CONTROL_IRQenable_Pos 2U /*!< PTIM CONTROL: IRQ Enabel Position */ +#define PTIM_CONTROL_IRQenable_Msk (0x1U << 
PTIM_CONTROL_IRQenable_Pos) /*!< PTIM CONTROL: IRQ Enabel Mask */ +#define PTIM_CONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_IRQenable_Pos)) & PTIM_CONTROL_IRQenable_Msk) + +#define PTIM_CONTROL_Prescaler_Pos 8U /*!< PTIM CONTROL: Prescaler Position */ +#define PTIM_CONTROL_Prescaler_Msk (0xFFU << PTIM_CONTROL_Prescaler_Pos) /*!< PTIM CONTROL: Prescaler Mask */ +#define PTIM_CONTROL_Prescaler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_CONTROL_Prescaler_Pos)) & PTIM_CONTROL_Prescaler_Msk) + +/* WCONTROL Watchdog Control Register */ +#define PTIM_WCONTROL_Enable_Pos 0U /*!< PTIM WCONTROL: Enable Position */ +#define PTIM_WCONTROL_Enable_Msk (0x1U /*<< PTIM_WCONTROL_Enable_Pos*/) /*!< PTIM WCONTROL: Enable Mask */ +#define PTIM_WCONTROL_Enable(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WCONTROL_Enable_Pos*/)) & PTIM_WCONTROL_Enable_Msk) + +#define PTIM_WCONTROL_AutoReload_Pos 1U /*!< PTIM WCONTROL: Auto Reload Position */ +#define PTIM_WCONTROL_AutoReload_Msk (0x1U << PTIM_WCONTROL_AutoReload_Pos) /*!< PTIM WCONTROL: Auto Reload Mask */ +#define PTIM_WCONTROL_AutoReload(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_AutoReload_Pos)) & PTIM_WCONTROL_AutoReload_Msk) + +#define PTIM_WCONTROL_IRQenable_Pos 2U /*!< PTIM WCONTROL: IRQ Enable Position */ +#define PTIM_WCONTROL_IRQenable_Msk (0x1U << PTIM_WCONTROL_IRQenable_Pos) /*!< PTIM WCONTROL: IRQ Enable Mask */ +#define PTIM_WCONTROL_IRQenable(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_IRQenable_Pos)) & PTIM_WCONTROL_IRQenable_Msk) + +#define PTIM_WCONTROL_Mode_Pos 3U /*!< PTIM WCONTROL: Watchdog Mode Position */ +#define PTIM_WCONTROL_Mode_Msk (0x1U << PTIM_WCONTROL_Mode_Pos) /*!< PTIM WCONTROL: Watchdog Mode Mask */ +#define PTIM_WCONTROL_Mode(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Mode_Pos)) & PTIM_WCONTROL_Mode_Msk) + +#define PTIM_WCONTROL_Presacler_Pos 8U /*!< PTIM WCONTROL: Prescaler Position */ +#define PTIM_WCONTROL_Presacler_Msk (0xFFU << PTIM_WCONTROL_Presacler_Pos) /*!< 
PTIM WCONTROL: Prescaler Mask */ +#define PTIM_WCONTROL_Presacler(x) (((uint32_t)(((uint32_t)(x)) << PTIM_WCONTROL_Presacler_Pos)) & PTIM_WCONTROL_Presacler_Msk) + +/* WISR Watchdog Interrupt Status Register */ +#define PTIM_WISR_EventFlag_Pos 0U /*!< PTIM WISR: Event Flag Position */ +#define PTIM_WISR_EventFlag_Msk (0x1U /*<< PTIM_WISR_EventFlag_Pos*/) /*!< PTIM WISR: Event Flag Mask */ +#define PTIM_WISR_EventFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WISR_EventFlag_Pos*/)) & PTIM_WISR_EventFlag_Msk) + +/* WRESET Watchdog Reset Status */ +#define PTIM_WRESET_ResetFlag_Pos 0U /*!< PTIM WRESET: Reset Flag Position */ +#define PTIM_WRESET_ResetFlag_Msk (0x1U /*<< PTIM_WRESET_ResetFlag_Pos*/) /*!< PTIM WRESET: Reset Flag Mask */ +#define PTIM_WRESET_ResetFlag(x) (((uint32_t)(((uint32_t)(x)) /*<< PTIM_WRESET_ResetFlag_Pos*/)) & PTIM_WRESET_ResetFlag_Msk) + +#endif /* ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) */ +#endif /* (__TIM_PRESENT == 1U) || defined(DOXYGEN) */ + + /******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - L1 Cache Functions + - L2C-310 Cache Controller Functions + - PL1 Timer Functions + - GIC Functions + - MMU Functions + ******************************************************************************/ + +/* ########################## L1 Cache functions ################################# */ + +/** \brief Enable Caches by setting I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_EnableCaches(void) { + __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk); + __ISB(); +} + +/** \brief Disable Caches by clearing I and C bits in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_DisableCaches(void) { + __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk)); + __ISB(); +} + +/** \brief Enable Branch Prediction by setting Z bit in SCTLR register. 
+*/ +__STATIC_FORCEINLINE void L1C_EnableBTAC(void) { + __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk); + __ISB(); +} + +/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register. +*/ +__STATIC_FORCEINLINE void L1C_DisableBTAC(void) { + __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk)); + __ISB(); +} + +/** \brief Invalidate entire branch predictor array +*/ +__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) { + __set_BPIALL(0); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new state +} + +/** \brief Clean instruction cache line by address. +* \param [in] va Pointer to instructions to clear the cache for. +*/ +__STATIC_FORCEINLINE void L1C_InvalidateICacheMVA(void *va) { + __set_ICIMVAC((uint32_t)va); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new I cache state +} + +/** \brief Invalidate the whole instruction cache +*/ +__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) { + __set_ICIALLU(0); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new I cache state +} + +/** \brief Clean data cache line by address. +* \param [in] va Pointer to data to clear the cache for. +*/ +__STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) { + __set_DCCMVAC((uint32_t)va); + __DMB(); //ensure the ordering of data cache maintenance operations and their effects +} + +/** \brief Invalidate data cache line by address. +* \param [in] va Pointer to data to invalidate the cache for. +*/ +__STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) { + __set_DCIMVAC((uint32_t)va); + __DMB(); //ensure the ordering of data cache maintenance operations and their effects +} + +/** \brief Clean and Invalidate data cache by address. +* \param [in] va Pointer to data to invalidate the cache for. 
+*/ +__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) { + __set_DCCIMVAC((uint32_t)va); + __DMB(); //ensure the ordering of data cache maintenance operations and their effects +} + +/** \brief Calculate log2 rounded up +* - log(0) => 0 +* - log(1) => 0 +* - log(2) => 1 +* - log(3) => 2 +* - log(4) => 2 +* - log(5) => 3 +* : : +* - log(16) => 4 +* - log(32) => 5 +* : : +* \param [in] n input value parameter +* \return log2(n) +*/ +__STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n) +{ + if (n < 2U) { + return 0U; + } + uint8_t log = 0U; + uint32_t t = n; + while(t > 1U) + { + log++; + t >>= 1U; + } + if (n & 1U) { log++; } + return log; +} + +/** \brief Apply cache maintenance to given cache level. +* \param [in] level cache level to be maintained +* \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint) +{ + uint32_t Dummy; + uint32_t ccsidr; + uint32_t num_sets; + uint32_t num_ways; + uint32_t shift_way; + uint32_t log2_linesize; + uint8_t log2_num_ways; + + Dummy = level << 1U; + /* set csselr, select ccsidr register */ + __set_CSSELR(Dummy); + /* get current ccsidr register */ + ccsidr = __get_CCSIDR(); + num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U; + num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U; + log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U; + log2_num_ways = __log2_up(num_ways); + if (log2_num_ways > 32U) { + return; // FATAL ERROR + } + shift_way = 32U - log2_num_ways; + for(int32_t way = num_ways-1; way >= 0; way--) + { + for(int32_t set = num_sets-1; set >= 0; set--) + { + Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way); + switch (maint) + { + case 0U: __set_DCISW(Dummy); break; + case 1U: __set_DCCSW(Dummy); break; + default: __set_DCCISW(Dummy); break; + } + } + } + __DMB(); +} + +/** \brief Clean and Invalidate the entire data or unified cache +* \param [in] op 0 - 
invalidate, 1 - clean, otherwise - invalidate and clean +*/ +__STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) { + uint32_t clidr; + uint32_t cache_type; + clidr = __get_CLIDR(); + for(uint32_t i = 0U; i<7U; i++) + { + cache_type = (clidr >> i*3U) & 0x7UL; + if ((cache_type >= 2U) && (cache_type <= 4U)) + { + __L1C_MaintainDCacheSetWay(i, op); + } + } +} + +/** \brief Invalidate the whole data cache. +*/ +__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(0); +} + +/** \brief Clean the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) { + L1C_CleanInvalidateCache(1); +} + +/** \brief Clean and invalidate the whole data cache. + */ +__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) { + L1C_CleanInvalidateCache(2); +} + +/* ########################## L2 Cache functions ################################# */ +#if (defined(__L2C_PRESENT) && (__L2C_PRESENT == 1U)) || \ + defined(DOXYGEN) +/** \brief Cache Sync operation by writing CACHE_SYNC register. +*/ +__STATIC_INLINE void L2C_Sync(void) +{ + L2C_310->CACHE_SYNC = 0x0; +} + +/** \brief Read cache controller cache ID from CACHE_ID register. + * \return L2C_310_TypeDef::CACHE_ID + */ +__STATIC_INLINE int L2C_GetID (void) +{ + return L2C_310->CACHE_ID; +} + +/** \brief Read cache controller cache type from CACHE_TYPE register. 
+* \return L2C_310_TypeDef::CACHE_TYPE +*/ +__STATIC_INLINE int L2C_GetType (void) +{ + return L2C_310->CACHE_TYPE; +} + +/** \brief Invalidate all cache by way +*/ +__STATIC_INLINE void L2C_InvAllByWay (void) +{ + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->INV_WAY = (1U << assoc) - 1U; + while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); +} + +/** \brief Clean and Invalidate all cache by way +*/ +__STATIC_INLINE void L2C_CleanInvAllByWay (void) +{ + unsigned int assoc; + + if (L2C_310->AUX_CNT & (1U << 16U)) { + assoc = 16U; + } else { + assoc = 8U; + } + + L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U; + while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate + + L2C_Sync(); +} + +/** \brief Enable Level 2 Cache +*/ +__STATIC_INLINE void L2C_Enable(void) +{ + L2C_310->CONTROL = 0; + L2C_310->INTERRUPT_CLEAR = 0x000001FFuL; + L2C_310->DEBUG_CONTROL = 0; + L2C_310->DATA_LOCK_0_WAY = 0; + L2C_310->CACHE_SYNC = 0; + L2C_310->CONTROL = 0x01; + L2C_Sync(); +} + +/** \brief Disable Level 2 Cache +*/ +__STATIC_INLINE void L2C_Disable(void) +{ + L2C_310->CONTROL = 0x00; + L2C_Sync(); +} + +/** \brief Invalidate cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. +*/ +__STATIC_INLINE void L2C_InvPa (void *pa) +{ + L2C_310->INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} + +/** \brief Clean cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. +*/ +__STATIC_INLINE void L2C_CleanPa (void *pa) +{ + L2C_310->CLEAN_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} + +/** \brief Clean and invalidate cache by physical address +* \param [in] pa Pointer to data to invalidate cache for. 
+*/ +__STATIC_INLINE void L2C_CleanInvPa (void *pa) +{ + L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa; + L2C_Sync(); +} +#endif + +/* ########################## GIC functions ###################################### */ +#if (defined(__GIC_PRESENT) && (__GIC_PRESENT == 1U)) || \ + defined(DOXYGEN) + +/** \brief Enable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_EnableDistributor(void) +{ + GICDistributor->CTLR |= 1U; +} + +/** \brief Disable the interrupt distributor using the GIC's CTLR register. +*/ +__STATIC_INLINE void GIC_DisableDistributor(void) +{ + GICDistributor->CTLR &=~1U; +} + +/** \brief Read the GIC's TYPER register. +* \return GICDistributor_Type::TYPER +*/ +__STATIC_INLINE uint32_t GIC_DistributorInfo(void) +{ + return (GICDistributor->TYPER); +} + +/** \brief Reads the GIC's IIDR register. +* \return GICDistributor_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_DistributorImplementer(void) +{ + return (GICDistributor->IIDR); +} + +/** \brief Sets the GIC's ITARGETSR register for the given interrupt. +* \param [in] IRQn Interrupt to be configured. +* \param [in] cpu_target CPU interfaces to assign this interrupt to. +*/ +__STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target) +{ + uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); + GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U)); +} + +/** \brief Read the GIC's ITARGETSR register. +* \param [in] IRQn Interrupt to acquire the configuration for. +* \return GICDistributor_Type::ITARGETSR +*/ +__STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn) +{ + return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; +} + +/** \brief Enable the CPU's interrupt interface. +*/ +__STATIC_INLINE void GIC_EnableInterface(void) +{ + GICInterface->CTLR |= 1U; //enable interface +} + +/** \brief Disable the CPU's interrupt interface. 
+*/ +__STATIC_INLINE void GIC_DisableInterface(void) +{ + GICInterface->CTLR &=~1U; //disable distributor +} + +/** \brief Read the CPU's IAR register. +* \return GICInterface_Type::IAR +*/ +__STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void) +{ + return (IRQn_Type)(GICInterface->IAR); +} + +/** \brief Writes the given interrupt number to the CPU's EOIR register. +* \param [in] IRQn The interrupt to be signaled as finished. +*/ +__STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn) +{ + GICInterface->EOIR = IRQn; +} + +/** \brief Enables the given interrupt using GIC's ISENABLER register. +* \param [in] IRQn The interrupt to be enabled. +*/ +__STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn) +{ + GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U); +} + +/** \brief Get interrupt enable status using GIC's ISENABLER register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - interrupt is not enabled, 1 - interrupt is enabled. +*/ +__STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn) +{ + return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL; +} + +/** \brief Disables the given interrupt using GIC's ICENABLER register. +* \param [in] IRQn The interrupt to be disabled. +*/ +__STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn) +{ + GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U); +} + +/** \brief Get interrupt pending status from GIC's ISPENDR register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - interrupt is not pending, 1 - interrupt is pendig. 
+*/ +__STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn) +{ + uint32_t pend; + + if (IRQn >= 16U) { + pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; + } else { + // INTID 0-15 Software Generated Interrupt + pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; + // No CPU identification offered + if (pend != 0U) { + pend = 1U; + } else { + pend = 0U; + } + } + + return (pend); +} + +/** \brief Sets the given interrupt as pending using GIC's ISPENDR register. +* \param [in] IRQn The interrupt to be enabled. +*/ +__STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if (IRQn >= 16U) { + GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U); + } else { + // INTID 0-15 Software Generated Interrupt + // Forward the interrupt to the CPU interface that requested it + GICDistributor->SGIR = (IRQn | 0x02000000U); + } +} + +/** \brief Clears the given interrupt from being pending using GIC's ICPENDR register. +* \param [in] IRQn The interrupt to be enabled. +*/ +__STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if (IRQn >= 16U) { + GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U); + } else { + // INTID 0-15 Software Generated Interrupt + GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U); + } +} + +/** \brief Sets the interrupt configuration using GIC's ICFGR register. +* \param [in] IRQn The interrupt to be configured. +* \param [in] int_config Int_config field value. 
Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) +* Bit 1: 0 - level sensitive, 1 - edge triggered +*/ +__STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config) +{ + uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U]; /* read current register content */ + uint32_t shift = (IRQn % 16U) << 1U; /* calculate shift value */ + + int_config &= 3U; /* only 2 bits are valid */ + icfgr &= (~(3U << shift)); /* clear bits to change */ + icfgr |= ( int_config << shift); /* set new configuration */ + + GICDistributor->ICFGR[IRQn / 16U] = icfgr; /* write new register content */ +} + +/** \brief Get the interrupt configuration from the GIC's ICFGR register. +* \param [in] IRQn Interrupt to acquire the configuration for. +* \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1) +* Bit 1: 0 - level sensitive, 1 - edge triggered +*/ +__STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn) +{ + return (GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) >> 1U)); +} + +/** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register. +* \param [in] IRQn The interrupt to be configured. +* \param [in] priority The priority for the interrupt, lower values denote higher priorities. +*/ +__STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U)); + GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U)); +} + +/** \brief Read the current interrupt priority from GIC's IPRIORITYR register. +* \param [in] IRQn The interrupt to be queried. +*/ +__STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn) +{ + return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL; +} + +/** \brief Set the interrupt priority mask using CPU's PMR register. +* \param [in] priority Priority mask to be set. 
+*/ +__STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority) +{ + GICInterface->PMR = priority & 0xFFUL; //set priority mask +} + +/** \brief Read the current interrupt priority mask from CPU's PMR register. +* \result GICInterface_Type::PMR +*/ +__STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void) +{ + return GICInterface->PMR; +} + +/** \brief Configures the group priority and subpriority split point using CPU's BPR register. +* \param [in] binary_point Amount of bits used as subpriority. +*/ +__STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point) +{ + GICInterface->BPR = binary_point & 7U; //set binary point +} + +/** \brief Read the current group priority and subpriority split point from CPU's BPR register. +* \return GICInterface_Type::BPR +*/ +__STATIC_INLINE uint32_t GIC_GetBinaryPoint(void) +{ + return GICInterface->BPR; +} + +/** \brief Get the status for a given interrupt. +* \param [in] IRQn The interrupt to get status for. +* \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active +*/ +__STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn) +{ + uint32_t pending, active; + + active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; + pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL; + + return ((active<<1U) | pending); +} + +/** \brief Generate a software interrupt using GIC's SGIR register. +* \param [in] IRQn Software interrupt to be generated. +* \param [in] target_list List of CPUs the software interrupt should be forwarded to. +* \param [in] filter_list Filter to be applied to determine interrupt receivers. +*/ +__STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list) +{ + GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL); +} + +/** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register. 
+* \return GICInterface_Type::HPPIR +*/ +__STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void) +{ + return GICInterface->HPPIR; +} + +/** \brief Provides information about the implementer and revision of the CPU interface. +* \return GICInterface_Type::IIDR +*/ +__STATIC_INLINE uint32_t GIC_GetInterfaceId(void) +{ + return GICInterface->IIDR; +} + +/** \brief Set the interrupt group from the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be queried. +* \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group) +{ + uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U]; + uint32_t shift = (IRQn % 32U); + + igroupr &= (~(1U << shift)); + igroupr |= ( (group & 1U) << shift); + + GICDistributor->IGROUPR[IRQn / 32U] = igroupr; +} +#define GIC_SetSecurity GIC_SetGroup + +/** \brief Get the interrupt group from the GIC's IGROUPR register. +* \param [in] IRQn The interrupt to be queried. +* \return 0 - Group 0, 1 - Group 1 +*/ +__STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn) +{ + return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL; +} +#define GIC_GetSecurity GIC_GetGroup + +/** \brief Initialize the interrupt distributor. +*/ +__STATIC_INLINE void GIC_DistInit(void) +{ + uint32_t i; + uint32_t num_irq = 0U; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable interrupt forwarding + GIC_DisableDistributor(); + //Get the maximum number of interrupts that the GIC supports + num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U); + + /* Priority level is implementation defined. 
+ To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + for (i = 32U; i < num_irq; i++) + { + //Disable the SPI interrupt + GIC_DisableIRQ((IRQn_Type)i); + //Set level-sensitive (and N-N model) + GIC_SetConfiguration((IRQn_Type)i, 0U); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + //Set target list to CPU0 + GIC_SetTarget((IRQn_Type)i, 1U); + } + //Enable distributor + GIC_EnableDistributor(); +} + +/** \brief Initialize the CPU's interrupt interface +*/ +__STATIC_INLINE void GIC_CPUInterfaceInit(void) +{ + uint32_t i; + uint32_t priority_field; + + //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0, + //configuring all of the interrupts as Secure. + + //Disable interrupt forwarding + GIC_DisableInterface(); + + /* Priority level is implementation defined. + To determine the number of priority bits implemented write 0xFF to an IPRIORITYR + priority field and read back the value stored.*/ + GIC_SetPriority((IRQn_Type)0U, 0xFFU); + priority_field = GIC_GetPriority((IRQn_Type)0U); + + //SGI and PPI + for (i = 0U; i < 32U; i++) + { + if(i > 15U) { + //Set level-sensitive (and N-N model) for PPI + GIC_SetConfiguration((IRQn_Type)i, 0U); + } + //Disable SGI and PPI interrupts + GIC_DisableIRQ((IRQn_Type)i); + //Set priority + GIC_SetPriority((IRQn_Type)i, priority_field/2U); + } + //Enable interface + GIC_EnableInterface(); + //Set binary point to 0 + GIC_SetBinaryPoint(0U); + //Set priority mask + GIC_SetInterfacePriorityMask(0xFFU); +} + +/** \brief Initialize and enable the GIC +*/ +__STATIC_INLINE void GIC_Enable(void) +{ + GIC_DistInit(); + GIC_CPUInterfaceInit(); //per CPU +} +#endif + +/* ########################## Generic Timer functions ############################ */ +#if (defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \ + defined(DOXYGEN) + 
+/* PL1 Physical Timer */ +#if (__CORTEX_A == 7U) || defined(DOXYGEN) + +/** \brief Physical Timer Control register */ +typedef union +{ + struct + { + uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */ + uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */ + uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */ + RESERVED(0:29, uint32_t) + } b; /*!< \brief Structure used for bit access */ + uint32_t w; /*!< \brief Type used for word access */ +} CNTP_CTL_Type; + +/** \brief Configures the frequency the timer shall run at. +* \param [in] value The timer frequency in Hz. +*/ +__STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value) +{ + __set_CNTFRQ(value); + __ISB(); +} + +/** \brief Sets the reset value of the timer. +* \param [in] value The value the timer is loaded with. +*/ +__STATIC_INLINE void PL1_SetLoadValue(uint32_t value) +{ + __set_CNTP_TVAL(value); + __ISB(); +} + +/** \brief Get the current counter value. +* \return Current counter value. +*/ +__STATIC_INLINE uint32_t PL1_GetCurrentValue(void) +{ + return(__get_CNTP_TVAL()); +} + +/** \brief Get the current physical counter value. +* \return Current physical counter value. +*/ +__STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void) +{ + return(__get_CNTPCT()); +} + +/** \brief Set the physical compare value. +* \param [in] value New physical timer compare value. +*/ +__STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value) +{ + __set_CNTP_CVAL(value); + __ISB(); +} + +/** \brief Get the physical compare value. +* \return Physical compare value. +*/ +__STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void) +{ + return(__get_CNTP_CVAL()); +} + +/** \brief Configure the timer by setting the control value. +* \param [in] value New timer control value. +*/ +__STATIC_INLINE void PL1_SetControl(uint32_t value) +{ + __set_CNTP_CTL(value); + __ISB(); +} + +/** \brief Get the control value. +* \return Control value. 
+*/ +__STATIC_INLINE uint32_t PL1_GetControl(void) +{ + return(__get_CNTP_CTL()); +} + +/******************************* VIRTUAL TIMER *******************************/ +/** \brief Virtual Timer Control register */ + +/** \brief Sets the reset value of the virtual timer. +* \param [in] value The value the virtual timer is loaded with. +*/ +__STATIC_INLINE void VL1_SetCurrentTimerValue(uint32_t value) +{ + __set_CNTV_TVAL(value); + __ISB(); +} + +/** \brief Get the current virtual timer value. +* \return Current virtual timer value. +*/ +__STATIC_INLINE uint32_t VL1_GetCurrentTimerValue(void) +{ + return(__get_CNTV_TVAL()); +} + +/** \brief Get the current virtual count value. +* \return Current virtual count value. +*/ +__STATIC_INLINE uint64_t VL1_GetCurrentCountValue(void) +{ + return(__get_CNTVCT()); +} + +/** \brief Set the virtual timer compare value. +* \param [in] value New virtual timer compare value. +*/ +__STATIC_INLINE void VL1_SetTimerCompareValue(uint64_t value) +{ + __set_CNTV_CVAL(value); + __ISB(); +} + +/** \brief Get the virtual timer compare value. +* \return Virtual timer compare value. +*/ +__STATIC_INLINE uint64_t VL1_GetTimerCompareValue(void) +{ + return(__get_CNTV_CVAL()); +} + +/** \brief Configure the virtual timer by setting the control value. +* \param [in] value New virtual timer control value. +*/ +__STATIC_INLINE void VL1_SetControl(uint32_t value) +{ + __set_CNTV_CTL(value); + __ISB(); +} + +/** \brief Get the virtual timer control value. +* \return Virtual timer control value. +*/ +__STATIC_INLINE uint32_t VL1_GetControl(void) +{ + return(__get_CNTV_CTL()); +} +/***************************** VIRTUAL TIMER END *****************************/ +#endif + +/* Private Timer */ +#if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN) +/** \brief Set the load value to timers LOAD register. +* \param [in] value The load value to be set. 
+*/ +__STATIC_INLINE void PTIM_SetLoadValue(uint32_t value) +{ + PTIM->LOAD = value; +} + +/** \brief Get the load value from timers LOAD register. +* \return Timer_Type::LOAD +*/ +__STATIC_INLINE uint32_t PTIM_GetLoadValue(void) +{ + return(PTIM->LOAD); +} + +/** \brief Set current counter value from its COUNTER register. +*/ +__STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value) +{ + PTIM->COUNTER = value; +} + +/** \brief Get current counter value from timers COUNTER register. +* \result Timer_Type::COUNTER +*/ +__STATIC_INLINE uint32_t PTIM_GetCurrentValue(void) +{ + return(PTIM->COUNTER); +} + +/** \brief Configure the timer using its CONTROL register. +* \param [in] value The new configuration value to be set. +*/ +__STATIC_INLINE void PTIM_SetControl(uint32_t value) +{ + PTIM->CONTROL = value; +} + +/** ref Timer_Type::CONTROL Get the current timer configuration from its CONTROL register. +* \return Timer_Type::CONTROL +*/ +__STATIC_INLINE uint32_t PTIM_GetControl(void) +{ + return(PTIM->CONTROL); +} + +/** ref Timer_Type::CONTROL Get the event flag in timers ISR register. +* \return 0 - flag is not set, 1- flag is set +*/ +__STATIC_INLINE uint32_t PTIM_GetEventFlag(void) +{ + return (PTIM->ISR & 1UL); +} + +/** ref Timer_Type::CONTROL Clears the event flag in timers ISR register. 
+*/ +__STATIC_INLINE void PTIM_ClearEventFlag(void) +{ + PTIM->ISR = 1; +} +#endif +#endif + +/* ########################## MMU functions ###################################### */ + +#define SECTION_DESCRIPTOR (0x2) +#define SECTION_MASK (0xFFFFFFFC) + +#define SECTION_TEXCB_MASK (0xFFFF8FF3) +#define SECTION_B_SHIFT (2) +#define SECTION_C_SHIFT (3) +#define SECTION_TEX0_SHIFT (12) +#define SECTION_TEX1_SHIFT (13) +#define SECTION_TEX2_SHIFT (14) + +#define SECTION_XN_MASK (0xFFFFFFEF) +#define SECTION_XN_SHIFT (4) + +#define SECTION_DOMAIN_MASK (0xFFFFFE1F) +#define SECTION_DOMAIN_SHIFT (5) + +#define SECTION_P_MASK (0xFFFFFDFF) +#define SECTION_P_SHIFT (9) + +#define SECTION_AP_MASK (0xFFFF73FF) +#define SECTION_AP_SHIFT (10) +#define SECTION_AP2_SHIFT (15) + +#define SECTION_S_MASK (0xFFFEFFFF) +#define SECTION_S_SHIFT (16) + +#define SECTION_NG_MASK (0xFFFDFFFF) +#define SECTION_NG_SHIFT (17) + +#define SECTION_NS_MASK (0xFFF7FFFF) +#define SECTION_NS_SHIFT (19) + +#define PAGE_L1_DESCRIPTOR (0x1) +#define PAGE_L1_MASK (0xFFFFFFFC) + +#define PAGE_L2_4K_DESC (0x2) +#define PAGE_L2_4K_MASK (0xFFFFFFFD) + +#define PAGE_L2_64K_DESC (0x1) +#define PAGE_L2_64K_MASK (0xFFFFFFFC) + +#define PAGE_4K_TEXCB_MASK (0xFFFFFE33) +#define PAGE_4K_B_SHIFT (2) +#define PAGE_4K_C_SHIFT (3) +#define PAGE_4K_TEX0_SHIFT (6) +#define PAGE_4K_TEX1_SHIFT (7) +#define PAGE_4K_TEX2_SHIFT (8) + +#define PAGE_64K_TEXCB_MASK (0xFFFF8FF3) +#define PAGE_64K_B_SHIFT (2) +#define PAGE_64K_C_SHIFT (3) +#define PAGE_64K_TEX0_SHIFT (12) +#define PAGE_64K_TEX1_SHIFT (13) +#define PAGE_64K_TEX2_SHIFT (14) + +#define PAGE_TEXCB_MASK (0xFFFF8FF3) +#define PAGE_B_SHIFT (2) +#define PAGE_C_SHIFT (3) +#define PAGE_TEX_SHIFT (12) + +#define PAGE_XN_4K_MASK (0xFFFFFFFE) +#define PAGE_XN_4K_SHIFT (0) +#define PAGE_XN_64K_MASK (0xFFFF7FFF) +#define PAGE_XN_64K_SHIFT (15) + +#define PAGE_DOMAIN_MASK (0xFFFFFE1F) +#define PAGE_DOMAIN_SHIFT (5) + +#define PAGE_P_MASK (0xFFFFFDFF) +#define PAGE_P_SHIFT (9) + 
+#define PAGE_AP_MASK (0xFFFFFDCF) +#define PAGE_AP_SHIFT (4) +#define PAGE_AP2_SHIFT (9) + +#define PAGE_S_MASK (0xFFFFFBFF) +#define PAGE_S_SHIFT (10) + +#define PAGE_NG_MASK (0xFFFFF7FF) +#define PAGE_NG_SHIFT (11) + +#define PAGE_NS_MASK (0xFFFFFFF7) +#define PAGE_NS_SHIFT (3) + +#define OFFSET_1M (0x00100000) +#define OFFSET_64K (0x00010000) +#define OFFSET_4K (0x00001000) + +#define DESCRIPTOR_FAULT (0x00000000) + +/* Attributes enumerations */ + +/* Region size attributes */ +typedef enum +{ + SECTION, + PAGE_4k, + PAGE_64k, +} mmu_region_size_Type; + +/* Region type attributes */ +typedef enum +{ + NORMAL, + DEVICE, + SHARED_DEVICE, + NON_SHARED_DEVICE, + STRONGLY_ORDERED +} mmu_memory_Type; + +/* Region cacheability attributes */ +typedef enum +{ + NON_CACHEABLE, + WB_WA, + WT, + WB_NO_WA, +} mmu_cacheability_Type; + +/* Region parity check attributes */ +typedef enum +{ + ECC_DISABLED, + ECC_ENABLED, +} mmu_ecc_check_Type; + +/* Region execution attributes */ +typedef enum +{ + EXECUTE, + NON_EXECUTE, +} mmu_execute_Type; + +/* Region global attributes */ +typedef enum +{ + GLOBAL, + NON_GLOBAL, +} mmu_global_Type; + +/* Region shareability attributes */ +typedef enum +{ + NON_SHARED, + SHARED, +} mmu_shared_Type; + +/* Region security attributes */ +typedef enum +{ + SECURE, + NON_SECURE, +} mmu_secure_Type; + +/* Region access attributes */ +typedef enum +{ + NO_ACCESS, + RW, + READ, +} mmu_access_Type; + +/* Memory Region definition */ +typedef struct RegionStruct { + mmu_region_size_Type rg_t; + mmu_memory_Type mem_t; + uint8_t domain; + mmu_cacheability_Type inner_norm_t; + mmu_cacheability_Type outer_norm_t; + mmu_ecc_check_Type e_t; + mmu_execute_Type xn_t; + mmu_global_Type g_t; + mmu_secure_Type sec_t; + mmu_access_Type priv_t; + mmu_access_Type user_t; + mmu_shared_Type sh_t; + +} mmu_region_attributes_Type; + +//Following macros define the descriptors and attributes +//Sect_Normal. 
Outer & inner wb/wa, non-shareable, executable, rw, domain 0 +#define section_normal(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0 +#define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0 +#define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RO. 
Sect_Normal_Cod, but not executable +#define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable +#define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = WB_WA; \ + region.outer_norm_t = WB_WA; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0 +#define section_so(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RO. 
Device, non-shareable, non-executable, ro, domain 0, base addr 0 +#define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = READ; \ + region.user_t = READ; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + +//Sect_Device_RW. Sect_Device_RO, but writeable +#define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = STRONGLY_ORDERED; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); +//Page_4k_Device_RW. Shared device, not executable, rw, domain 0 +#define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +//Page_64k_Device_RW. 
Shared device, not executable, rw, domain 0 +#define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = SHARED_DEVICE; \ + region.sec_t = SECURE; \ + region.xn_t = NON_EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region); + +/** \brief Set section execution-never attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE. + + \return 0 +*/ +__STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn) +{ + *descriptor_l1 &= SECTION_XN_MASK; + *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT); + return 0; +} + +/** \brief Set section domain + + \param [out] descriptor_l1 L1 descriptor. + \param [in] domain Section domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= SECTION_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set section parity check + + \param [out] descriptor_l1 L1 descriptor. + \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set section access privileges + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l1 &= SECTION_AP_MASK; + *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT; + *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT; + + return 0; +} + +/** \brief Set section shareability + + \param [out] descriptor_l1 L1 descriptor. + \param [in] s_bit Section shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit) +{ + *descriptor_l1 &= SECTION_S_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT); + return 0; +} + +/** \brief Set section Global attribute + + \param [out] descriptor_l1 L1 descriptor. + \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit) +{ + *descriptor_l1 &= SECTION_NG_MASK; + *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT); + return 0; +} + +/** \brief Set section Security attribute + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] s_bit Section Security attribute: SECURE, NON_SECURE + + \return 0 +*/ +__STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit) +{ + *descriptor_l1 &= SECTION_NS_MASK; + *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT); + return 0; +} + +/* Page 4k or 64k */ +/** \brief Set 4k/64k page execution-never attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE. + \param [in] page Page size: PAGE_4k, PAGE_64k, + + \return 0 +*/ +__STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page) +{ + if (page == PAGE_4k) + { + *descriptor_l2 &= PAGE_XN_4K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT); + } + else + { + *descriptor_l2 &= PAGE_XN_64K_MASK; + *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT); + } + return 0; +} + +/** \brief Set 4k/64k page domain + + \param [out] descriptor_l1 L1 descriptor. + \param [in] domain Page domain + + \return 0 +*/ +__STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain) +{ + *descriptor_l1 &= PAGE_DOMAIN_MASK; + *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page parity check + + \param [out] descriptor_l1 L1 descriptor. + \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED + + \return 0 +*/ +__STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit) +{ + *descriptor_l1 &= SECTION_P_MASK; + *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page access privileges + + \param [out] descriptor_l2 L2 descriptor. 
+ \param [in] user User Level Access: NO_ACCESS, RW, READ + \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ + \param [in] afe Access flag enable + + \return 0 +*/ +__STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe) +{ + uint32_t ap = 0; + + if (afe == 0) { //full access + if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; } + else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == READ)) { ap = 0x2; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x6; } + } + + else { //Simplified access + if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; } + else if ((priv == RW) && (user == RW)) { ap = 0x3; } + else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; } + else if ((priv == READ) && (user == READ)) { ap = 0x7; } + } + + *descriptor_l2 &= PAGE_AP_MASK; + *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT; + *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT; + + return 0; +} + +/** \brief Set 4k/64k page shareability + + \param [out] descriptor_l2 L2 descriptor. + \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED + + \return 0 +*/ +__STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit) +{ + *descriptor_l2 &= PAGE_S_MASK; + *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Global attribute + + \param [out] descriptor_l2 L2 descriptor. + \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL + + \return 0 +*/ +__STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit) +{ + *descriptor_l2 &= PAGE_NG_MASK; + *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT); + return 0; +} + +/** \brief Set 4k/64k page Security attribute + + \param [out] descriptor_l1 L1 descriptor. 
+ \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE
+
+ \return 0
+*/
+__STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
+{
+ *descriptor_l1 &= PAGE_NS_MASK;
+ *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
+ return 0;
+}
+
+/** \brief Set Section memory attributes
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+ \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+ \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+
+ \return 0
+*/
+__STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
+{
+ *descriptor_l1 &= SECTION_TEXCB_MASK;
+
+ if (STRONGLY_ORDERED == mem)
+ {
+ return 0;
+ }
+ else if (SHARED_DEVICE == mem)
+ {
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+ }
+ else if (NON_SHARED_DEVICE == mem)
+ {
+ *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
+ }
+ else if (NORMAL == mem)
+ {
+ *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
+ switch(inner)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+ break;
+ case WT:
+ *descriptor_l1 |= 1 << SECTION_C_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
+ break;
+ }
+ switch(outer)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
+ break;
+ case WT:
+ *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT); /* outer WB no-WA is TEX[1:0]=0b11; was TEX0 ORed twice */
+ break;
+ }
+ }
+ return 0;
+}
+
+/** \brief Set 4k/64k page memory attributes
+
+ \param [out] descriptor_l2 L2 descriptor. 
+ \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+ \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+ \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+ \param [in] page Page size
+
+ \return 0
+*/
+__STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
+{
+ *descriptor_l2 &= PAGE_4K_TEXCB_MASK;
+
+ if (page == PAGE_64k)
+ {
+ //same as section
+ MMU_MemorySection(descriptor_l2, mem, outer, inner);
+ }
+ else
+ {
+ if (STRONGLY_ORDERED == mem)
+ {
+ return 0;
+ }
+ else if (SHARED_DEVICE == mem)
+ {
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+ }
+ else if (NON_SHARED_DEVICE == mem)
+ {
+ *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
+ }
+ else if (NORMAL == mem)
+ {
+ *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
+ switch(inner)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+ break;
+ case WT:
+ *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
+ break;
+ }
+ switch(outer)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
+ break;
+ case WT:
+ *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT); /* outer WB no-WA is TEX[1:0]=0b11; was TEX0 ORed twice */
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** \brief Create a L1 section descriptor
+
+ \param [out] descriptor L1 descriptor
+ \param [in] reg Section attributes
+
+ \return 0
+*/
+__STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
+{
+ *descriptor = 0;
+
+ MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
+ MMU_XNSection(descriptor,reg.xn_t);
+ MMU_DomainSection(descriptor, reg.domain);
+ MMU_PSection(descriptor, 
reg.e_t); + MMU_APSection(descriptor, reg.user_t, reg.priv_t, 1); + MMU_SharedSection(descriptor,reg.sh_t); + MMU_GlobalSection(descriptor,reg.g_t); + MMU_SecureSection(descriptor,reg.sec_t); + *descriptor &= SECTION_MASK; + *descriptor |= SECTION_DESCRIPTOR; + + return 0; +} + + +/** \brief Create a L1 and L2 4k/64k page descriptor + + \param [out] descriptor L1 descriptor + \param [out] descriptor2 L2 descriptor + \param [in] reg 4k/64k page attributes + + \return 0 +*/ +__STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg) +{ + *descriptor = 0; + *descriptor2 = 0; + + switch (reg.rg_t) + { + case PAGE_4k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_4K_MASK; + *descriptor2 |= PAGE_L2_4K_DESC; + break; + + case PAGE_64k: + MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k); + MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k); + MMU_DomainPage(descriptor, reg.domain); + MMU_PPage(descriptor, reg.e_t); + MMU_APPage(descriptor2, reg.user_t, reg.priv_t, 1); + MMU_SharedPage(descriptor2,reg.sh_t); + MMU_GlobalPage(descriptor2,reg.g_t); + MMU_SecurePage(descriptor,reg.sec_t); + *descriptor &= PAGE_L1_MASK; + *descriptor |= PAGE_L1_DESCRIPTOR; + *descriptor2 &= PAGE_L2_64K_MASK; + *descriptor2 |= PAGE_L2_64K_DESC; + break; + + case SECTION: + //error + break; + } + + return 0; +} + +/** \brief Create a 1MB Section + + \param [in] ttb Translation table base address + \param [in] base_address Section base address + \param [in] count Number of sections to 
create + \param [in] descriptor_l1 L1 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1) +{ + uint32_t offset; + uint32_t entry; + uint32_t i; + + offset = base_address >> 20; + entry = (base_address & 0xFFF00000) | descriptor_l1; + + //4 bytes aligned + ttb = ttb + offset; + + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb++ = entry; + entry += OFFSET_1M; + } +} + +/** \brief Create a 4k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 4k base address + \param [in] count Number of 4k pages to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + + uint32_t offset, offset2; + uint32_t entry, entry2; + uint32_t i; + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFFF000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //4 bytes aligned + *ttb_l2++ = entry2; + entry2 += OFFSET_4K; + } +} + +/** \brief Create a 64k page entry + + \param [in] ttb L1 table base address + \param [in] base_address 64k base address + \param [in] count Number of 64k pages to create + \param [in] descriptor_l1 L1 descriptor (region attributes) + \param [in] ttb_l2 L2 table base address + \param [in] descriptor_l2 L2 descriptor (region attributes) + +*/ +__STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 ) +{ + uint32_t offset, offset2; + uint32_t entry, 
entry2; + uint32_t i,j; + + + offset = base_address >> 20; + entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1; + + //4 bytes aligned + ttb += offset; + //create l1_entry + *ttb = entry; + + offset2 = (base_address & 0xff000) >> 12; + ttb_l2 += offset2; + entry2 = (base_address & 0xFFFF0000) | descriptor_l2; + for (i = 0; i < count; i++ ) + { + //create 16 entries + for (j = 0; j < 16; j++) + { + //4 bytes aligned + *ttb_l2++ = entry2; + } + entry2 += OFFSET_64K; + } +} + +/** \brief Enable MMU +*/ +__STATIC_INLINE void MMU_Enable(void) +{ + // Set M bit 0 to enable the MMU + // Set AFE bit to enable simplified access permissions model + // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking + __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29)); + __ISB(); +} + +/** \brief Disable MMU +*/ +__STATIC_INLINE void MMU_Disable(void) +{ + // Clear M bit 0 to disable the MMU + __set_SCTLR( __get_SCTLR() & ~1); + __ISB(); +} + +/** \brief Invalidate entire unified TLB +*/ + +__STATIC_INLINE void MMU_InvalidateTLB(void) +{ + __set_TLBIALL(0); + __DSB(); //ensure completion of the invalidation + __ISB(); //ensure instruction fetch path sees new state +} + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CA_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm0.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm0.h new file mode 100644 index 0000000..fd23b2a --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm0.h @@ -0,0 +1,967 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M0 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM0_H_GENERIC +#define __CORE_CM0_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M0 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM0 definitions */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000U + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IPR[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << 
SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position 
*/ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/* NVIC_GetActive not available for Cortex-M0 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define 
__NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. 
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ + /* ARM Application Note 321 states that the M0 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm0plus.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm0plus.h new file mode 100644 index 0000000..79ea13a --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm0plus.h @@ -0,0 +1,1103 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * CMSIS Cortex-M0+ Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex-M0+ + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM0+ definitions */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0PLUS_H_DEPENDANT +#define __CORE_CM0PLUS_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0PLUS_REV + #define __CM0PLUS_REV 0x0000U + #warning "__CM0PLUS_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex-M0+ */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IPR[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) 
/*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define 
SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group 
CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< 
SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << 
MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/** \brief MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define 
MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0+ header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/* NVIC_GetActive not available for Cortex-M0+ */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector 
__NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). 
+ \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ +#endif + /* ARM Application Note 321 states that the M0+ does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +#endif +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "m-profile/armv7m_mpu.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm1.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm1.h new file mode 100644 index 0000000..921530c --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm1.h @@ -0,0 +1,992 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M1 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM1_H_GENERIC +#define __CORE_CM1_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M1 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM1 definitions */ + +#define __CORTEX_M (1U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM1_H_DEPENDANT +#define __CORE_CM1_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM1_REV + #define __CM1_REV 0x0100U + #warning "__CM1_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IPR[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << 
SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position 
*/ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */ + +#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ 
+#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M1 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. 
+*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define 
NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). 
+ \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M1 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm23.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm23.h new file mode 100644 index 0000000..a9b71fa --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm23.h @@ -0,0 +1,2253 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * CMSIS Cortex-M23 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM23_H_GENERIC +#define __CORE_CM23_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M23 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM23 definitions */ + +#define __CORTEX_M (23U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM23_H_DEPENDANT +#define __CORE_CM23_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM23_REV + #define __CM23_REV 0x0000U + #warning "__CM23_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M23 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type 
to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define 
SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position 
*/ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define 
SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED14[992U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define 
DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ 
+#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: TPSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU 
Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: 
AFVALID2SS Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1SS Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 
2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2SS Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1SS Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define 
TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection 
Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ 
+#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + 
+#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: 
Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: 
Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/** \brief DIB SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: 
Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). 
+ @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE 
) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined 
(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; + __IOM uint32_t DSCSR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos DCB_DHCSR_S_RESTART_ST_Pos +#define CoreDebug_DHCSR_S_RESTART_ST_Msk DCB_DHCSR_S_RESTART_ST_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define 
CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_DWTENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_DWTENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos DCB_DAUTHCTRL_INTSPNIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk DCB_DAUTHCTRL_INTSPNIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos DCB_DAUTHCTRL_SPNIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk DCB_DAUTHCTRL_SPNIDENSEL_Msk + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos DCB_DAUTHCTRL_INTSPIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk DCB_DAUTHCTRL_INTSPIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos DCB_DAUTHCTRL_SPIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk DCB_DAUTHCTRL_SPIDENSEL_Msk + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos DCB_DSCSR_CDS_Pos +#define CoreDebug_DSCSR_CDS_Msk DCB_DSCSR_CDS_Msk + +#define CoreDebug_DSCSR_SBRSEL_Pos DCB_DSCSR_SBRSEL_Pos +#define CoreDebug_DSCSR_SBRSEL_Msk DCB_DSCSR_SBRSEL_Msk + +#define CoreDebug_DSCSR_SBRSELEN_Pos DCB_DSCSR_SBRSELEN_Pos +#define CoreDebug_DSCSR_SBRSELEN_Msk 
DCB_DSCSR_SBRSELEN_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define CoreDebug_NS ((CoreDebug_Type *) DCB_BASE_NS) +#endif + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/* NVIC_SetPriorityGrouping not available for Cortex-M23 */ +/* NVIC_GetPriorityGrouping not available for Cortex-M23 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector 
__NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 
0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. 
+ In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. 
+ Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm3.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm3.h new file mode 100644 index 0000000..fa624e4 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm3.h @@ -0,0 +1,2045 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M3 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM3_H_GENERIC +#define __CORE_CM3_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M3 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM3 definitions */ + +#define __CORTEX_M (3U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM3_H_DEPENDANT +#define __CORE_CM3_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM3_REV + #define __CM3_REV 0x0200U + #warning "__CM3_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M3 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). 
+ */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: 
ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IPR[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ 
+#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ 
+#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__CM3_REV) && (__CM3_REV < 0x0201U) /* core r2p1 */ +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#else +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define 
SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: 
USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: 
PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define 
SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ 
+#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) 
/*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +#else + uint32_t RESERVED1[1U]; +#endif +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ +#endif + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Lock Status Register */ +} ITM_Type; + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define 
ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Lock Status Register Definitions */ +#define ITM_LSR_BYTEACC_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_BYTEACC_Msk (1UL << ITM_LSR_BYTEACC_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_ACCESS_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_ACCESS_Msk (1UL << ITM_LSR_ACCESS_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_PRESENT_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_PRESENT_Msk (1UL /*<< ITM_LSR_PRESENT_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define 
DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL 
<< DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U 
/*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup 
CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) 
/*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration ETM Data Register Definitions (FIFO0) */ +#define TPIU_FIFO0_ITM_ATVALID_Pos 29U /*!< TPIU FIFO0: ITM_ATVALID Position */ +#define TPIU_FIFO0_ITM_ATVALID_Msk (1UL << TPIU_FIFO0_ITM_ATVALID_Pos) /*!< TPIU FIFO0: ITM_ATVALID Mask */ + +#define TPIU_FIFO0_ITM_bytecount_Pos 27U /*!< TPIU FIFO0: 
ITM_bytecount Position */
#define TPIU_FIFO0_ITM_bytecount_Msk   (0x3UL << TPIU_FIFO0_ITM_bytecount_Pos)     /*!< TPIU FIFO0: ITM_bytecount Mask */

#define TPIU_FIFO0_ETM_ATVALID_Pos     26U                                         /*!< TPIU FIFO0: ETM_ATVALID Position */
#define TPIU_FIFO0_ETM_ATVALID_Msk     (1UL << TPIU_FIFO0_ETM_ATVALID_Pos)         /*!< TPIU FIFO0: ETM_ATVALID Mask */

#define TPIU_FIFO0_ETM_bytecount_Pos   24U                                         /*!< TPIU FIFO0: ETM_bytecount Position */
#define TPIU_FIFO0_ETM_bytecount_Msk   (0x3UL << TPIU_FIFO0_ETM_bytecount_Pos)     /*!< TPIU FIFO0: ETM_bytecount Mask */

#define TPIU_FIFO0_ETM2_Pos            16U                                         /*!< TPIU FIFO0: ETM2 Position */
#define TPIU_FIFO0_ETM2_Msk            (0xFFUL << TPIU_FIFO0_ETM2_Pos)             /*!< TPIU FIFO0: ETM2 Mask */

#define TPIU_FIFO0_ETM1_Pos             8U                                         /*!< TPIU FIFO0: ETM1 Position */
#define TPIU_FIFO0_ETM1_Msk            (0xFFUL << TPIU_FIFO0_ETM1_Pos)             /*!< TPIU FIFO0: ETM1 Mask */

/* NOTE: where a field's Pos is 0, the shift is kept commented out (value unchanged). */
#define TPIU_FIFO0_ETM0_Pos             0U                                         /*!< TPIU FIFO0: ETM0 Position */
#define TPIU_FIFO0_ETM0_Msk            (0xFFUL /*<< TPIU_FIFO0_ETM0_Pos*/)         /*!< TPIU FIFO0: ETM0 Mask */

/** \brief TPIU ITATBCTR2 Register Definitions */
#define TPIU_ITATBCTR2_ATREADY2_Pos     0U                                         /*!< TPIU ITATBCTR2: ATREADY2 Position */
#define TPIU_ITATBCTR2_ATREADY2_Msk    (1UL /*<< TPIU_ITATBCTR2_ATREADY2_Pos*/)    /*!< TPIU ITATBCTR2: ATREADY2 Mask */

#define TPIU_ITATBCTR2_ATREADY1_Pos     0U                                         /*!< TPIU ITATBCTR2: ATREADY1 Position */
#define TPIU_ITATBCTR2_ATREADY1_Msk    (1UL /*<< TPIU_ITATBCTR2_ATREADY1_Pos*/)    /*!< TPIU ITATBCTR2: ATREADY1 Mask */

/** \brief TPIU Integration ITM Data Register Definitions (FIFO1) */
#define TPIU_FIFO1_ITM_ATVALID_Pos     29U                                         /*!< TPIU FIFO1: ITM_ATVALID Position */
#define TPIU_FIFO1_ITM_ATVALID_Msk     (1UL << TPIU_FIFO1_ITM_ATVALID_Pos)         /*!< TPIU FIFO1: ITM_ATVALID Mask */

#define TPIU_FIFO1_ITM_bytecount_Pos   27U                                         /*!< TPIU FIFO1: ITM_bytecount Position */
#define TPIU_FIFO1_ITM_bytecount_Msk   (0x3UL << TPIU_FIFO1_ITM_bytecount_Pos)     /*!< TPIU FIFO1: ITM_bytecount Mask */

#define TPIU_FIFO1_ETM_ATVALID_Pos     26U                                         /*!< TPIU FIFO1: ETM_ATVALID Position */
#define TPIU_FIFO1_ETM_ATVALID_Msk     (1UL << TPIU_FIFO1_ETM_ATVALID_Pos)         /*!< TPIU FIFO1: ETM_ATVALID Mask */

#define TPIU_FIFO1_ETM_bytecount_Pos   24U                                         /*!< TPIU FIFO1: ETM_bytecount Position */
#define TPIU_FIFO1_ETM_bytecount_Msk   (0x3UL << TPIU_FIFO1_ETM_bytecount_Pos)     /*!< TPIU FIFO1: ETM_bytecount Mask */

#define TPIU_FIFO1_ITM2_Pos            16U                                         /*!< TPIU FIFO1: ITM2 Position */
#define TPIU_FIFO1_ITM2_Msk            (0xFFUL << TPIU_FIFO1_ITM2_Pos)             /*!< TPIU FIFO1: ITM2 Mask */

#define TPIU_FIFO1_ITM1_Pos             8U                                         /*!< TPIU FIFO1: ITM1 Position */
#define TPIU_FIFO1_ITM1_Msk            (0xFFUL << TPIU_FIFO1_ITM1_Pos)             /*!< TPIU FIFO1: ITM1 Mask */

#define TPIU_FIFO1_ITM0_Pos             0U                                         /*!< TPIU FIFO1: ITM0 Position */
#define TPIU_FIFO1_ITM0_Msk            (0xFFUL /*<< TPIU_FIFO1_ITM0_Pos*/)         /*!< TPIU FIFO1: ITM0 Mask */

/** \brief TPIU ITATBCTR0 Register Definitions */
#define TPIU_ITATBCTR0_ATREADY2_Pos     0U                                         /*!< TPIU ITATBCTR0: ATREADY2 Position */
#define TPIU_ITATBCTR0_ATREADY2_Msk    (1UL /*<< TPIU_ITATBCTR0_ATREADY2_Pos*/)    /*!< TPIU ITATBCTR0: ATREADY2 Mask */

#define TPIU_ITATBCTR0_ATREADY1_Pos     0U                                         /*!< TPIU ITATBCTR0: ATREADY1 Position */
#define TPIU_ITATBCTR0_ATREADY1_Msk    (1UL /*<< TPIU_ITATBCTR0_ATREADY1_Pos*/)    /*!< TPIU ITATBCTR0: ATREADY1 Mask */

/** \brief TPIU Integration Mode Control Register Definitions */
#define TPIU_ITCTRL_Mode_Pos            0U                                         /*!< TPIU ITCTRL: Mode Position */
#define TPIU_ITCTRL_Mode_Msk           (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/)         /*!< TPIU ITCTRL: Mode Mask */

/** \brief TPIU DEVID Register Definitions */
#define TPIU_DEVID_NRZVALID_Pos        11U                                         /*!< TPIU DEVID: NRZVALID Position */
#define TPIU_DEVID_NRZVALID_Msk        (1UL << TPIU_DEVID_NRZVALID_Pos)            /*!< TPIU DEVID: NRZVALID Mask */

#define TPIU_DEVID_MANCVALID_Pos       10U                                         /*!< TPIU DEVID: MANCVALID Position */
#define TPIU_DEVID_MANCVALID_Msk       (1UL << TPIU_DEVID_MANCVALID_Pos)           /*!< TPIU DEVID: MANCVALID Mask */

#define TPIU_DEVID_PTINVALID_Pos        9U                                         /*!< TPIU DEVID: PTINVALID Position */
#define TPIU_DEVID_PTINVALID_Msk       (1UL << TPIU_DEVID_PTINVALID_Pos)           /*!< TPIU DEVID: PTINVALID Mask */

#define TPIU_DEVID_MinBufSz_Pos         6U                                         /*!< TPIU DEVID: MinBufSz Position */
#define TPIU_DEVID_MinBufSz_Msk        (0x7UL << TPIU_DEVID_MinBufSz_Pos)          /*!< TPIU DEVID: MinBufSz Mask */

#define TPIU_DEVID_AsynClkIn_Pos        5U                                         /*!< TPIU DEVID: AsynClkIn Position */
#define TPIU_DEVID_AsynClkIn_Msk       (1UL << TPIU_DEVID_AsynClkIn_Pos)           /*!< TPIU DEVID: AsynClkIn Mask */

#define TPIU_DEVID_NrTraceInput_Pos     0U                                         /*!< TPIU DEVID: NrTraceInput Position */
#define TPIU_DEVID_NrTraceInput_Msk    (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */

/** \brief TPIU DEVTYPE Register Definitions */
#define TPIU_DEVTYPE_SubType_Pos        4U                                         /*!< TPIU DEVTYPE: SubType Position */
#define TPIU_DEVTYPE_SubType_Msk       (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/)     /*!< TPIU DEVTYPE: SubType Mask */

#define TPIU_DEVTYPE_MajorType_Pos      0U                                         /*!< TPIU DEVTYPE: MajorType Position */
#define TPIU_DEVTYPE_MajorType_Msk     (0xFUL << TPIU_DEVTYPE_MajorType_Pos)       /*!< TPIU DEVTYPE: MajorType Mask */

/*@}*/ /* end of group CMSIS_TPIU */


#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
/**
  \ingroup  CMSIS_core_register
  \defgroup CMSIS_MPU     Memory Protection Unit (MPU)
  \brief    Type definitions for the Memory Protection Unit (MPU)
  @{
 */

/**
  \brief  Structure type to access the Memory Protection Unit (MPU).
 */
typedef struct
{
  __IM  uint32_t TYPE;     /*!< Offset: 0x000 (R/ )  MPU Type Register */
  __IOM uint32_t CTRL;     /*!< Offset: 0x004 (R/W)  MPU Control Register */
  __IOM uint32_t RNR;      /*!< Offset: 0x008 (R/W)  MPU Region Number Register */
  __IOM uint32_t RBAR;     /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register */
  __IOM uint32_t RASR;     /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register */
  __IOM uint32_t RBAR_A1;  /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register */
  __IOM uint32_t RASR_A1;  /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */
  __IOM uint32_t RBAR_A2;  /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register */
  __IOM uint32_t RASR_A2;  /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */
  __IOM uint32_t RBAR_A3;  /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register */
  __IOM uint32_t RASR_A3;  /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */
} MPU_Type;

#define MPU_TYPE_RALIASES       4U

/** \brief MPU Type Register Definitions */
#define MPU_TYPE_IREGION_Pos   16U                                      /*!< MPU TYPE: IREGION Position */
#define MPU_TYPE_IREGION_Msk   (0xFFUL << MPU_TYPE_IREGION_Pos)         /*!< MPU TYPE: IREGION Mask */

#define MPU_TYPE_DREGION_Pos    8U                                      /*!< MPU TYPE: DREGION Position */
#define MPU_TYPE_DREGION_Msk   (0xFFUL << MPU_TYPE_DREGION_Pos)         /*!< MPU TYPE: DREGION Mask */

#define MPU_TYPE_SEPARATE_Pos   0U                                      /*!< MPU TYPE: SEPARATE Position */
#define MPU_TYPE_SEPARATE_Msk  (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)       /*!< MPU TYPE: SEPARATE Mask */

/** \brief MPU Control Register Definitions */
#define MPU_CTRL_PRIVDEFENA_Pos 2U                                      /*!< MPU CTRL: PRIVDEFENA Position */
#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos)        /*!< MPU CTRL: PRIVDEFENA Mask */

#define MPU_CTRL_HFNMIENA_Pos   1U                                      /*!< MPU CTRL: HFNMIENA Position */
#define MPU_CTRL_HFNMIENA_Msk  (1UL << MPU_CTRL_HFNMIENA_Pos)           /*!< MPU CTRL: HFNMIENA Mask */

#define MPU_CTRL_ENABLE_Pos     0U                                      /*!< MPU CTRL: ENABLE Position */
#define MPU_CTRL_ENABLE_Msk    (1UL /*<< MPU_CTRL_ENABLE_Pos*/)         /*!< MPU CTRL: ENABLE Mask */

/** \brief MPU Region Number Register Definitions */
#define MPU_RNR_REGION_Pos      0U                                      /*!< MPU RNR: REGION Position */
#define MPU_RNR_REGION_Msk     (0xFFUL /*<< MPU_RNR_REGION_Pos*/)       /*!< MPU RNR: REGION Mask */

/** \brief MPU Region Base Address Register Definitions */
#define MPU_RBAR_ADDR_Pos       5U                                      /*!< MPU RBAR: ADDR Position */
#define MPU_RBAR_ADDR_Msk      (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)       /*!< MPU RBAR: ADDR Mask */

#define MPU_RBAR_VALID_Pos      4U                                      /*!< MPU RBAR: VALID Position */
#define MPU_RBAR_VALID_Msk     (1UL << MPU_RBAR_VALID_Pos)              /*!< MPU RBAR: VALID Mask */

#define MPU_RBAR_REGION_Pos     0U                                      /*!< MPU RBAR: REGION Position */
#define MPU_RBAR_REGION_Msk    (0xFUL /*<< MPU_RBAR_REGION_Pos*/)       /*!< MPU RBAR: REGION Mask */

/** \brief MPU Region Attribute and Size Register Definitions */
#define MPU_RASR_ATTRS_Pos     16U                                      /*!< MPU RASR: MPU Region Attribute field Position */
#define MPU_RASR_ATTRS_Msk     (0xFFFFUL << MPU_RASR_ATTRS_Pos)         /*!< MPU RASR: MPU Region Attribute field Mask */

#define MPU_RASR_XN_Pos        28U                                      /*!< MPU RASR: ATTRS.XN Position */
#define MPU_RASR_XN_Msk        (1UL << MPU_RASR_XN_Pos)                 /*!< MPU RASR: ATTRS.XN Mask */

#define MPU_RASR_AP_Pos        24U                                      /*!< MPU RASR: ATTRS.AP Position */
#define MPU_RASR_AP_Msk        (0x7UL << MPU_RASR_AP_Pos)               /*!< MPU RASR: ATTRS.AP Mask */

#define MPU_RASR_TEX_Pos       19U                                      /*!< MPU RASR: ATTRS.TEX Position */
#define MPU_RASR_TEX_Msk       (0x7UL << MPU_RASR_TEX_Pos)              /*!< MPU RASR: ATTRS.TEX Mask */

#define MPU_RASR_S_Pos         18U                                      /*!< MPU RASR: ATTRS.S Position */
#define MPU_RASR_S_Msk         (1UL << MPU_RASR_S_Pos)                  /*!< MPU RASR: ATTRS.S Mask */

#define MPU_RASR_C_Pos         17U                                      /*!< MPU RASR: ATTRS.C Position */
#define MPU_RASR_C_Msk         (1UL << MPU_RASR_C_Pos)                  /*!< MPU RASR: ATTRS.C Mask */

#define MPU_RASR_B_Pos         16U                                      /*!< MPU RASR: ATTRS.B Position */
#define MPU_RASR_B_Msk         (1UL << MPU_RASR_B_Pos)                  /*!< MPU RASR: ATTRS.B Mask */

#define MPU_RASR_SRD_Pos        8U                                      /*!< MPU RASR: Sub-Region Disable Position */
#define MPU_RASR_SRD_Msk       (0xFFUL << MPU_RASR_SRD_Pos)             /*!< MPU RASR: Sub-Region Disable Mask */

#define MPU_RASR_SIZE_Pos       1U                                      /*!< MPU RASR: Region Size Field Position */
#define MPU_RASR_SIZE_Msk      (0x1FUL << MPU_RASR_SIZE_Pos)            /*!< MPU RASR: Region Size Field Mask */

#define MPU_RASR_ENABLE_Pos     0U                                      /*!< MPU RASR: Region enable bit Position */
#define MPU_RASR_ENABLE_Msk    (1UL /*<< MPU_RASR_ENABLE_Pos*/)         /*!< MPU RASR: Region enable bit Disable Mask */

/*@} end of group CMSIS_MPU */
#endif


/**
  \ingroup  CMSIS_core_register
  \defgroup CMSIS_DCB Debug Control Block
  \brief    Type definitions for the Debug Control Block Registers
  @{
 */

/**
  \brief  Structure type to access the Debug Control Block Registers (DCB).
 */
typedef struct
{
  __IOM uint32_t DHCSR;  /*!< Offset: 0x000 (R/W)  Debug Halting Control and Status Register */
  __OM  uint32_t DCRSR;  /*!< Offset: 0x004 ( /W)  Debug Core Register Selector Register */
  __IOM uint32_t DCRDR;  /*!< Offset: 0x008 (R/W)  Debug Core Register Data Register */
  __IOM uint32_t DEMCR;  /*!< Offset: 0x00C (R/W)  Debug Exception and Monitor Control Register */
} DCB_Type;

/** \brief DCB Debug Halting Control and Status Register Definitions */
#define DCB_DHCSR_DBGKEY_Pos      16U                                   /*!< DCB DHCSR: Debug key Position */
#define DCB_DHCSR_DBGKEY_Msk      (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos)    /*!< DCB DHCSR: Debug key Mask */

#define DCB_DHCSR_S_RESET_ST_Pos  25U                                   /*!< DCB DHCSR: Reset sticky status Position */
#define DCB_DHCSR_S_RESET_ST_Msk  (1UL << DCB_DHCSR_S_RESET_ST_Pos)     /*!< DCB DHCSR: Reset sticky status Mask */

#define DCB_DHCSR_S_RETIRE_ST_Pos 24U                                   /*!< DCB DHCSR: Retire sticky status Position */
#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos)    /*!< DCB DHCSR: Retire sticky status Mask */

#define DCB_DHCSR_S_LOCKUP_Pos    19U                                   /*!< DCB DHCSR: Lockup status Position */
#define DCB_DHCSR_S_LOCKUP_Msk    (1UL << DCB_DHCSR_S_LOCKUP_Pos)       /*!< DCB DHCSR: Lockup status Mask */

#define DCB_DHCSR_S_SLEEP_Pos     18U                                   /*!< DCB DHCSR: Sleeping status Position */
#define DCB_DHCSR_S_SLEEP_Msk     (1UL << DCB_DHCSR_S_SLEEP_Pos)        /*!< DCB DHCSR: Sleeping status Mask */

#define DCB_DHCSR_S_HALT_Pos      17U                                   /*!< DCB DHCSR: Halted status Position */
#define DCB_DHCSR_S_HALT_Msk      (1UL << DCB_DHCSR_S_HALT_Pos)         /*!< DCB DHCSR: Halted status Mask */

#define DCB_DHCSR_S_REGRDY_Pos    16U                                   /*!< DCB DHCSR: Register ready status Position */
#define DCB_DHCSR_S_REGRDY_Msk    (1UL << DCB_DHCSR_S_REGRDY_Pos)       /*!< DCB DHCSR: Register ready status Mask */

#define DCB_DHCSR_C_SNAPSTALL_Pos  5U                                   /*!< DCB DHCSR: Snap stall control Position */
#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos)    /*!< DCB DHCSR: Snap stall control Mask */

#define DCB_DHCSR_C_MASKINTS_Pos   3U                                   /*!< DCB DHCSR: Mask interrupts control Position */
#define DCB_DHCSR_C_MASKINTS_Msk  (1UL << DCB_DHCSR_C_MASKINTS_Pos)     /*!< DCB DHCSR: Mask interrupts control Mask */

#define DCB_DHCSR_C_STEP_Pos       2U                                   /*!< DCB DHCSR: Step control Position */
#define DCB_DHCSR_C_STEP_Msk      (1UL << DCB_DHCSR_C_STEP_Pos)         /*!< DCB DHCSR: Step control Mask */

#define DCB_DHCSR_C_HALT_Pos       1U                                   /*!< DCB DHCSR: Halt control Position */
#define DCB_DHCSR_C_HALT_Msk      (1UL << DCB_DHCSR_C_HALT_Pos)         /*!< DCB DHCSR: Halt control Mask */

#define DCB_DHCSR_C_DEBUGEN_Pos    0U                                   /*!< DCB DHCSR: Debug enable control Position */
#define DCB_DHCSR_C_DEBUGEN_Msk   (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/)  /*!< DCB DHCSR: Debug enable control Mask */

/** \brief DCB Debug Core Register Selector Register Definitions */
#define DCB_DCRSR_REGWnR_Pos      16U                                   /*!< DCB DCRSR: Register write/not-read Position */
#define DCB_DCRSR_REGWnR_Msk      (1UL << DCB_DCRSR_REGWnR_Pos)         /*!< DCB DCRSR: Register write/not-read Mask */

#define DCB_DCRSR_REGSEL_Pos       0U                                   /*!< DCB DCRSR: Register selector Position */
#define DCB_DCRSR_REGSEL_Msk      (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/)  /*!< DCB DCRSR: Register selector Mask */

/** \brief DCB Debug Core Register Data Register Definitions */
#define DCB_DCRDR_DBGTMP_Pos       0U                                   /*!< DCB DCRDR: Data temporary buffer Position */
#define DCB_DCRDR_DBGTMP_Msk      (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */

/** \brief DCB Debug Exception and Monitor Control Register Definitions */
#define DCB_DEMCR_TRCENA_Pos      24U                                   /*!< DCB DEMCR: Trace enable Position */
#define DCB_DEMCR_TRCENA_Msk      (1UL << DCB_DEMCR_TRCENA_Pos)         /*!< DCB DEMCR: Trace enable Mask */

#define DCB_DEMCR_MON_REQ_Pos     19U                                   /*!< DCB DEMCR: Monitor request Position */
#define DCB_DEMCR_MON_REQ_Msk     (1UL << DCB_DEMCR_MON_REQ_Pos)        /*!< DCB DEMCR: Monitor request Mask */

#define DCB_DEMCR_MON_STEP_Pos    18U                                   /*!< DCB DEMCR: Monitor step Position */
#define DCB_DEMCR_MON_STEP_Msk    (1UL << DCB_DEMCR_MON_STEP_Pos)       /*!< DCB DEMCR: Monitor step Mask */

#define DCB_DEMCR_MON_PEND_Pos    17U                                   /*!< DCB DEMCR: Monitor pend Position */
#define DCB_DEMCR_MON_PEND_Msk    (1UL << DCB_DEMCR_MON_PEND_Pos)       /*!< DCB DEMCR: Monitor pend Mask */

#define DCB_DEMCR_MON_EN_Pos      16U                                   /*!< DCB DEMCR: Monitor enable Position */
#define DCB_DEMCR_MON_EN_Msk      (1UL << DCB_DEMCR_MON_EN_Pos)         /*!< DCB DEMCR: Monitor enable Mask */

#define DCB_DEMCR_VC_HARDERR_Pos  10U                                   /*!< DCB DEMCR: Vector Catch HardFault errors Position */
#define DCB_DEMCR_VC_HARDERR_Msk  (1UL << DCB_DEMCR_VC_HARDERR_Pos)     /*!< DCB DEMCR: Vector Catch HardFault errors Mask */

#define DCB_DEMCR_VC_INTERR_Pos    9U                                   /*!< DCB DEMCR: Vector Catch interrupt errors Position */
#define DCB_DEMCR_VC_INTERR_Msk   (1UL << DCB_DEMCR_VC_INTERR_Pos)      /*!< DCB DEMCR: Vector Catch interrupt errors Mask */

#define DCB_DEMCR_VC_BUSERR_Pos    8U                                   /*!< DCB DEMCR: Vector Catch BusFault errors Position */
#define DCB_DEMCR_VC_BUSERR_Msk   (1UL << DCB_DEMCR_VC_BUSERR_Pos)      /*!< DCB DEMCR: Vector Catch BusFault errors Mask */

#define DCB_DEMCR_VC_STATERR_Pos   7U                                   /*!< DCB DEMCR: Vector Catch state errors Position */
#define DCB_DEMCR_VC_STATERR_Msk  (1UL << DCB_DEMCR_VC_STATERR_Pos)     /*!< DCB DEMCR: Vector Catch state errors Mask */

#define DCB_DEMCR_VC_CHKERR_Pos    6U                                   /*!< DCB DEMCR: Vector Catch check errors Position */
#define DCB_DEMCR_VC_CHKERR_Msk   (1UL << DCB_DEMCR_VC_CHKERR_Pos)      /*!< DCB DEMCR: Vector Catch check errors Mask */

#define DCB_DEMCR_VC_NOCPERR_Pos   5U                                   /*!< DCB DEMCR: Vector Catch NOCP errors Position */
#define DCB_DEMCR_VC_NOCPERR_Msk  (1UL << DCB_DEMCR_VC_NOCPERR_Pos)     /*!< DCB DEMCR: Vector Catch NOCP errors Mask */

#define DCB_DEMCR_VC_MMERR_Pos     4U                                   /*!< DCB DEMCR: Vector Catch MemManage errors Position */
#define DCB_DEMCR_VC_MMERR_Msk    (1UL << DCB_DEMCR_VC_MMERR_Pos)       /*!< DCB DEMCR: Vector Catch MemManage errors Mask */

#define DCB_DEMCR_VC_CORERESET_Pos 0U                                   /*!< DCB DEMCR: Vector Catch Core reset Position */
#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */

/*@} end of group CMSIS_DCB */


/**
  \ingroup    CMSIS_core_register
  \defgroup   CMSIS_core_bitfield     Core register bit field macros
  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
  @{
 */

/**
  \brief   Mask and shift a bit field value for use in a register bit range.
  \param[in] field  Name of the register bit field.
  \param[in] value  Value of the bit field. This parameter is interpreted as an uint32_t type.
  \return           Masked and shifted value.
*/
#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)

/**
  \brief     Mask and shift a register value to extract a bit field value.
  \param[in] field  Name of the register bit field.
  \param[in] value  Value of register. This parameter is interpreted as an uint32_t type.
  \return           Masked and shifted bit field value.
*/
#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)

/*@} end of group CMSIS_core_bitfield */


/**
  \ingroup    CMSIS_core_register
  \defgroup   CMSIS_core_base     Core Definitions
  \brief      Definitions for base addresses, unions, and structures.
  @{
 */

/* Memory mapping of Core Hardware */
#define SCS_BASE            (0xE000E000UL)                             /*!< System Control Space Base Address */
#define ITM_BASE            (0xE0000000UL)                             /*!< ITM Base Address */
#define DWT_BASE            (0xE0001000UL)                             /*!< DWT Base Address */
#define TPIU_BASE           (0xE0040000UL)                             /*!< TPIU Base Address */
#define DCB_BASE            (0xE000EDF0UL)                             /*!< Core Debug Base Address */
#define SysTick_BASE        (SCS_BASE +  0x0010UL)                     /*!< SysTick Base Address */
#define NVIC_BASE           (SCS_BASE +  0x0100UL)                     /*!< NVIC Base Address */
#define SCB_BASE            (SCS_BASE +  0x0D00UL)                     /*!< System Control Block Base Address */

#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )    /*!< System control Register not in SCB */
#define SCB                 ((SCB_Type       *)     SCB_BASE      )    /*!< SCB configuration struct */
#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )    /*!< SysTick configuration struct */
#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )    /*!< NVIC configuration struct */
#define ITM                 ((ITM_Type       *)     ITM_BASE      )    /*!< ITM configuration struct */
#define DWT                 ((DWT_Type       *)     DWT_BASE      )    /*!< DWT configuration struct */
#define TPIU                ((TPIU_Type      *)     TPIU_BASE     )    /*!< TPIU configuration struct */
#define DCB                 ((DCB_Type       *)     DCB_BASE      )    /*!< DCB configuration struct */

#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                     /*!< Memory Protection Unit */
  #define MPU               ((MPU_Type       *)     MPU_BASE      )    /*!< Memory Protection Unit */
#endif

/*@} */


/**
  \defgroup CMSIS_deprecated_aliases     Backwards Compatibility Aliases
  \brief    Alias definitions present for backwards compatibility for deprecated symbols.
  @{
 */

#ifndef CMSIS_DISABLE_DEPRECATED

#define SCB_AIRCR_ENDIANESS_Pos  SCB_AIRCR_ENDIANNESS_Pos
#define SCB_AIRCR_ENDIANESS_Msk  SCB_AIRCR_ENDIANNESS_Msk

/* deprecated, CMSIS_5 backward compatibility */
typedef struct
{
  __IOM uint32_t DHCSR;
  __OM  uint32_t DCRSR;
  __IOM uint32_t DCRDR;
  __IOM uint32_t DEMCR;
} CoreDebug_Type;

/* Debug Halting Control and Status Register Definitions */
#define CoreDebug_DHCSR_DBGKEY_Pos       DCB_DHCSR_DBGKEY_Pos
#define CoreDebug_DHCSR_DBGKEY_Msk       DCB_DHCSR_DBGKEY_Msk

#define CoreDebug_DHCSR_S_RESET_ST_Pos   DCB_DHCSR_S_RESET_ST_Pos
#define CoreDebug_DHCSR_S_RESET_ST_Msk   DCB_DHCSR_S_RESET_ST_Msk

#define CoreDebug_DHCSR_S_RETIRE_ST_Pos  DCB_DHCSR_S_RETIRE_ST_Pos
#define CoreDebug_DHCSR_S_RETIRE_ST_Msk  DCB_DHCSR_S_RETIRE_ST_Msk

#define CoreDebug_DHCSR_S_LOCKUP_Pos     DCB_DHCSR_S_LOCKUP_Pos
#define CoreDebug_DHCSR_S_LOCKUP_Msk     DCB_DHCSR_S_LOCKUP_Msk

#define CoreDebug_DHCSR_S_SLEEP_Pos      DCB_DHCSR_S_SLEEP_Pos
#define CoreDebug_DHCSR_S_SLEEP_Msk      DCB_DHCSR_S_SLEEP_Msk

#define CoreDebug_DHCSR_S_HALT_Pos       DCB_DHCSR_S_HALT_Pos
#define CoreDebug_DHCSR_S_HALT_Msk       DCB_DHCSR_S_HALT_Msk

#define CoreDebug_DHCSR_S_REGRDY_Pos     DCB_DHCSR_S_REGRDY_Pos
#define CoreDebug_DHCSR_S_REGRDY_Msk     DCB_DHCSR_S_REGRDY_Msk

#define CoreDebug_DHCSR_C_SNAPSTALL_Pos  DCB_DHCSR_C_SNAPSTALL_Pos
#define CoreDebug_DHCSR_C_SNAPSTALL_Msk  DCB_DHCSR_C_SNAPSTALL_Msk

#define CoreDebug_DHCSR_C_MASKINTS_Pos   DCB_DHCSR_C_MASKINTS_Pos
#define CoreDebug_DHCSR_C_MASKINTS_Msk   DCB_DHCSR_C_MASKINTS_Msk

#define CoreDebug_DHCSR_C_STEP_Pos       DCB_DHCSR_C_STEP_Pos
#define CoreDebug_DHCSR_C_STEP_Msk       DCB_DHCSR_C_STEP_Msk

#define CoreDebug_DHCSR_C_HALT_Pos       DCB_DHCSR_C_HALT_Pos
#define CoreDebug_DHCSR_C_HALT_Msk       DCB_DHCSR_C_HALT_Msk

#define CoreDebug_DHCSR_C_DEBUGEN_Pos    DCB_DHCSR_C_DEBUGEN_Pos
#define CoreDebug_DHCSR_C_DEBUGEN_Msk    DCB_DHCSR_C_DEBUGEN_Msk

/* Debug Core Register Selector Register Definitions */
#define CoreDebug_DCRSR_REGWnR_Pos       DCB_DCRSR_REGWnR_Pos
#define CoreDebug_DCRSR_REGWnR_Msk       DCB_DCRSR_REGWnR_Msk

#define CoreDebug_DCRSR_REGSEL_Pos       DCB_DCRSR_REGSEL_Pos
#define CoreDebug_DCRSR_REGSEL_Msk       DCB_DCRSR_REGSEL_Msk

/* Debug Exception and Monitor Control Register Definitions */
#define CoreDebug_DEMCR_TRCENA_Pos       DCB_DEMCR_TRCENA_Pos
#define CoreDebug_DEMCR_TRCENA_Msk       DCB_DEMCR_TRCENA_Msk

#define CoreDebug_DEMCR_MON_REQ_Pos      DCB_DEMCR_MON_REQ_Pos
#define CoreDebug_DEMCR_MON_REQ_Msk      DCB_DEMCR_MON_REQ_Msk

#define CoreDebug_DEMCR_MON_STEP_Pos     DCB_DEMCR_MON_STEP_Pos
#define CoreDebug_DEMCR_MON_STEP_Msk     DCB_DEMCR_MON_STEP_Msk

#define CoreDebug_DEMCR_MON_PEND_Pos     DCB_DEMCR_MON_PEND_Pos
#define CoreDebug_DEMCR_MON_PEND_Msk     DCB_DEMCR_MON_PEND_Msk

#define CoreDebug_DEMCR_MON_EN_Pos       DCB_DEMCR_MON_EN_Pos
#define CoreDebug_DEMCR_MON_EN_Msk       DCB_DEMCR_MON_EN_Msk

#define CoreDebug_DEMCR_VC_HARDERR_Pos   DCB_DEMCR_VC_HARDERR_Pos
#define CoreDebug_DEMCR_VC_HARDERR_Msk   DCB_DEMCR_VC_HARDERR_Msk

#define CoreDebug_DEMCR_VC_INTERR_Pos    DCB_DEMCR_VC_INTERR_Pos
#define CoreDebug_DEMCR_VC_INTERR_Msk    DCB_DEMCR_VC_INTERR_Msk

#define CoreDebug_DEMCR_VC_BUSERR_Pos    DCB_DEMCR_VC_BUSERR_Pos
#define CoreDebug_DEMCR_VC_BUSERR_Msk    DCB_DEMCR_VC_BUSERR_Msk

#define CoreDebug_DEMCR_VC_STATERR_Pos   DCB_DEMCR_VC_STATERR_Pos
#define CoreDebug_DEMCR_VC_STATERR_Msk   DCB_DEMCR_VC_STATERR_Msk

#define CoreDebug_DEMCR_VC_CHKERR_Pos    DCB_DEMCR_VC_CHKERR_Pos
#define CoreDebug_DEMCR_VC_CHKERR_Msk    DCB_DEMCR_VC_CHKERR_Msk

#define CoreDebug_DEMCR_VC_NOCPERR_Pos   DCB_DEMCR_VC_NOCPERR_Pos
#define CoreDebug_DEMCR_VC_NOCPERR_Msk   DCB_DEMCR_VC_NOCPERR_Msk

#define CoreDebug_DEMCR_VC_MMERR_Pos     DCB_DEMCR_VC_MMERR_Pos
#define CoreDebug_DEMCR_VC_MMERR_Msk     DCB_DEMCR_VC_MMERR_Msk

#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos
#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk

#define CoreDebug  ((CoreDebug_Type *) DCB_BASE)

#endif // CMSIS_DISABLE_DEPRECATED

/*@} */


/*******************************************************************************
 *                Hardware Abstraction Layer
  Core Function Interface contains:
  - Core NVIC Functions
  - Core SysTick Functions
  - Core Debug Functions
  - Core Register Access Functions
 ******************************************************************************/
/**
  \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
*/



/* ##########################   NVIC functions  #################################### */
/**
  \ingroup  CMSIS_Core_FunctionInterface
  \defgroup CMSIS_Core_NVICFunctions NVIC Functions
  \brief    Functions that manage interrupts and exceptions via the NVIC.
  @{
 */

#ifdef CMSIS_NVIC_VIRTUAL
  #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
    #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
  #endif
  #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
#else
  #define NVIC_SetPriorityGrouping    __NVIC_SetPriorityGrouping
  #define NVIC_GetPriorityGrouping    __NVIC_GetPriorityGrouping
  #define NVIC_EnableIRQ              __NVIC_EnableIRQ
  #define NVIC_GetEnableIRQ           __NVIC_GetEnableIRQ
  #define NVIC_DisableIRQ             __NVIC_DisableIRQ
  #define NVIC_GetPendingIRQ          __NVIC_GetPendingIRQ
  #define NVIC_SetPendingIRQ          __NVIC_SetPendingIRQ
  #define NVIC_ClearPendingIRQ        __NVIC_ClearPendingIRQ
  #define NVIC_GetActive              __NVIC_GetActive
  #define NVIC_SetPriority            __NVIC_SetPriority
  #define NVIC_GetPriority            __NVIC_GetPriority
  #define NVIC_SystemReset            __NVIC_SystemReset
#endif /* CMSIS_NVIC_VIRTUAL */

#ifdef CMSIS_VECTAB_VIRTUAL
  #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
    #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
  #endif
  #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#else
  #define NVIC_SetVector              __NVIC_SetVector
  #define NVIC_GetVector              __NVIC_GetVector
#endif  /* (CMSIS_VECTAB_VIRTUAL) */

#define NVIC_USER_IRQ_OFFSET          16


/* The following EXC_RETURN values are saved in the LR on exception entry */
#define EXC_RETURN_HANDLER         (0xFFFFFFF1UL)     /* return to Handler mode, uses MSP after return */
#define EXC_RETURN_THREAD_MSP      (0xFFFFFFF9UL)     /* return to Thread mode, uses MSP after return */
#define EXC_RETURN_THREAD_PSP      (0xFFFFFFFDUL)     /* return to Thread mode, uses PSP after return */


/**
  \brief   Set Priority Grouping
  \details Sets the priority grouping field using the required unlock sequence.
           The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field.
           Only values from 0..7 are used.
           In case of a conflict between priority grouping and available
           priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
  \param [in]      PriorityGroup  Priority grouping field.
 */
__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup)
{
  uint32_t reg_value;
  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);             /* only values 0..7 are used          */

  reg_value  =  SCB->AIRCR;                                                   /* read old register configuration    */
  reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change               */
  reg_value  =  (reg_value                                   |
                ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
                (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos)  );              /* Insert write key and priority group */
  SCB->AIRCR =  reg_value;
}


/**
  \brief   Get Priority Grouping
  \details Reads the priority grouping field from the NVIC Interrupt Controller.
  \return                Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).
 */
__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void)
{
  return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos));
}


/**
  \brief   Enable Interrupt
  \details Enables a device specific interrupt in the NVIC interrupt controller.
  \param [in]      IRQn  Device specific interrupt number.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    __COMPILER_BARRIER();
    NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
    __COMPILER_BARRIER();
  }
}


/**
  \brief   Get Interrupt Enable status
  \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
  \param [in]      IRQn  Device specific interrupt number.
  \return             0  Interrupt is not enabled.
  \return             1  Interrupt is enabled.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
  }
  else
  {
    return(0U);
  }
}


/**
  \brief   Disable Interrupt
  \details Disables a device specific interrupt in the NVIC interrupt controller.
  \param [in]      IRQn  Device specific interrupt number.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
    __DSB();
    __ISB();
  }
}


/**
  \brief   Get Pending Interrupt
  \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
  \param [in]      IRQn  Device specific interrupt number.
  \return             0  Interrupt status is not pending.
  \return             1  Interrupt status is pending.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
  }
  else
  {
    return(0U);
  }
}


/**
  \brief   Set Pending Interrupt
  \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
  \param [in]      IRQn  Device specific interrupt number.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
  }
}


/**
  \brief   Clear Pending Interrupt
  \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
  \param [in]      IRQn  Device specific interrupt number.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
  }
}


/**
  \brief   Get Active Interrupt
  \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt.
  \param [in]      IRQn  Device specific interrupt number.
  \return             0  Interrupt status is not active.
  \return             1  Interrupt status is active.
  \note    IRQn must not be negative.
 */
__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn)
{
  if ((int32_t)(IRQn) >= 0)
  {
    return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
  }
  else
  {
    return(0U);
  }
}


/**
  \brief   Set Interrupt Priority
  \details Sets the priority of a device specific interrupt or a processor exception.
           The interrupt number can be positive to specify a device specific interrupt,
           or negative to specify a processor exception.
  \param [in]      IRQn  Interrupt number.
  \param [in]  priority  Priority to set.
  \note    The priority cannot be set for every processor exception.
 */
__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
{
  if ((int32_t)(IRQn) >= 0)
  {
    NVIC->IPR[((uint32_t)IRQn)]               = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);
  }
  else
  {
    /* System handler priorities live in SCB->SHPR; index is derived from the negative IRQn */
    SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);
  }
}


/**
  \brief   Get Interrupt Priority
  \details Reads the priority of a device specific interrupt or a processor exception.
           The interrupt number can be positive to specify a device specific interrupt,
           or negative to specify a processor exception.
  \param [in]   IRQn  Interrupt number.
  \return             Interrupt Priority.
                      Value is aligned automatically to the implemented priority bits of the microcontroller.
 */
__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
{

  if ((int32_t)(IRQn) >= 0)
  {
    return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)]               >> (8U - __NVIC_PRIO_BITS)));
  }
  else
  {
    return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS)));
  }
}


/**
  \brief   Encode Priority
  \details Encodes the priority for an interrupt with the given priority group,
           preemptive priority value, and subpriority value.
           In case of a conflict between priority grouping and available
           priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
  \param [in]     PriorityGroup  Used priority group.
  \param [in]   PreemptPriority  Preemptive priority value (starting from 0).
  \param [in]       SubPriority  Subpriority value (starting from 0).
  \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
 */
__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
{
  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */
  uint32_t PreemptPriorityBits;
  uint32_t SubPriorityBits;

  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

  return (
           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))
         );
}


/**
  \brief   Decode Priority
  \details Decodes an interrupt priority value with a given priority group to
           preemptive priority value and subpriority value.
           In case of a conflict between priority grouping and available
           priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
  \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
  \param [in]     PriorityGroup  Used priority group.
  \param [out] pPreemptPriority  Preemptive priority value (starting from 0).
  \param [out]     pSubPriority  Subpriority value (starting from 0).
 */
__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
{
  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */
  uint32_t PreemptPriorityBits;
  uint32_t SubPriorityBits;

  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);
}


/**
  \brief   Set Interrupt Vector
  \details Sets an interrupt vector in SRAM based interrupt vector table.
           The interrupt number can be positive to specify a device specific interrupt,
           or negative to specify a processor exception.
           VTOR must been relocated to SRAM before.
  \param [in]   IRQn      Interrupt number
  \param [in]   vector    Address of interrupt handler function
 */
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{
  uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR);
  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
  /* ARM Application Note 321 states that the M3 does not require the architectural barrier */
}


/**
  \brief   Get Interrupt Vector
  \details Reads an interrupt vector from interrupt vector table.
           The interrupt number can be positive to specify a device specific interrupt,
           or negative to specify a processor exception.
  \param [in]   IRQn      Interrupt number.
  \return                 Address of interrupt handler function
 */
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{
  uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR);
  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
}


/**
  \brief   System Reset
  \details Initiates a system reset request to reset the MCU.
 */
__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
{
  __DSB();                                                          /* Ensure all outstanding memory accesses included
                                                                       buffered write are completed before reset */
  SCB->AIRCR  = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos)    |
                           (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |
                            SCB_AIRCR_SYSRESETREQ_Msk    );         /* Keep priority group unchanged */
  __DSB();                                                          /* Ensure completion of memory access */

  for(;;)                                                           /* wait until reset */
  {
    __NOP();
  }
}

/*@} end of CMSIS_Core_NVICFunctions */


/* ##########################  MPU functions  #################################### */

#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)

#include "m-profile/armv7m_mpu.h"

#endif


/* ##########################  FPU functions  #################################### */
/**
  \ingroup  CMSIS_Core_FunctionInterface
  \defgroup CMSIS_Core_FpuFunctions FPU Functions
  \brief    Function that provides FPU type.
  @{
 */

/**
  \brief   get FPU type
  \details returns the FPU type
  \returns
   - \b  0: No FPU
   - \b  1: Single precision FPU
   - \b  2: Double + Single precision FPU
 */
__STATIC_INLINE uint32_t SCB_GetFPUType(void)
{
  return 0U;           /* No FPU */
}

/*@} end of CMSIS_Core_FpuFunctions */


/* ##################################    SysTick function  ############################################ */
/**
  \ingroup  CMSIS_Core_FunctionInterface
  \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
  \brief    Functions that configure the System.
  @{
 */

#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)

/**
  \brief   System Tick Configuration
  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
           Counter is in free running mode to generate periodic interrupts.
  \param [in]  ticks  Number of ticks between two interrupts.
  \return          0  Function succeeded.
  \return          1  Function failed.
  \note    When the variable __Vendor_SysTickConfig is set to 1, then the
           function SysTick_Config is not included. In this case, the file device.h
           must contain a vendor-specific implementation of this function.
 */
__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
{
  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
  {
    return (1UL);                                                   /* Reload value impossible */
  }

  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
                   SysTick_CTRL_TICKINT_Msk   |
                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
  return (0UL);                                                     /* Function successful */
}

#endif

/*@} end of CMSIS_Core_SysTickFunctions */



/* ##################################### Debug In/Output function ########################################### */
/**
  \ingroup  CMSIS_Core_FunctionInterface
  \defgroup CMSIS_core_DebugFunctions ITM Functions
  \brief    Functions that access the ITM debug interface.
  @{
 */

extern volatile int32_t ITM_RxBuffer;                              /*!< External variable to receive characters. */
#define                 ITM_RXBUFFER_EMPTY  ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */


/**
  \brief   ITM Send Character
  \details Transmits a character via the ITM channel 0, and
           \li Just returns when no debugger is connected that has booked the output.
           \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.
  \param [in]     ch  Character to transmit.
  \returns            Character to transmit.
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm33.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm33.h new file mode 100644 index 0000000..3ec4d4c --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm33.h @@ -0,0 +1,3245 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M33 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM33_H_GENERIC +#define __CORE_CM33_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M33 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM33 definitions */ + +#define __CORTEX_M (33U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined 
(__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined 
(__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM33_H_DEPENDANT +#define __CORE_CM33_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM33_REV + #define __CM33_REV 0x0000U + #warning "__CM33_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M33 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. 
+*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define 
xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access 
Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT 
Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define 
SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + 
+#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define 
SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL 
<< SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: 
SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos 
(SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos 
+ 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR 
Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< 
SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << 
SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way 
Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: 
FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable 
bit Mask */ + +/** \brief ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/** \brief ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/** \brief ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED14[984U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << 
DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: 
PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U 
/*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ 
+#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: TPSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU 
Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: 
AFVALID2S Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1S Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 
2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2S Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1S Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define 
TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define 
MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (1UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute 
Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define 
SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating 
Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< 
FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 
25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPShortvec_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_FPShortvec_Msk (0xFUL << FPU_MVFR0_FPShortvec_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPExceptrap_Pos 12U /*!< MVFR0: Exception trapping bits Position */ +#define FPU_MVFR0_FPExceptrap_Msk (0xFUL << FPU_MVFR0_FPExceptrap_Pos) /*!< MVFR0: Exception trapping bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP 
Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB 
DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk 
(0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL 
<< DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable 
select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/** \brief DIB SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: 
Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). 
+ @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) 
/*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) 
/*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; + __IOM uint32_t DSCSR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos DCB_DHCSR_S_RESTART_ST_Pos +#define CoreDebug_DHCSR_S_RESTART_ST_Msk DCB_DHCSR_S_RESTART_ST_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define 
CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + 
+#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos DCB_DAUTHCTRL_INTSPNIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk DCB_DAUTHCTRL_INTSPNIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos DCB_DAUTHCTRL_SPNIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk DCB_DAUTHCTRL_SPNIDENSEL_Msk + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos DCB_DAUTHCTRL_INTSPIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk DCB_DAUTHCTRL_INTSPIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos DCB_DAUTHCTRL_SPIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk DCB_DAUTHCTRL_SPIDENSEL_Msk + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos DCB_DSCSR_CDS_Pos +#define CoreDebug_DSCSR_CDS_Msk DCB_DSCSR_CDS_Msk + +#define CoreDebug_DSCSR_SBRSEL_Pos DCB_DSCSR_SBRSEL_Pos +#define CoreDebug_DSCSR_SBRSEL_Msk DCB_DSCSR_SBRSEL_Msk + +#define 
CoreDebug_DSCSR_SBRSELEN_Pos DCB_DSCSR_SBRSELEN_Pos +#define CoreDebug_DSCSR_SBRSELEN_Msk DCB_DSCSR_SBRSELEN_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define CoreDebug_NS ((CoreDebug_Type *) DCB_BASE_NS) +#endif + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define 
EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. 
+ \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. 
+ \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be written. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be written + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm35p.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm35p.h new file mode 100644 index 0000000..3ed97b1 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm35p.h @@ -0,0 +1,3245 @@ +/* + * Copyright (c) 2018-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M35P Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM35P_H_GENERIC +#define __CORE_CM35P_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M35P + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM35 definitions */ + +#define __CORTEX_M (35U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined 
(__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined 
(__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM35P_H_DEPENDANT +#define __CORE_CM35P_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM35P_REV + #define __CM35P_REV 0x0000U + #warning "__CM35P_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M35P */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. 
+*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define 
xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access 
Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT 
Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define 
SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + 
+#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define 
SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL 
<< SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: 
SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos 
(SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos 
+ 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR 
Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< 
SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << 
SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way 
Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: 
FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable 
bit Mask */ + +/** \brief ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/** \brief ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/** \brief ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED14[984U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << 
DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: 
PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U 
/*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ 
+#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: TPSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU 
Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2:
AFVALID2S Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1S Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface
2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2S Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1S Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define
TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define 
MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute 
Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define 
SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating 
Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< 
FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 
25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPShortvec_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_FPShortvec_Msk (0xFUL << FPU_MVFR0_FPShortvec_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPExceptrap_Pos 12U /*!< MVFR0: Exception trapping bits Position */ +#define FPU_MVFR0_FPExceptrap_Msk (0xFUL << FPU_MVFR0_FPExceptrap_Pos) /*!< MVFR0: Exception trapping bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP 
Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB 
DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk 
(0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL 
<< DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable 
select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/** \brief DIB SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: 
Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). 
+ @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) 
/*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) 
/*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; + __IOM uint32_t DSCSR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos DCB_DHCSR_S_RESTART_ST_Pos +#define CoreDebug_DHCSR_S_RESTART_ST_Msk DCB_DHCSR_S_RESTART_ST_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define 
CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + 
+#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos DCB_DAUTHCTRL_INTSPNIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk DCB_DAUTHCTRL_INTSPNIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos DCB_DAUTHCTRL_SPNIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk DCB_DAUTHCTRL_SPNIDENSEL_Msk + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos DCB_DAUTHCTRL_INTSPIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk DCB_DAUTHCTRL_INTSPIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos DCB_DAUTHCTRL_SPIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk DCB_DAUTHCTRL_SPIDENSEL_Msk + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos DCB_DSCSR_CDS_Pos +#define CoreDebug_DSCSR_CDS_Msk DCB_DSCSR_CDS_Msk + +#define CoreDebug_DSCSR_SBRSEL_Pos DCB_DSCSR_SBRSEL_Pos +#define CoreDebug_DSCSR_SBRSEL_Msk DCB_DSCSR_SBRSEL_Msk + +#define 
CoreDebug_DSCSR_SBRSELEN_Pos DCB_DSCSR_SBRSELEN_Pos +#define CoreDebug_DSCSR_SBRSELEN_Msk DCB_DSCSR_SBRSELEN_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define CoreDebug_NS ((CoreDebug_Type *) DCB_BASE_NS) +#endif + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define 
EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. 
+ \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. 
+ \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm4.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm4.h new file mode 100644 index 0000000..e84d6f7 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm4.h @@ -0,0 +1,2237 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M4 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM4_H_GENERIC +#define __CORE_CM4_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M4 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM4 definitions */ + +#define __CORTEX_M (4U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined 
(__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM4_H_DEPENDANT +#define __CORE_CM4_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM4_REV + #define __CM4_REV 0x0000U + #warning "__CM4_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M4 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. 
+*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: 
ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IPR[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ 
+#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ 
+#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << 
SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ 
+#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define 
SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): 
MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR 
Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << 
SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Lock Status Register */ +} ITM_Type; + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define 
ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Lock Status Register Definitions */ +#define ITM_LSR_BYTEACC_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_BYTEACC_Msk (1UL << ITM_LSR_BYTEACC_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_ACCESS_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_ACCESS_Msk (1UL << ITM_LSR_ACCESS_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_PRESENT_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_PRESENT_Msk (1UL /*<< ITM_LSR_PRESENT_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define 
DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL 
<< DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U 
/*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup 
CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) 
/*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration ETM Data Register Definitions (FIFO0) */ +#define TPIU_FIFO0_ITM_ATVALID_Pos 29U /*!< TPIU FIFO0: ITM_ATVALID Position */ +#define TPIU_FIFO0_ITM_ATVALID_Msk (1UL << TPIU_FIFO0_ITM_ATVALID_Pos) /*!< TPIU FIFO0: ITM_ATVALID Mask */ + +#define TPIU_FIFO0_ITM_bytecount_Pos 27U /*!< TPIU FIFO0: 
ITM_bytecount Position */ +#define TPIU_FIFO0_ITM_bytecount_Msk (0x3UL << TPIU_FIFO0_ITM_bytecount_Pos) /*!< TPIU FIFO0: ITM_bytecount Mask */ + +#define TPIU_FIFO0_ETM_ATVALID_Pos 26U /*!< TPIU FIFO0: ETM_ATVALID Position */ +#define TPIU_FIFO0_ETM_ATVALID_Msk (1UL << TPIU_FIFO0_ETM_ATVALID_Pos) /*!< TPIU FIFO0: ETM_ATVALID Mask */ + +#define TPIU_FIFO0_ETM_bytecount_Pos 24U /*!< TPIU FIFO0: ETM_bytecount Position */ +#define TPIU_FIFO0_ETM_bytecount_Msk (0x3UL << TPIU_FIFO0_ETM_bytecount_Pos) /*!< TPIU FIFO0: ETM_bytecount Mask */ + +#define TPIU_FIFO0_ETM2_Pos 16U /*!< TPIU FIFO0: ETM2 Position */ +#define TPIU_FIFO0_ETM2_Msk (0xFFUL << TPIU_FIFO0_ETM2_Pos) /*!< TPIU FIFO0: ETM2 Mask */ + +#define TPIU_FIFO0_ETM1_Pos 8U /*!< TPIU FIFO0: ETM1 Position */ +#define TPIU_FIFO0_ETM1_Msk (0xFFUL << TPIU_FIFO0_ETM1_Pos) /*!< TPIU FIFO0: ETM1 Mask */ + +#define TPIU_FIFO0_ETM0_Pos 0U /*!< TPIU FIFO0: ETM0 Position */ +#define TPIU_FIFO0_ETM0_Msk (0xFFUL /*<< TPIU_FIFO0_ETM0_Pos*/) /*!< TPIU FIFO0: ETM0 Mask */ + +/** \brief TPIU ITATBCTR2 Register Definitions */ +#define TPIU_ITATBCTR2_ATREADY2_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2 Position */ +#define TPIU_ITATBCTR2_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2 Mask */ + +#define TPIU_ITATBCTR2_ATREADY1_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1 Position */ +#define TPIU_ITATBCTR2_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1 Mask */ + +/** \brief TPIU Integration ITM Data Register Definitions (FIFO1) */ +#define TPIU_FIFO1_ITM_ATVALID_Pos 29U /*!< TPIU FIFO1: ITM_ATVALID Position */ +#define TPIU_FIFO1_ITM_ATVALID_Msk (1UL << TPIU_FIFO1_ITM_ATVALID_Pos) /*!< TPIU FIFO1: ITM_ATVALID Mask */ + +#define TPIU_FIFO1_ITM_bytecount_Pos 27U /*!< TPIU FIFO1: ITM_bytecount Position */ +#define TPIU_FIFO1_ITM_bytecount_Msk (0x3UL << TPIU_FIFO1_ITM_bytecount_Pos) /*!< TPIU FIFO1: ITM_bytecount Mask */ + +#define TPIU_FIFO1_ETM_ATVALID_Pos 26U /*!< TPIU FIFO1: 
ETM_ATVALID Position */ +#define TPIU_FIFO1_ETM_ATVALID_Msk (1UL << TPIU_FIFO1_ETM_ATVALID_Pos) /*!< TPIU FIFO1: ETM_ATVALID Mask */ + +#define TPIU_FIFO1_ETM_bytecount_Pos 24U /*!< TPIU FIFO1: ETM_bytecount Position */ +#define TPIU_FIFO1_ETM_bytecount_Msk (0x3UL << TPIU_FIFO1_ETM_bytecount_Pos) /*!< TPIU FIFO1: ETM_bytecount Mask */ + +#define TPIU_FIFO1_ITM2_Pos 16U /*!< TPIU FIFO1: ITM2 Position */ +#define TPIU_FIFO1_ITM2_Msk (0xFFUL << TPIU_FIFO1_ITM2_Pos) /*!< TPIU FIFO1: ITM2 Mask */ + +#define TPIU_FIFO1_ITM1_Pos 8U /*!< TPIU FIFO1: ITM1 Position */ +#define TPIU_FIFO1_ITM1_Msk (0xFFUL << TPIU_FIFO1_ITM1_Pos) /*!< TPIU FIFO1: ITM1 Mask */ + +#define TPIU_FIFO1_ITM0_Pos 0U /*!< TPIU FIFO1: ITM0 Position */ +#define TPIU_FIFO1_ITM0_Msk (0xFFUL /*<< TPIU_FIFO1_ITM0_Pos*/) /*!< TPIU FIFO1: ITM0 Mask */ + +/** \brief TPIU ITATBCTR0 Register Definitions */ +#define TPIU_ITATBCTR0_ATREADY2_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2 Position */ +#define TPIU_ITATBCTR0_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2 Mask */ + +#define TPIU_ITATBCTR0_ATREADY1_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1 Position */ +#define TPIU_ITATBCTR0_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1 Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define 
TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_MinBufSz_Pos 6U /*!< TPIU DEVID: MinBufSz Position */ +#define TPIU_DEVID_MinBufSz_Msk (0x7UL << TPIU_DEVID_MinBufSz_Pos) /*!< TPIU DEVID: MinBufSz Mask */ + +#define TPIU_DEVID_AsynClkIn_Pos 5U /*!< TPIU DEVID: AsynClkIn Position */ +#define TPIU_DEVID_AsynClkIn_Msk (1UL << TPIU_DEVID_AsynClkIn_Pos) /*!< TPIU DEVID: AsynClkIn Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define 
MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/** \brief MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << 
MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << 
FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode 
bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPShortvec_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_FPShortvec_Msk (0xFUL << FPU_MVFR0_FPShortvec_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPExceptrap_Pos 12U /*!< MVFR0: Exception trapping bits Position */ +#define FPU_MVFR0_FPExceptrap_Msk (0xFUL << FPU_MVFR0_FPExceptrap_Pos) /*!< MVFR0: Exception trapping bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN 
mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< 
DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ 
+ +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define 
DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ +#define DCB_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ +#define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated 
symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define 
CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) + +#endif // 
CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ 
+#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M4 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "m-profile/armv7m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. 
+ \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm52.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm52.h new file mode 100644 index 0000000..b7daa77 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm52.h @@ -0,0 +1,4783 @@ +/* + * Copyright (c) 2018-2024 Arm Limited. Copyright (c) 2024 Arm Technology (China) Co., Ltd. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M52 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM52_H_GENERIC +#define __CORE_CM52_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M52 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM52 definitions */ + +#define __CORTEX_M (52U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && 
(__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U 
+ #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM52_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM52_H_DEPENDANT +#define __CORE_CM52_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM52_REV + #define __CM52_REV 0x0002U + #warning "__CM52_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __UCACHE_PRESENT + #define __UCACHE_PRESENT 0U + #warning "__UCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" 
+ #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M52 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core EWIC Interrupt Status Access Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ 
+#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:1; /*!< bit: 20 Reserved */ + uint32_t B:1; /*!< bit: 21 BTI active (read 0) */ + uint32_t _reserved2:2; /*!< bit: 22..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << 
xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_B_Pos 21U /*!< xPSR: B Position */ +#define xPSR_B_Msk (1UL << xPSR_B_Pos) /*!< xPSR: B Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t BTI_EN:1; /*!< bit: 4 Privileged branch target identification enable */ + uint32_t UBTI_EN:1; /*!< bit: 5 Unprivileged branch target identification enable */ + uint32_t PAC_EN:1; /*!< bit: 6 Privileged pointer authentication enable */ + uint32_t UPAC_EN:1; /*!< bit: 7 Unprivileged pointer authentication enable */ + uint32_t _reserved1:24; /*!< bit: 8..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_UPAC_EN_Pos 7U /*!< CONTROL: UPAC_EN Position */ +#define CONTROL_UPAC_EN_Msk (1UL << CONTROL_UPAC_EN_Pos) /*!< CONTROL: UPAC_EN Mask */ + +#define CONTROL_PAC_EN_Pos 6U /*!< CONTROL: PAC_EN Position */ +#define CONTROL_PAC_EN_Msk (1UL << CONTROL_PAC_EN_Pos) /*!< CONTROL: PAC_EN Mask */ + +#define CONTROL_UBTI_EN_Pos 5U /*!< CONTROL: UBTI_EN Position */ +#define CONTROL_UBTI_EN_Msk (1UL << CONTROL_UBTI_EN_Pos) /*!< CONTROL: UBTI_EN Mask */ + +#define CONTROL_BTI_EN_Pos 4U /*!< CONTROL: BTI_EN Position */ +#define CONTROL_BTI_EN_Msk (1UL << CONTROL_BTI_EN_Pos) /*!< CONTROL: BTI_EN Mask */ + +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< 
CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access 
Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED0[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED1[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED2[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED3[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED4[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define 
SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ 
+#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + 
+#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << 
SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB 
SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << 
SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< 
SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define 
SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< 
SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/** \brief SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << 
SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +#define SCB_CLIDR_CTYPE1_Pos 0U /*!< SCB CLIDR: CType1 Position */ +#define SCB_CLIDR_CTYPE1_Msk (7UL << SCB_CLIDR_CTYPE1_Pos) /*!< SCB CLIDR: CType1 Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U 
/*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache 
Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + + +/** \brief SCB U-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_UC_WAY_Pos 31U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_UC_WAY_Msk (1UL << SCB_DCISW_UC_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_UC_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_UC_SET_Msk (0x3FFUL << SCB_DCISW_UC_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB U-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_UC_WAY_Pos 31U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_UC_WAY_Msk (1UL << SCB_DCCSW_UC_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_UC_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_UC_SET_Msk (0x3FFUL << SCB_DCCSW_UC_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB U-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_UC_WAY_Pos 31U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_UC_WAY_Msk (1UL << SCB_DCCISW_UC_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_UC_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_UC_SET_Msk (0x3FFUL << SCB_DCCISW_UC_SET_Pos) /*!< SCB DCCISW: Set Mask */ + + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB 
Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/** \brief ICB Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define ICB_ACTLR_DISDI_Msk (3UL << ICB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask 
*/ + +#define ICB_ACTLR_DISOLAP_Pos 7U /*!< ACTLR: DISOLAP Position */ +#define ICB_ACTLR_DISOLAP_Msk (1UL << ICB_ACTLR_DISOLAP_Pos) /*!< ACTLR: DISOLAP Mask */ + +#define ICB_ACTLR_DISOLAPS_Pos 6U /*!< ACTLR: DISOLAPS Position */ +#define ICB_ACTLR_DISOLAPS_Msk (1UL << ICB_ACTLR_DISOLAPS_Pos) /*!< ACTLR: DISOLAPS Mask */ + +#define ICB_ACTLR_DISLOBR_Pos 5U /*!< ACTLR: DISLOBR Position */ +#define ICB_ACTLR_DISLOBR_Msk (1UL << ICB_ACTLR_DISLOBR_Pos) /*!< ACTLR: DISLOBR Mask */ + +#define ICB_ACTLR_DISLO_Pos 4U /*!< ACTLR: DISLO Position */ +#define ICB_ACTLR_DISLO_Msk (1UL << ICB_ACTLR_DISLO_Pos) /*!< ACTLR: DISLO Mask */ + +#define ICB_ACTLR_DISLOLEP_Pos 3U /*!< ACTLR: DISLOLEP Position */ +#define ICB_ACTLR_DISLOLEP_Msk (1UL << ICB_ACTLR_DISLOLEP_Pos) /*!< ACTLR: DISLOLEP Mask */ + +#define ICB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define ICB_ACTLR_DISFOLD_Msk (1UL << ICB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +/** \brief ICB Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: 
FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable 
bit Mask */ + +/** \brief ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/** \brief ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/** \brief ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + __IOM uint32_t VMASK1; /*!< Offset: 0x03C (R/W) Comparator Value Mask 1 */ + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + __IOM uint32_t VMASK3; /*!< Offset: 0x05C (R/W) Comparator Value Mask 3 */ + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP6; /*!< Offset: 
0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED14[968U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define 
DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ 
+#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH 
Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + uint32_t RESERVED1[3U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/** \brief MemSysCtl Memory System Control Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define 
MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define MEMSYSCTL_MSCR_TECCCHKDIS_Msk (1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/** \brief MemSysCtl ITCM Control Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/** \brief MemSysCtl DTCM Control Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/** \brief MemSysCtl P-AHB Control Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << 
MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/** \brief MemSysCtl ITGU Control Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl ITGU Configuration Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/** \brief MemSysCtl DTGU Control Registers Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl DTGU Configuration Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U 
/*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup DCAR_Type Direct Cache Access Registers + \brief Type definitions for the Direct Cache Access Registers (DCAR) + @{ + */ + +/** + \brief Structure type to access the Direct Cache Access Registers (DCAR). + */ +typedef struct +{ + __IM uint32_t DCADCRR; /*!< Offset: 0x000 (R/ ) Direct Cache Access Data Cache Read Register */ + __IM uint32_t DCAICRR; /*!< Offset: 0x004 (R/ ) Direct Cache Access Instruction Cache Read Register */ + uint32_t RESERVED1[2]; + __IOM uint32_t DCADCLR; /*!< Offset: 0x010 (R/W) Direct Cache Access Data Cache Location Registers */ + __IOM uint32_t DCAICLR; /*!< Offset: 0x014 (R/W) Direct Cache Access Instruction Cache Location Registers */ +} DCAR_Type; + +/*@}*/ /* end of group DCAR_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). 
+ */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/** \brief PwrModCtl Core Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/** \brief PwrModCtl Debug Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
+ */ +typedef struct +{ + __IOM uint32_t EWIC_CR; /*!< Offset: 0x000 (R/W) EWIC Control Register */ + __IOM uint32_t EWIC_ASCR; /*!< Offset: 0x004 (R/W) EWIC Automatic Sequence Control Register */ + __OM uint32_t EWIC_CLRMASK; /*!< Offset: 0x008 ( /W) EWIC Clear Mask Register */ + __IM uint32_t EWIC_NUMID; /*!< Offset: 0x00C (R/ ) EWIC Event Number ID Register */ + uint32_t RESERVED0[124U]; + __IOM uint32_t EWIC_MASKA; /*!< Offset: 0x200 (R/W) EWIC MaskA Register */ + __IOM uint32_t EWIC_MASKn[15]; /*!< Offset: 0x204 (R/W) EWIC Maskn Registers */ + uint32_t RESERVED1[112U]; + __IM uint32_t EWIC_PENDA; /*!< Offset: 0x400 (R/ ) EWIC PendA Event Register */ + __IOM uint32_t EWIC_PENDn[15]; /*!< Offset: 0x404 (R/W) EWIC Pendn Event Registers */ + uint32_t RESERVED2[112U]; + __IM uint32_t EWIC_PSR; /*!< Offset: 0x600 (R/ ) EWIC Pend Summary Register */ +} EWIC_Type; + +/** \brief EWIC Control Register Definitions */ +#define EWIC_EWIC_CR_EN_Pos 0U /*!< EWIC EWIC_CR: EN Position */ +#define EWIC_EWIC_CR_EN_Msk (1UL /*<< EWIC_EWIC_CR_EN_Pos*/) /*!< EWIC EWIC_CR: EN Mask */ + +/** \brief EWIC Automatic Sequence Control Register Definitions */ +#define EWIC_EWIC_ASCR_ASPU_Pos 1U /*!< EWIC EWIC_ASCR: ASPU Position */ +#define EWIC_EWIC_ASCR_ASPU_Msk (1UL << EWIC_EWIC_ASCR_ASPU_Pos) /*!< EWIC EWIC_ASCR: ASPU Mask */ + +#define EWIC_EWIC_ASCR_ASPD_Pos 0U /*!< EWIC EWIC_ASCR: ASPD Position */ +#define EWIC_EWIC_ASCR_ASPD_Msk (1UL /*<< EWIC_EWIC_ASCR_ASPD_Pos*/) /*!< EWIC EWIC_ASCR: ASPD Mask */ + +/** \brief EWIC Event Number ID Register Definitions */ +#define EWIC_EWIC_NUMID_NUMEVENT_Pos 0U /*!< EWIC_NUMID: NUMEVENT Position */ +#define EWIC_EWIC_NUMID_NUMEVENT_Msk (0xFFFFUL /*<< EWIC_EWIC_NUMID_NUMEVENT_Pos*/) /*!< EWIC_NUMID: NUMEVENT Mask */ + +/** \brief EWIC Mask A Register Definitions */ +#define EWIC_EWIC_MASKA_EDBGREQ_Pos 2U /*!< EWIC EWIC_MASKA: EDBGREQ Position */ +#define EWIC_EWIC_MASKA_EDBGREQ_Msk (1UL << EWIC_EWIC_MASKA_EDBGREQ_Pos) /*!< EWIC EWIC_MASKA: EDBGREQ 
Mask */ + +#define EWIC_EWIC_MASKA_NMI_Pos 1U /*!< EWIC EWIC_MASKA: NMI Position */ +#define EWIC_EWIC_MASKA_NMI_Msk (1UL << EWIC_EWIC_MASKA_NMI_Pos) /*!< EWIC EWIC_MASKA: NMI Mask */ + +#define EWIC_EWIC_MASKA_EVENT_Pos 0U /*!< EWIC EWIC_MASKA: EVENT Position */ +#define EWIC_EWIC_MASKA_EVENT_Msk (1UL /*<< EWIC_EWIC_MASKA_EVENT_Pos*/) /*!< EWIC EWIC_MASKA: EVENT Mask */ + +/** \brief EWIC Mask n Register Definitions */ +#define EWIC_EWIC_MASKn_IRQ_Pos 0U /*!< EWIC EWIC_MASKn: IRQ Position */ +#define EWIC_EWIC_MASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_MASKn_IRQ_Pos*/) /*!< EWIC EWIC_MASKn: IRQ Mask */ + +/** \brief EWIC Pend A Register Definitions */ +#define EWIC_EWIC_PENDA_EDBGREQ_Pos 2U /*!< EWIC EWIC_PENDA: EDBGREQ Position */ +#define EWIC_EWIC_PENDA_EDBGREQ_Msk (1UL << EWIC_EWIC_PENDA_EDBGREQ_Pos) /*!< EWIC EWIC_PENDA: EDBGREQ Mask */ + +#define EWIC_EWIC_PENDA_NMI_Pos 1U /*!< EWIC EWIC_PENDA: NMI Position */ +#define EWIC_EWIC_PENDA_NMI_Msk (1UL << EWIC_EWIC_PENDA_NMI_Pos) /*!< EWIC EWIC_PENDA: NMI Mask */ + +#define EWIC_EWIC_PENDA_EVENT_Pos 0U /*!< EWIC EWIC_PENDA: EVENT Position */ +#define EWIC_EWIC_PENDA_EVENT_Msk (1UL /*<< EWIC_EWIC_PENDA_EVENT_Pos*/) /*!< EWIC EWIC_PENDA: EVENT Mask */ + +/** \brief EWIC Pend n Register Definitions */ +#define EWIC_EWIC_PENDn_IRQ_Pos 0U /*!< EWIC EWIC_PENDn: IRQ Position */ +#define EWIC_EWIC_PENDn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_PENDn_IRQ_Pos*/) /*!< EWIC EWIC_PENDn: IRQ Mask */ + +/** \brief EWIC Pend Summary Register Definitions */ +#define EWIC_EWIC_PSR_NZ_Pos 1U /*!< EWIC EWIC_PSR: NZ Position */ +#define EWIC_EWIC_PSR_NZ_Msk (0x7FFFUL << EWIC_EWIC_PSR_NZ_Pos) /*!< EWIC EWIC_PSR: NZ Mask */ + +#define EWIC_EWIC_PSR_NZA_Pos 0U /*!< EWIC EWIC_PSR: NZA Position */ +#define EWIC_EWIC_PSR_NZA_Msk (1UL /*<< EWIC_EWIC_PSR_NZA_Pos*/) /*!< EWIC EWIC_PSR: NZA Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_ISA_Type External Wakeup Interrupt Controller (EWIC) 
interrupt status access registers + \brief Type definitions for the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA). + */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/ ) Event Mask A Register */ + __IM uint32_t EVENTMASKn[15]; /*!< Offset: 0x084 (R/ ) Event Mask Register */ +} EWIC_ISA_Type; + +/** \brief EWIC_ISA Event Set Pending Register Definitions */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTSPR: EDBGREQ Position */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTSPR_EDBGREQ_Pos) /*!< EWIC_ISA EVENTSPR: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTSPR_NMI_Pos 1U /*!< EWIC_ISA EVENTSPR: NMI Position */ +#define EWIC_ISA_EVENTSPR_NMI_Msk (1UL << EWIC_ISA_EVENTSPR_NMI_Pos) /*!< EWIC_ISA EVENTSPR: NMI Mask */ + +#define EWIC_ISA_EVENTSPR_EVENT_Pos 0U /*!< EWIC_ISA EVENTSPR: EVENT Position */ +#define EWIC_ISA_EVENTSPR_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTSPR_EVENT_Pos*/) /*!< EWIC_ISA EVENTSPR: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask A Register Definitions */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTMASKA: EDBGREQ Position */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC_ISA EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTMASKA_NMI_Pos 1U /*!< EWIC_ISA EVENTMASKA: NMI Position */ +#define EWIC_ISA_EVENTMASKA_NMI_Msk (1UL << EWIC_ISA_EVENTMASKA_NMI_Pos) /*!< EWIC_ISA EVENTMASKA: NMI Mask */ + +#define EWIC_ISA_EVENTMASKA_EVENT_Pos 0U /*!< EWIC_ISA EVENTMASKA: EVENT Position */ +#define EWIC_ISA_EVENTMASKA_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTMASKA_EVENT_Pos*/) /*!< EWIC_ISA EVENTMASKA: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask n Register Definitions */ +#define 
EWIC_ISA_EVENTMASKn_IRQ_Pos 0U /*!< EWIC_ISA EVENTMASKn: IRQ Position */ +#define EWIC_ISA_EVENTMASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_ISA_EVENTMASKn_IRQ_Pos*/) /*!< EWIC_ISA EVENTMASKn: IRQ Mask */ + +/*@}*/ /* end of group EWIC_ISA_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). + */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + __IM uint32_t TEBRDATA0; /*!< Offset: 0x024 (RO) Storage for corrected data that is associated with an error.*/ + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ + __IM uint32_t TEBRDATA1; /*!< Offset: 0x02c (RO) Storage for corrected data that is associated with an error.*/ +} ErrBnk_Type; + +/** \brief ErrBnk Instruction Cache Error Bank Register 0 Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK 
IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/** \brief ErrBnk Instruction Cache Error Bank Register 1 Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/** \brief ErrBnk Data Cache Error Bank Register 0 Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK 
DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/** \brief ErrBnk Data Cache Error Bank Register 1 Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/** \brief ErrBnk TCM Error Bank Register 0 Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 27U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 26U /*!< ERRBNK TEBR0: TYPE Position */ +#define 
ERRBNK_TEBR0_TYPE_Msk (1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/** \brief ErrBnk TCM Error Bank Register 1 Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 27U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 26U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define 
ERRBNK_TEBR1_VALID_Msk (1UL << /*ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/** \brief PrcCfgInf Processor Configuration Information Selection Register Definitions */ + +/** \brief PrcCfgInf Processor Configuration Information Read Data Register Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup STL_Type Software Test Library Observation Registers + \brief Type definitions for the Software Test Library Observation Registerss (STL) + @{ + */ + +/** + \brief Structure type to access the Software Test Library Observation Registerss (STL). 
+ */ +typedef struct +{ + __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */ + __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sample Register */ + __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */ + __IM uint32_t STLDMPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register */ + +} STL_Type; + +/** \brief STL NVIC Pending Priority Tree Register Definitions */ +#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */ +#define STL_STLNVICPENDOR_VALID_Msk (1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */ + +#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */ +#define STL_STLNVICPENDOR_TARGET_Msk (1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */ + +#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */ +#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */ + +#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */ +#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */ + +/** \brief STL NVIC Active Priority Tree Register Definitions */ +#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */ +#define STL_STLNVICACTVOR_VALID_Msk (1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */ + +#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */ +#define STL_STLNVICACTVOR_TARGET_Msk (1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */ + +#define STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << 
STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/** \brief STL MPU Sample Register Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/** \brief STL MPU Region Hit Register Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register Definitions */ +#define STL_STLDMPUOR_HITREGION_Pos 9U /*!< STL STLDMPUOR: HITREGION Position */ +#define STL_STLDMPUOR_HITREGION_Msk (0xFFUL << STL_STLDMPUOR_HITREGION_Pos) /*!< STL STLDMPUOR: HITREGION Mask */ + +#define STL_STLDMPUOR_ATTR_Pos 0U /*!< STL STLDMPUOR: ATTR Position */ +#define STL_STLDMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLDMPUOR_ATTR_Pos*/) /*!< STL STLDMPUOR: ATTR Mask */ + +/*@}*/ /* end of group STL_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access 
the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU 
Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: TPSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU 
TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << 
TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: AFVALID2SS Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1SS Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define 
TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2SS Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1SS Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU Claim Tag Set Register Definitions */ +#define TPIU_CLAIMSET_SET_Pos 0U /*!< TPIU CLAIMSET: SET Position */ +#define TPIU_CLAIMSET_SET_Msk (0xFUL /*<< TPIU_CLAIMSET_SET_Pos*/) /*!< TPIU CLAIMSET: SET Mask */ + +/** \brief TPIU Claim Tag Clear Register Definitions */ +#define TPIU_CLAIMCLR_CLR_Pos 0U /*!< TPIU CLAIMCLR: CLR Position */ +#define TPIU_CLAIMCLR_CLR_Msk (0xFUL /*<< TPIU_CLAIMCLR_CLR_Pos*/) 
/*!< TPIU CLAIMCLR: CLR Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). 
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) Device Type Register */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< 
PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable 
Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ 
+#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event 
Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ +#define 
PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define
PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU 
CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define 
PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define 
PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + 
+#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define 
PMU_INTENSET_CNT24_ENABLE_Msk (1UL << PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CCYCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ 
+#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event
Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define 
PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define 
PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << 
PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ 
+ +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << 
PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event 
Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ +#define PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< 
PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow
Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position */ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 
17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear 
Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ 
+ +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment 
Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL 
<< PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event 
Counter 30 Software Increment Mask */ + +/** \brief PMU Control Register Definitions */ +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define
PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS:
Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define 
MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT 
Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: 
Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 
5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< 
SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit 
Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit 
Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define 
FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug 
enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define 
DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor 
request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector 
Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (1UL << 
DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select 
Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + uint32_t RESERVED1[3U]; + __IM uint32_t DDEVTYPE; /*!< Offset: 0x01C (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged 
Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: 
Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define DCAR_BASE (0xE001E200UL) /*!< Direct Cache Access Registers */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_ISA_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller interrupt status access Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define EWIC_BASE (0xE0047000UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + 
#define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define DCAR ((DCAR_Type *) DCAR_BASE ) /*!< Direct Read Access to the embedded RAM associated with the L1 instruction and data cache */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC_ISA ((EWIC_ISA_Type *) EWIC_ISA_BASE ) /*!< EWIC interrupt status access struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define 
SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define 
NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. 
+ In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. 
+ \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "m-profile/armv8m_pmu.h" + +/** + \brief Cortex-M52 PMU events + \note Architectural PMU events can be found in armv8m_pmu.h +*/ + +#define ARMCM52_PMU_ECC_ERR 0xC000 /*!< One or more Error Correcting Code (ECC) errors detected */ +#define ARMCM52_PMU_ECC_ERR_MBIT 0xC001 /*!< One or more multi-bit ECC errors detected */ +#define ARMCM52_PMU_ECC_ERR_DCACHE 0xC010 /*!< One or more ECC errors in the data cache */ +#define ARMCM52_PMU_ECC_ERR_ICACHE 0xC011 /*!< One or more ECC errors in the instruction cache */ +#define ARMCM52_PMU_ECC_ERR_MBIT_DCACHE 0xC012 /*!< One or more multi-bit ECC errors in the data cache */ +#define ARMCM52_PMU_ECC_ERR_MBIT_ICACHE 0xC013 /*!< One or more multi-bit ECC errors in the instruction cache */ +#define ARMCM52_PMU_ECC_ERR_DTCM 0xC020 /*!< Any ECC error in the DTCM */ +#define ARMCM52_PMU_ECC_ERR_ITCM 0xC021 /*!< Any ECC error in the ITCM */ +#define ARMCM52_PMU_ECC_ERR_MBIT_DTCM 0xC022 /*!< One or more multi-bit ECC errors in the DTCM */ +#define ARMCM52_PMU_ECC_ERR_MBIT_ITCM 0xC023 /*!< One or more multi-bit ECC errors in the ITCM */ +#define ARMCM52_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM52_PMU_NWAMODE 0xC201 /*!< Write-allocate store 
is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM52_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM52_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access to the P-AHB write interface */ +#define ARMCM52_PMU_AXI_SAHB_WRITE_ACCESS 0xC302 /*!< M-AXI configuration: Any beat access to the M-AXI write interface.M-AHB configuration: Any write beat access to the SYS-AHB interface */ +#define ARMCM52_PMU_AXI_SAHB_READ_ACCESS 0xC303 /*!< M-AXI configuration: Any beat access to the M-AXI read interface.M-AHB configuration: Any read beat access to the SYS-AHB interface */ +#define ARMCM52_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM52_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ +#define ARMCM52_PMU_CDE_INST_RETIRED 0xC402 /*!< CDE instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_CX1_INST_RETIRED 0xC404 /*!< CDE CX1 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_CX2_INST_RETIRED 0xC406 /*!< CDE CX2 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_CX3_INST_RETIRED 0xC408 /*!< CDE CX3 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_VCX1_INST_RETIRED 0xC40A /*!< CDE VCX1 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_VCX2_INST_RETIRED 0xC40C /*!< CDE VCX2 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_VCX3_INST_RETIRED 0xC40E /*!< CDE VCX3 instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_VCX1_VEC_INST_RETIRED 0xC410 /*!< CDE VCX1 Vector instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_VCX2_VEC_INST_RETIRED 0xC412 /*!< CDE VCX2 Vector instruction architecturally executed. 
*/ +#define ARMCM52_PMU_CDE_VCX3_VEC_INST_RETIRED 0xC414 /*!< CDE VCX3 Vector instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_PRED 0xC416 /*!< Cycles where one or more predicated beats of a CDE instruction architecturally executed. */ +#define ARMCM52_PMU_CDE_STALL 0xC417 /*!< Stall cycles caused by a CDE instruction. */ +#define ARMCM52_PMU_CDE_STALL_RESOURCE 0xC418 /*!< Stall cycles caused by a CDE instruction because of resource conflicts */ +#define ARMCM52_PMU_CDE_STALL_DEPENDENCY 0xC419 /*!< Stall cycles caused by a CDE register dependency. */ +#define ARMCM52_PMU_CDE_STALL_CUSTOM 0xC41A /*!< Stall cycles caused by a CDE instruction are generated by the custom hardware. */ +#define ARMCM52_PMU_CDE_STALL_OTHER 0xC41B /*!< Stall cycles caused by a CDE instruction are not covered by the other counters. */ +#define ARMCM52_PMU_CAHB_WRITE_ACCESS 0xC420 /*!< M-AHB configuration: A Write beat transfer on Code-AHB */ +#define ARMCM52_PMU_CAHB_READ_ACCESS 0xC421 /*!< M-AHB configuration: A Read beat transfer on Code-AHB. */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + #include "m-profile/armv7m_cachel1.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. 
+ @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################### PAC Key functions ########################### */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) +#include "m-profile/armv81m_pac.h" +#endif + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. 
+ \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM52_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ + + + + + diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm55.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm55.h new file mode 100644 index 0000000..62ebfc8 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm55.h @@ -0,0 +1,4908 @@ +/* + * Copyright (c) 2018-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M55 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM55_H_GENERIC +#define __CORE_CM55_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M55 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM55 definitions */ + +#define __CORTEX_M (55U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && 
(__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U 
+ #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM55_H_DEPENDANT +#define __CORE_CM55_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM55_REV + #define __CM55_REV 0x0000U + #warning "__CM55_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" 
*/ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M55 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core EWIC Interrupt Status Access Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ 
+#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define 
xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC 
Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access 
Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define 
SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ 
+#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + 
+#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << 
SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB 
SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << 
SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< 
SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define 
SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< 
SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/** \brief SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << 
SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << 
SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB 
DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/** \brief ICB Coprocessor Power Control Register Definitions */ +#define ICB_CPPWR_SUS11_Pos 23U /*!< CPPWR: SUS11 Position */ +#define ICB_CPPWR_SUS11_Msk (1UL << ICB_CPPWR_SUS11_Pos) /*!< CPPWR: SUS11 Mask */ + +#define ICB_CPPWR_SU11_Pos 22U /*!< CPPWR: SU11 Position */ +#define ICB_CPPWR_SU11_Msk (1UL << ICB_CPPWR_SU11_Pos) /*!< CPPWR: SU11 Mask */ + +#define ICB_CPPWR_SUS10_Pos 21U /*!< CPPWR: SUS10 Position */ +#define ICB_CPPWR_SUS10_Msk (1UL << ICB_CPPWR_SUS10_Pos) /*!< CPPWR: SUS10 Mask */ + +#define ICB_CPPWR_SU10_Pos 20U /*!< CPPWR: SU10 Position */ +#define ICB_CPPWR_SU10_Msk (1UL << ICB_CPPWR_SU10_Pos) /*!< CPPWR: SU10 Mask */ + +/** \brief ICB Auxiliary Control Register Definitions */ +#define 
ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define ICB_ACTLR_DISDI_Msk (3UL << ICB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define ICB_ACTLR_DISOLAP_Pos 7U /*!< ACTLR: DISOLAP Position */ +#define ICB_ACTLR_DISOLAP_Msk (1UL << ICB_ACTLR_DISOLAP_Pos) /*!< ACTLR: DISOLAP Mask */ + +#define ICB_ACTLR_DISOLAPS_Pos 6U /*!< ACTLR: DISOLAPS Position */ +#define ICB_ACTLR_DISOLAPS_Msk (1UL << ICB_ACTLR_DISOLAPS_Pos) /*!< ACTLR: DISOLAPS Mask */ + +#define ICB_ACTLR_DISLOBR_Pos 5U /*!< ACTLR: DISLOBR Position */ +#define ICB_ACTLR_DISLOBR_Msk (1UL << ICB_ACTLR_DISLOBR_Pos) /*!< ACTLR: DISLOBR Mask */ + +#define ICB_ACTLR_DISLO_Pos 4U /*!< ACTLR: DISLO Position */ +#define ICB_ACTLR_DISLO_Msk (1UL << 
ICB_ACTLR_DISLO_Pos) /*!< ACTLR: DISLO Mask */ + +#define ICB_ACTLR_DISLOLEP_Pos 3U /*!< ACTLR: DISLOLEP Position */ +#define ICB_ACTLR_DISLOLEP_Msk (1UL << ICB_ACTLR_DISLOLEP_Pos) /*!< ACTLR: DISLOLEP Mask */ + +#define ICB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define ICB_ACTLR_DISFOLD_Msk (1UL << ICB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +/** \brief ICB Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< 
SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define 
ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/** \brief ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ 
+#define ITM_ITWRITE_AFVALID_Msk (1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/** \brief ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + __IOM uint32_t VMASK1; /*!< Offset: 0x03C (R/W) Comparator Value Mask 1 */ + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED4[1U]; + 
__IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + __IOM uint32_t VMASK3; /*!< Offset: 0x05C (R/W) Comparator Value Mask 3 */ + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED14[968U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT 
Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + 
+#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define 
DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). 
+ */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/** \brief MemSysCtl Memory System Control Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define 
MEMSYSCTL_MSCR_TECCCHKDIS_Msk (1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/** \brief MemSysCtl Prefetcher Control Register Definitions */ +#define MEMSYSCTL_PFCR_MAX_OS_Pos 7U /*!< MEMSYSCTL PFCR: MAX_OS Position */ +#define MEMSYSCTL_PFCR_MAX_OS_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_OS_Pos) /*!< MEMSYSCTL PFCR: MAX_OS Mask */ + +#define MEMSYSCTL_PFCR_MAX_LA_Pos 4U /*!< MEMSYSCTL PFCR: MAX_LA Position */ +#define MEMSYSCTL_PFCR_MAX_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_LA_Pos) /*!< MEMSYSCTL PFCR: MAX_LA Mask */ + +#define MEMSYSCTL_PFCR_MIN_LA_Pos 1U /*!< MEMSYSCTL PFCR: MIN_LA Position */ +#define MEMSYSCTL_PFCR_MIN_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MIN_LA_Pos) /*!< MEMSYSCTL PFCR: MIN_LA Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/** \brief MemSysCtl ITCM Control Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/** \brief MemSysCtl DTCM Control Register Definitions */ +#define 
MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/** \brief MemSysCtl P-AHB Control Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/** \brief MemSysCtl ITGU Control Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl ITGU Configuration Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/** \brief MemSysCtl DTGU Control Registers Definitions */ 
+#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl DTGU Configuration Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). 
+ */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/** \brief PwrModCtl Core Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/** \brief PwrModCtl Debug Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
+ */ +typedef struct +{ + __IOM uint32_t EWIC_CR; /*!< Offset: 0x000 (R/W) EWIC Control Register */ + __IOM uint32_t EWIC_ASCR; /*!< Offset: 0x004 (R/W) EWIC Automatic Sequence Control Register */ + __OM uint32_t EWIC_CLRMASK; /*!< Offset: 0x008 ( /W) EWIC Clear Mask Register */ + __IM uint32_t EWIC_NUMID; /*!< Offset: 0x00C (R/ ) EWIC Event Number ID Register */ + uint32_t RESERVED0[124U]; + __IOM uint32_t EWIC_MASKA; /*!< Offset: 0x200 (R/W) EWIC MaskA Register */ + __IOM uint32_t EWIC_MASKn[15]; /*!< Offset: 0x204 (R/W) EWIC Maskn Registers */ + uint32_t RESERVED1[112U]; + __IM uint32_t EWIC_PENDA; /*!< Offset: 0x400 (R/ ) EWIC PendA Event Register */ + __IOM uint32_t EWIC_PENDn[15]; /*!< Offset: 0x404 (R/W) EWIC Pendn Event Registers */ + uint32_t RESERVED2[112U]; + __IM uint32_t EWIC_PSR; /*!< Offset: 0x600 (R/ ) EWIC Pend Summary Register */ +} EWIC_Type; + +/** \brief EWIC Control Register Definitions */ +#define EWIC_EWIC_CR_EN_Pos 0U /*!< EWIC EWIC_CR: EN Position */ +#define EWIC_EWIC_CR_EN_Msk (1UL /*<< EWIC_EWIC_CR_EN_Pos*/) /*!< EWIC EWIC_CR: EN Mask */ + +/** \brief EWIC Automatic Sequence Control Register Definitions */ +#define EWIC_EWIC_ASCR_ASPU_Pos 1U /*!< EWIC EWIC_ASCR: ASPU Position */ +#define EWIC_EWIC_ASCR_ASPU_Msk (1UL << EWIC_EWIC_ASCR_ASPU_Pos) /*!< EWIC EWIC_ASCR: ASPU Mask */ + +#define EWIC_EWIC_ASCR_ASPD_Pos 0U /*!< EWIC EWIC_ASCR: ASPD Position */ +#define EWIC_EWIC_ASCR_ASPD_Msk (1UL /*<< EWIC_EWIC_ASCR_ASPD_Pos*/) /*!< EWIC EWIC_ASCR: ASPD Mask */ + +/** \brief EWIC Event Number ID Register Definitions */ +#define EWIC_EWIC_NUMID_NUMEVENT_Pos 0U /*!< EWIC_NUMID: NUMEVENT Position */ +#define EWIC_EWIC_NUMID_NUMEVENT_Msk (0xFFFFUL /*<< EWIC_EWIC_NUMID_NUMEVENT_Pos*/) /*!< EWIC_NUMID: NUMEVENT Mask */ + +/** \brief EWIC Mask A Register Definitions */ +#define EWIC_EWIC_MASKA_EDBGREQ_Pos 2U /*!< EWIC EWIC_MASKA: EDBGREQ Position */ +#define EWIC_EWIC_MASKA_EDBGREQ_Msk (1UL << EWIC_EWIC_MASKA_EDBGREQ_Pos) /*!< EWIC EWIC_MASKA: EDBGREQ 
Mask */ + +#define EWIC_EWIC_MASKA_NMI_Pos 1U /*!< EWIC EWIC_MASKA: NMI Position */ +#define EWIC_EWIC_MASKA_NMI_Msk (1UL << EWIC_EWIC_MASKA_NMI_Pos) /*!< EWIC EWIC_MASKA: NMI Mask */ + +#define EWIC_EWIC_MASKA_EVENT_Pos 0U /*!< EWIC EWIC_MASKA: EVENT Position */ +#define EWIC_EWIC_MASKA_EVENT_Msk (1UL /*<< EWIC_EWIC_MASKA_EVENT_Pos*/) /*!< EWIC EWIC_MASKA: EVENT Mask */ + +/** \brief EWIC Mask n Register Definitions */ +#define EWIC_EWIC_MASKn_IRQ_Pos 0U /*!< EWIC EWIC_MASKn: IRQ Position */ +#define EWIC_EWIC_MASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_MASKn_IRQ_Pos*/) /*!< EWIC EWIC_MASKn: IRQ Mask */ + +/** \brief EWIC Pend A Register Definitions */ +#define EWIC_EWIC_PENDA_EDBGREQ_Pos 2U /*!< EWIC EWIC_PENDA: EDBGREQ Position */ +#define EWIC_EWIC_PENDA_EDBGREQ_Msk (1UL << EWIC_EWIC_PENDA_EDBGREQ_Pos) /*!< EWIC EWIC_PENDA: EDBGREQ Mask */ + +#define EWIC_EWIC_PENDA_NMI_Pos 1U /*!< EWIC EWIC_PENDA: NMI Position */ +#define EWIC_EWIC_PENDA_NMI_Msk (1UL << EWIC_EWIC_PENDA_NMI_Pos) /*!< EWIC EWIC_PENDA: NMI Mask */ + +#define EWIC_EWIC_PENDA_EVENT_Pos 0U /*!< EWIC EWIC_PENDA: EVENT Position */ +#define EWIC_EWIC_PENDA_EVENT_Msk (1UL /*<< EWIC_EWIC_PENDA_EVENT_Pos*/) /*!< EWIC EWIC_PENDA: EVENT Mask */ + +/** \brief EWIC Pend n Register Definitions */ +#define EWIC_EWIC_PENDn_IRQ_Pos 0U /*!< EWIC EWIC_PENDn: IRQ Position */ +#define EWIC_EWIC_PENDn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_PENDn_IRQ_Pos*/) /*!< EWIC EWIC_PENDn: IRQ Mask */ + +/** \brief EWIC Pend Summary Register Definitions */ +#define EWIC_EWIC_PSR_NZ_Pos 1U /*!< EWIC EWIC_PSR: NZ Position */ +#define EWIC_EWIC_PSR_NZ_Msk (0x7FFFUL << EWIC_EWIC_PSR_NZ_Pos) /*!< EWIC EWIC_PSR: NZ Mask */ + +#define EWIC_EWIC_PSR_NZA_Pos 0U /*!< EWIC EWIC_PSR: NZA Position */ +#define EWIC_EWIC_PSR_NZA_Msk (1UL /*<< EWIC_EWIC_PSR_NZA_Pos*/) /*!< EWIC EWIC_PSR: NZA Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_ISA_Type External Wakeup Interrupt Controller (EWIC) 
interrupt status access registers + \brief Type definitions for the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA). + */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/ ) Event Mask A Register */ + __IM uint32_t EVENTMASKn[15]; /*!< Offset: 0x084 (R/ ) Event Mask Register */ +} EWIC_ISA_Type; + +/** \brief EWIC_ISA Event Set Pending Register Definitions */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTSPR: EDBGREQ Position */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTSPR_EDBGREQ_Pos) /*!< EWIC_ISA EVENTSPR: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTSPR_NMI_Pos 1U /*!< EWIC_ISA EVENTSPR: NMI Position */ +#define EWIC_ISA_EVENTSPR_NMI_Msk (1UL << EWIC_ISA_EVENTSPR_NMI_Pos) /*!< EWIC_ISA EVENTSPR: NMI Mask */ + +#define EWIC_ISA_EVENTSPR_EVENT_Pos 0U /*!< EWIC_ISA EVENTSPR: EVENT Position */ +#define EWIC_ISA_EVENTSPR_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTSPR_EVENT_Pos*/) /*!< EWIC_ISA EVENTSPR: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask A Register Definitions */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTMASKA: EDBGREQ Position */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC_ISA EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTMASKA_NMI_Pos 1U /*!< EWIC_ISA EVENTMASKA: NMI Position */ +#define EWIC_ISA_EVENTMASKA_NMI_Msk (1UL << EWIC_ISA_EVENTMASKA_NMI_Pos) /*!< EWIC_ISA EVENTMASKA: NMI Mask */ + +#define EWIC_ISA_EVENTMASKA_EVENT_Pos 0U /*!< EWIC_ISA EVENTMASKA: EVENT Position */ +#define EWIC_ISA_EVENTMASKA_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTMASKA_EVENT_Pos*/) /*!< EWIC_ISA EVENTMASKA: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask n Register Definitions */ +#define 
EWIC_ISA_EVENTMASKn_IRQ_Pos 0U /*!< EWIC_ISA EVENTMASKn: IRQ Position */ +#define EWIC_ISA_EVENTMASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_ISA_EVENTMASKn_IRQ_Pos*/) /*!< EWIC_ISA EVENTMASKn: IRQ Mask */ + +/*@}*/ /* end of group EWIC_ISA_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). + */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/** \brief ErrBnk Instruction Cache Error Bank Register 0 Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ 
+#define ERRBNK_IEBR0_VALID_Msk             (1UL /*<< ERRBNK_IEBR0_VALID_Pos*/)            /*!< ERRBNK IEBR0: VALID Mask */
+
+/** \brief ErrBnk Instruction Cache Error Bank Register 1 Definitions */
+#define ERRBNK_IEBR1_SWDEF_Pos             30U                                            /*!< ERRBNK IEBR1: SWDEF Position */
+#define ERRBNK_IEBR1_SWDEF_Msk             (0x3UL << ERRBNK_IEBR1_SWDEF_Pos)              /*!< ERRBNK IEBR1: SWDEF Mask */
+
+#define ERRBNK_IEBR1_BANK_Pos              16U                                            /*!< ERRBNK IEBR1: BANK Position */
+#define ERRBNK_IEBR1_BANK_Msk              (1UL << ERRBNK_IEBR1_BANK_Pos)                 /*!< ERRBNK IEBR1: BANK Mask */
+
+#define ERRBNK_IEBR1_LOCATION_Pos          2U                                             /*!< ERRBNK IEBR1: LOCATION Position */
+#define ERRBNK_IEBR1_LOCATION_Msk          (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos)        /*!< ERRBNK IEBR1: LOCATION Mask */
+
+#define ERRBNK_IEBR1_LOCKED_Pos            1U                                             /*!< ERRBNK IEBR1: LOCKED Position */
+#define ERRBNK_IEBR1_LOCKED_Msk            (1UL << ERRBNK_IEBR1_LOCKED_Pos)               /*!< ERRBNK IEBR1: LOCKED Mask */
+
+#define ERRBNK_IEBR1_VALID_Pos             0U                                             /*!< ERRBNK IEBR1: VALID Position */
+#define ERRBNK_IEBR1_VALID_Msk             (1UL /*<< ERRBNK_IEBR1_VALID_Pos*/)            /*!< ERRBNK IEBR1: VALID Mask */
+
+/** \brief ErrBnk Data Cache Error Bank Register 0 Definitions */
+#define ERRBNK_DEBR0_SWDEF_Pos             30U                                            /*!< ERRBNK DEBR0: SWDEF Position */
+#define ERRBNK_DEBR0_SWDEF_Msk             (0x3UL << ERRBNK_DEBR0_SWDEF_Pos)              /*!< ERRBNK DEBR0: SWDEF Mask */
+
+#define ERRBNK_DEBR0_TYPE_Pos              17U                                            /*!< ERRBNK DEBR0: TYPE Position */
+#define ERRBNK_DEBR0_TYPE_Msk              (1UL << ERRBNK_DEBR0_TYPE_Pos)                 /*!< ERRBNK DEBR0: TYPE Mask */
+
+#define ERRBNK_DEBR0_BANK_Pos              16U                                            /*!< ERRBNK DEBR0: BANK Position */
+#define ERRBNK_DEBR0_BANK_Msk              (1UL << ERRBNK_DEBR0_BANK_Pos)                 /*!< ERRBNK DEBR0: BANK Mask */
+
+#define ERRBNK_DEBR0_LOCATION_Pos          2U                                             /*!< ERRBNK DEBR0: LOCATION Position */
+#define ERRBNK_DEBR0_LOCATION_Msk          (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos)        /*!< ERRBNK DEBR0: LOCATION Mask */
+
+#define ERRBNK_DEBR0_LOCKED_Pos            1U                                             /*!< ERRBNK DEBR0: LOCKED Position */
+#define ERRBNK_DEBR0_LOCKED_Msk            (1UL << ERRBNK_DEBR0_LOCKED_Pos)               /*!< ERRBNK DEBR0: LOCKED Mask */
+
+#define ERRBNK_DEBR0_VALID_Pos             0U                                             /*!< ERRBNK DEBR0: VALID Position */
+#define ERRBNK_DEBR0_VALID_Msk             (1UL /*<< ERRBNK_DEBR0_VALID_Pos*/)            /*!< ERRBNK DEBR0: VALID Mask */
+
+/** \brief ErrBnk Data Cache Error Bank Register 1 Definitions */
+#define ERRBNK_DEBR1_SWDEF_Pos             30U                                            /*!< ERRBNK DEBR1: SWDEF Position */
+#define ERRBNK_DEBR1_SWDEF_Msk             (0x3UL << ERRBNK_DEBR1_SWDEF_Pos)              /*!< ERRBNK DEBR1: SWDEF Mask */
+
+#define ERRBNK_DEBR1_TYPE_Pos              17U                                            /*!< ERRBNK DEBR1: TYPE Position */
+#define ERRBNK_DEBR1_TYPE_Msk              (1UL << ERRBNK_DEBR1_TYPE_Pos)                 /*!< ERRBNK DEBR1: TYPE Mask */
+
+#define ERRBNK_DEBR1_BANK_Pos              16U                                            /*!< ERRBNK DEBR1: BANK Position */
+#define ERRBNK_DEBR1_BANK_Msk              (1UL << ERRBNK_DEBR1_BANK_Pos)                 /*!< ERRBNK DEBR1: BANK Mask */
+
+#define ERRBNK_DEBR1_LOCATION_Pos          2U                                             /*!< ERRBNK DEBR1: LOCATION Position */
+#define ERRBNK_DEBR1_LOCATION_Msk          (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos)        /*!< ERRBNK DEBR1: LOCATION Mask */
+
+#define ERRBNK_DEBR1_LOCKED_Pos            1U                                             /*!< ERRBNK DEBR1: LOCKED Position */
+#define ERRBNK_DEBR1_LOCKED_Msk            (1UL << ERRBNK_DEBR1_LOCKED_Pos)               /*!< ERRBNK DEBR1: LOCKED Mask */
+
+#define ERRBNK_DEBR1_VALID_Pos             0U                                             /*!< ERRBNK DEBR1: VALID Position */
+#define ERRBNK_DEBR1_VALID_Msk             (1UL /*<< ERRBNK_DEBR1_VALID_Pos*/)            /*!< ERRBNK DEBR1: VALID Mask */
+
+/** \brief ErrBnk TCM Error Bank Register 0 Definitions */
+#define ERRBNK_TEBR0_SWDEF_Pos             30U                                            /*!< ERRBNK TEBR0: SWDEF Position */
+#define ERRBNK_TEBR0_SWDEF_Msk             (0x3UL << ERRBNK_TEBR0_SWDEF_Pos)              /*!< ERRBNK TEBR0: SWDEF Mask */
+
+#define ERRBNK_TEBR0_POISON_Pos            28U                                            /*!< ERRBNK TEBR0: POISON Position */
+#define ERRBNK_TEBR0_POISON_Msk            (1UL << ERRBNK_TEBR0_POISON_Pos)               /*!< ERRBNK TEBR0: POISON Mask */
+
+#define ERRBNK_TEBR0_TYPE_Pos              27U                                            /*!< ERRBNK TEBR0: TYPE Position */
+#define ERRBNK_TEBR0_TYPE_Msk              (1UL << ERRBNK_TEBR0_TYPE_Pos)                 /*!< ERRBNK TEBR0: TYPE Mask */
+
+#define ERRBNK_TEBR0_BANK_Pos              24U                                            /*!< ERRBNK TEBR0: BANK Position */
+#define ERRBNK_TEBR0_BANK_Msk              (0x7UL << ERRBNK_TEBR0_BANK_Pos)               /*!< ERRBNK TEBR0: BANK Mask */
+
+#define ERRBNK_TEBR0_LOCATION_Pos          2U                                             /*!< ERRBNK TEBR0: LOCATION Position */
+#define ERRBNK_TEBR0_LOCATION_Msk          (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos)      /*!< ERRBNK TEBR0: LOCATION Mask */
+
+#define ERRBNK_TEBR0_LOCKED_Pos            1U                                             /*!< ERRBNK TEBR0: LOCKED Position */
+#define ERRBNK_TEBR0_LOCKED_Msk            (1UL << ERRBNK_TEBR0_LOCKED_Pos)               /*!< ERRBNK TEBR0: LOCKED Mask */
+
+#define ERRBNK_TEBR0_VALID_Pos             0U                                             /*!< ERRBNK TEBR0: VALID Position */
+#define ERRBNK_TEBR0_VALID_Msk             (1UL /*<< ERRBNK_TEBR0_VALID_Pos*/)            /*!< ERRBNK TEBR0: VALID Mask */
+
+/** \brief ErrBnk TCM Error Bank Register 1 Definitions */
+#define ERRBNK_TEBR1_SWDEF_Pos             30U                                            /*!< ERRBNK TEBR1: SWDEF Position */
+#define ERRBNK_TEBR1_SWDEF_Msk             (0x3UL << ERRBNK_TEBR1_SWDEF_Pos)              /*!< ERRBNK TEBR1: SWDEF Mask */
+
+#define ERRBNK_TEBR1_POISON_Pos            28U                                            /*!< ERRBNK TEBR1: POISON Position */
+#define ERRBNK_TEBR1_POISON_Msk            (1UL << ERRBNK_TEBR1_POISON_Pos)               /*!< ERRBNK TEBR1: POISON Mask */
+
+#define ERRBNK_TEBR1_TYPE_Pos              27U                                            /*!< ERRBNK TEBR1: TYPE Position */
+#define ERRBNK_TEBR1_TYPE_Msk              (1UL << ERRBNK_TEBR1_TYPE_Pos)                 /*!< ERRBNK TEBR1: TYPE Mask */
+
+#define ERRBNK_TEBR1_BANK_Pos              24U                                            /*!< ERRBNK TEBR1: BANK Position */
+#define ERRBNK_TEBR1_BANK_Msk              (0x7UL << ERRBNK_TEBR1_BANK_Pos)               /*!< ERRBNK TEBR1: BANK Mask */
+
+#define ERRBNK_TEBR1_LOCATION_Pos          2U                                             /*!< ERRBNK TEBR1: LOCATION Position */
+#define ERRBNK_TEBR1_LOCATION_Msk          (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos)      /*!< ERRBNK TEBR1: LOCATION Mask */
+
+#define ERRBNK_TEBR1_LOCKED_Pos            1U                                             /*!< ERRBNK TEBR1: LOCKED Position */
+#define ERRBNK_TEBR1_LOCKED_Msk            (1UL << ERRBNK_TEBR1_LOCKED_Pos)               /*!< ERRBNK TEBR1: LOCKED Mask */
+
+#define ERRBNK_TEBR1_VALID_Pos             0U                                             /*!< ERRBNK TEBR1: VALID Position */
+#define ERRBNK_TEBR1_VALID_Msk             (1UL /*<< ERRBNK_TEBR1_VALID_Pos*/)            /*!< ERRBNK TEBR1: VALID Mask */
+
+/*@}*/ /* end of group ErrBnk_Type */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   PrcCfgInf_Type 
Processor Configuration Information Registers (IMPLEMENTATION DEFINED)
+  \brief      Type definitions for the Processor Configuration Information Registers (PRCCFGINF)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Processor Configuration Information Registers (PRCCFGINF).
+ */
+typedef struct
+{
+  __OM  uint32_t CFGINFOSEL;             /*!< Offset: 0x000 ( /W)  Processor Configuration Information Selection Register */
+  __IM  uint32_t CFGINFORD;              /*!< Offset: 0x004 (R/ )  Processor Configuration Information Read Data Register */
+} PrcCfgInf_Type;
+
+/** \brief PrcCfgInf Processor Configuration Information Selection Register Definitions */
+
+/** \brief PrcCfgInf Processor Configuration Information Read Data Register Definitions */
+
+/*@}*/ /* end of group PrcCfgInf_Type */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   STL_Type Software Test Library Observation Registers
+  \brief      Type definitions for the Software Test Library Observation Registers (STL)
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Software Test Library Observation Registers (STL).
+ */ +typedef struct +{ + __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */ + __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sample Register */ + __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */ + __IM uint32_t STLD0MPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register 0 */ + __IM uint32_t STLD1MPUOR; /*!< Offset: 0x01C (R/ ) MPU Memory Attributes Register 1 */ + +} STL_Type; + +/** \brief STL NVIC Pending Priority Tree Register Definitions */ +#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */ +#define STL_STLNVICPENDOR_VALID_Msk (1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */ + +#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */ +#define STL_STLNVICPENDOR_TARGET_Msk (1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */ + +#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */ +#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */ + +#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */ +#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */ + +/** \brief STL NVIC Active Priority Tree Register Definitions */ +#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */ +#define STL_STLNVICACTVOR_VALID_Msk (1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */ + +#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */ +#define STL_STLNVICACTVOR_TARGET_Msk (1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */ + +#define STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL 
STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/** \brief STL MPU Sample Register Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/** \brief STL MPU Region Hit Register Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 0 Definitions */ +#define STL_STLD0MPUOR_HITREGION_Pos 9U /*!< STL STLD0MPUOR: HITREGION Position */ +#define STL_STLD0MPUOR_HITREGION_Msk (0xFFUL << STL_STLD0MPUOR_HITREGION_Pos) /*!< STL STLD0MPUOR: HITREGION Mask */ + +#define STL_STLD0MPUOR_ATTR_Pos 0U /*!< STL STLD0MPUOR: ATTR Position */ +#define STL_STLD0MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD0MPUOR_ATTR_Pos*/) /*!< STL STLD0MPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 1 Definitions */ +#define STL_STLD1MPUOR_HITREGION_Pos 9U /*!< STL STLD1MPUOR: HITREGION Position */ 
+#define STL_STLD1MPUOR_HITREGION_Msk (0xFFUL << STL_STLD1MPUOR_HITREGION_Pos) /*!< STL STLD1MPUOR: HITREGION Mask */ + +#define STL_STLD1MPUOR_ATTR_Pos 0U /*!< STL STLD1MPUOR: ATTR Position */ +#define STL_STLD1MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD1MPUOR_ATTR_Pos*/) /*!< STL STLD1MPUOR: ATTR Mask */ + +/*@}*/ /* end of group STL_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set 
*/ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ 
+#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: PSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask (fix: was shifted by data1_Pos, overlapping the data1 field) */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define 
TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: AFVALID2S Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1S Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << 
TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask (fix: was shifted by data1_Pos, overlapping the data1 field) */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2S Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1S Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define 
TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU Claim Tag Set Register Definitions */ +#define TPIU_CLAIMSET_SET_Pos 0U /*!< TPIU CLAIMSET: SET Position */ +#define TPIU_CLAIMSET_SET_Msk (0xFUL /*<< TPIU_CLAIMSET_SET_Pos*/) /*!< TPIU CLAIMSET: SET Mask */ + +/** \brief TPIU Claim Tag Clear Register Definitions */ +#define TPIU_CLAIMCLR_CLR_Pos 0U /*!< TPIU CLAIMCLR: CLR Position */ +#define TPIU_CLAIMCLR_CLR_Msk (0xFUL /*<< TPIU_CLAIMCLR_CLR_Pos*/) /*!< TPIU CLAIMCLR: CLR Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ 
+ +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). + */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< 
Offset: 0xFBC (R/W) Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) Device Type Register */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define 
PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + 
+#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ 
+#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event 
Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define 
PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear 
Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << 
PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable 
Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << 
PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ 
+#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU 
INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL << PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask 
*/ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk 
(1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 
Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear 
Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << 
PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: 
Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk 
(1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: 
Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter 
Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ +#define PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 
Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position */ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event 
Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow 
Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 
1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 
11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define 
PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << 
PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control Register Definitions */ +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU 
TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< 
PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU 
Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + 
+#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 
Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register 
Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + 
+#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: 
CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position 
*/ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define 
FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ 
+ +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk 
(1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << 
DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable 
Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask 
*/ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ 
+#define DCB_DAUTHCTRL_UIDAPEN_Msk (1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) 
/*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + uint32_t RESERVED1[3U]; + __IM uint32_t DDEVTYPE; /*!< Offset: 0x01C (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) 
/*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< 
DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_ISA_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller interrupt status access Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define EWIC_BASE (0xE0047000UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking 
configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC_ISA ((EWIC_ISA_Type *) EWIC_ISA_BASE ) /*!< EWIC interrupt status access struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define 
SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; + __OM uint32_t DSCEMCR; + __IOM uint32_t DAUTHCTRL; + __IOM uint32_t DSCSR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos DCB_DHCSR_S_RESTART_ST_Pos +#define CoreDebug_DHCSR_S_RESTART_ST_Msk DCB_DHCSR_S_RESTART_ST_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_FPD_Pos DCB_DHCSR_S_FPD_Pos +#define CoreDebug_DHCSR_S_FPD_Msk DCB_DHCSR_S_FPD_Msk + +#define CoreDebug_DHCSR_S_SUIDE_Pos DCB_DHCSR_S_SUIDE_Pos +#define CoreDebug_DHCSR_S_SUIDE_Msk DCB_DHCSR_S_SUIDE_Msk + +#define CoreDebug_DHCSR_S_NSUIDE_Pos DCB_DHCSR_S_NSUIDE_Pos +#define CoreDebug_DHCSR_S_NSUIDE_Msk DCB_DHCSR_S_NSUIDE_Msk + +#define CoreDebug_DHCSR_S_SDE_Pos DCB_DHCSR_S_SDE_Pos +#define CoreDebug_DHCSR_S_SDE_Msk DCB_DHCSR_S_SDE_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define 
CoreDebug_DHCSR_C_PMOV_Pos DCB_DHCSR_C_PMOV_Pos +#define CoreDebug_DHCSR_C_PMOV_Msk DCB_DHCSR_C_PMOV_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 
DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos DCB_DSCEMCR_CLR_MON_REQ_Pos +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk DCB_DSCEMCR_CLR_MON_REQ_Msk + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos DCB_DSCEMCR_CLR_MON_PEND_Pos +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk DCB_DSCEMCR_CLR_MON_PEND_Msk + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos DCB_DSCEMCR_SET_MON_REQ_Pos +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk DCB_DSCEMCR_SET_MON_REQ_Msk + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos DCB_DSCEMCR_SET_MON_PEND_Pos +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk DCB_DSCEMCR_SET_MON_PEND_Msk + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos DCB_DAUTHCTRL_UIDEN_Pos +#define CoreDebug_DAUTHCTRL_UIDEN_Msk DCB_DAUTHCTRL_UIDEN_Msk + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos DCB_DAUTHCTRL_UIDAPEN_Pos +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk DCB_DAUTHCTRL_UIDAPEN_Msk + +#define CoreDebug_DAUTHCTRL_FSDMA_Pos DCB_DAUTHCTRL_FSDMA_Pos +#define CoreDebug_DAUTHCTRL_FSDMA_Msk DCB_DAUTHCTRL_FSDMA_Msk + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos DCB_DAUTHCTRL_INTSPNIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk DCB_DAUTHCTRL_INTSPNIDEN_Msk + 
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos DCB_DAUTHCTRL_SPNIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk DCB_DAUTHCTRL_SPNIDENSEL_Msk + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos DCB_DAUTHCTRL_INTSPIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk DCB_DAUTHCTRL_INTSPIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos DCB_DAUTHCTRL_SPIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk DCB_DAUTHCTRL_SPIDENSEL_Msk + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos DCB_DSCSR_CDS_Pos +#define CoreDebug_DSCSR_CDS_Msk DCB_DSCSR_CDS_Msk + +#define CoreDebug_DSCSR_SBRSEL_Pos DCB_DSCSR_SBRSEL_Pos +#define CoreDebug_DSCSR_SBRSEL_Msk DCB_DSCSR_SBRSEL_Msk + +#define CoreDebug_DSCSR_SBRSELEN_Pos DCB_DSCSR_SBRSELEN_Pos +#define CoreDebug_DSCSR_SBRSELEN_Msk DCB_DSCSR_SBRSELEN_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define CoreDebug_NS ((CoreDebug_Type *) DCB_BASE_NS) +#endif + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define 
EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. 
+ \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. 
+ \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "m-profile/armv8m_pmu.h" + +/** + \brief Cortex-M55 PMU events + \note Architectural PMU events can be found in armv8m_pmu.h +*/ + +#define ARMCM55_PMU_ECC_ERR 0xC000 /*!< Any ECC error */ +#define ARMCM55_PMU_ECC_ERR_FATAL 0xC001 /*!< Any fatal ECC error */ +#define ARMCM55_PMU_ECC_ERR_DCACHE 0xC010 /*!< Any ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_ICACHE 0xC011 /*!< Any ECC error in the instruction cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DCACHE 0xC012 /*!< Any fatal ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ICACHE 0xC013 /*!< Any fatal ECC error in the instruction cache*/ +#define ARMCM55_PMU_ECC_ERR_DTCM 0xC020 /*!< Any ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_ITCM 0xC021 /*!< Any ECC error in the ITCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DTCM 0xC022 /*!< Any fatal ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ITCM 0xC023 /*!< Any fatal ECC error in the ITCM */ +#define ARMCM55_PMU_PF_LINEFILL 0xC100 /*!< A prefetcher starts a line-fill */ +#define ARMCM55_PMU_PF_CANCEL 
0xC101 /*!< A prefetcher stops prefetching */ +#define ARMCM55_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM55_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM55_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM55_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM55_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access to the P-AHB write interface */ +#define ARMCM55_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM55_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM55_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM55_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ +#define ARMCM55_PMU_CDE_INST_RETIRED 0xC402 /*!< CDE instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_CX1_INST_RETIRED 0xC404 /*!< CDE CX1 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_CX2_INST_RETIRED 0xC406 /*!< CDE CX2 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_CX3_INST_RETIRED 0xC408 /*!< CDE CX3 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_VCX1_INST_RETIRED 0xC40A /*!< CDE VCX1 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_VCX2_INST_RETIRED 0xC40C /*!< CDE VCX2 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_VCX3_INST_RETIRED 0xC40E /*!< CDE VCX3 instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_VCX1_VEC_INST_RETIRED 0xC410 /*!< CDE VCX1 Vector instruction architecturally executed. 
*/ +#define ARMCM55_PMU_CDE_VCX2_VEC_INST_RETIRED 0xC412 /*!< CDE VCX2 Vector instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_VCX3_VEC_INST_RETIRED 0xC414 /*!< CDE VCX3 Vector instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_PRED 0xC416 /*!< Cycles where one or more predicated beats of a CDE instruction architecturally executed. */ +#define ARMCM55_PMU_CDE_STALL 0xC417 /*!< Stall cycles caused by a CDE instruction. */ +#define ARMCM55_PMU_CDE_STALL_RESOURCE 0xC418 /*!< Stall cycles caused by a CDE instruction because of resource conflicts */ +#define ARMCM55_PMU_CDE_STALL_DEPENDENCY 0xC419 /*!< Stall cycles caused by a CDE register dependency. */ +#define ARMCM55_PMU_CDE_STALL_CUSTOM 0xC41A /*!< Stall cycles caused by a CDE instruction are generated by the custom hardware. */ +#define ARMCM55_PMU_CDE_STALL_OTHER 0xC41B /*!< Stall cycles caused by a CDE instruction are not covered by the other counters. */ +#define ARMCM55_PMU_PF_LF_LA_1 0xC41C /*!< A data prefetcher line-fill request is made while the lookahead distance is 1. */ +#define ARMCM55_PMU_PF_LF_LA_2 0xC41D /*!< A data prefetcher line-fill request is made while the lookahead distance is 2. */ +#define ARMCM55_PMU_PF_LF_LA_3 0xC41E /*!< A data prefetcher line-fill request is made while the lookahead distance is 3. */ +#define ARMCM55_PMU_PF_LF_LA_4 0xC41F /*!< A data prefetcher line-fill request is made while the lookahead distance is 4. */ +#define ARMCM55_PMU_PF_LF_LA_5 0xC420 /*!< A data prefetcher line-fill request is made while the lookahead distance is 5. */ +#define ARMCM55_PMU_PF_LF_LA_6 0xC421 /*!< A data prefetcher line-fill request is made while the lookahead distance is 6. */ +#define ARMCM55_PMU_PF_BUFFER_FULL 0xC422 /*!< A data prefetcher request is made while the buffer is full. */ +#define ARMCM55_PMU_PF_BUFFER_MISS 0xC423 /*!< A load requires a line-fill which misses in the data prefetcher buffer. 
*/ +#define ARMCM55_PMU_PF_BUFFER_HIT 0xC424 /*!< A load access hits in the data prefetcher buffer. */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. 
+ @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + #include "m-profile/armv7m_cachel1.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. 
+ @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. 
+ */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm7.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm7.h new file mode 100644 index 0000000..5a181dc --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm7.h @@ -0,0 +1,2471 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M7 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM7_H_GENERIC +#define __CORE_CM7_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M7 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM7 definitions */ + +#define __CORTEX_M (7U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined 
(__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM7_H_DEPENDANT +#define __CORE_CM7_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM7_REV + #define __CM7_REV 0x0000U + #warning "__CM7_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M7 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define 
APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: 
V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ 
+ +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IPR[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 
(R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ + uint32_t RESERVED7[5U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define 
SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + 
+#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE 
Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: Branch prediction enable bit Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: Branch prediction enable bit Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: Instruction cache enable bit Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: Instruction cache enable bit Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: Cache enable bit Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: Cache enable bit Mask */ + +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB 
CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: 
SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register 
Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << 
SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB 
CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/** 
\brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: 
LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/** \brief SCB Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define 
SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/** \brief SCB Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/** \brief SCB AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/** \brief SCB L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCDIS_Pos 1U /*!< SCB CACR: ECCDIS Position */ +#define SCB_CACR_ECCDIS_Msk (1UL << SCB_CACR_ECCDIS_Pos) /*!< SCB CACR: ECCDIS Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: 
SIWT Mask */ + +/** \brief SCB AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/** \brief SCB Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISDYNADD_Pos 26U /*!< ACTLR: DISDYNADD Position */ +#define SCnSCB_ACTLR_DISDYNADD_Msk (1UL << SCnSCB_ACTLR_DISDYNADD_Pos) /*!< ACTLR: DISDYNADD Mask */ + +#define SCnSCB_ACTLR_DISISSCH1_Pos 21U /*!< ACTLR: DISISSCH1 Position */ +#define SCnSCB_ACTLR_DISISSCH1_Msk (0x1FUL << SCnSCB_ACTLR_DISISSCH1_Pos) /*!< ACTLR: DISISSCH1 Mask */ + +#define SCnSCB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define SCnSCB_ACTLR_DISDI_Msk (0x1FUL << SCnSCB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (1UL << SCnSCB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define SCnSCB_ACTLR_DISBTACALLOC_Pos 14U /*!< ACTLR: DISBTACALLOC Position */ +#define SCnSCB_ACTLR_DISBTACALLOC_Msk (1UL << SCnSCB_ACTLR_DISBTACALLOC_Pos) /*!< ACTLR: DISBTACALLOC Mask */ + +#define SCnSCB_ACTLR_DISBTACREAD_Pos 13U /*!< ACTLR: DISBTACREAD Position */ +#define SCnSCB_ACTLR_DISBTACREAD_Msk (1UL << SCnSCB_ACTLR_DISBTACREAD_Pos) /*!< ACTLR: DISBTACREAD Mask */ + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define SCnSCB_ACTLR_DISRAMODE_Pos 11U /*!< ACTLR: DISRAMODE Position */ +#define SCnSCB_ACTLR_DISRAMODE_Msk (1UL << SCnSCB_ACTLR_DISRAMODE_Pos) 
/*!< ACTLR: DISRAMODE Mask */ + +#define SCnSCB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define SCnSCB_ACTLR_FPEXCODIS_Msk (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE 
Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Lock Status Register */ +} ITM_Type; + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ 
+#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Lock Status Register Definitions */ +#define ITM_LSR_BYTEACC_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_BYTEACC_Msk (1UL << ITM_LSR_BYTEACC_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_ACCESS_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_ACCESS_Msk (1UL << ITM_LSR_ACCESS_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_PRESENT_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_PRESENT_Msk (1UL /*<< ITM_LSR_PRESENT_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED3[981U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define 
DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << 
DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define 
DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define 
DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU 
Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration ETM Data Register Definitions (FIFO0) */ +#define TPIU_FIFO0_ITM_ATVALID_Pos 29U /*!< TPIU FIFO0: 
ITM_ATVALID Position */ +#define TPIU_FIFO0_ITM_ATVALID_Msk (1UL << TPIU_FIFO0_ITM_ATVALID_Pos) /*!< TPIU FIFO0: ITM_ATVALID Mask */ + +#define TPIU_FIFO0_ITM_bytecount_Pos 27U /*!< TPIU FIFO0: ITM_bytecount Position */ +#define TPIU_FIFO0_ITM_bytecount_Msk (0x3UL << TPIU_FIFO0_ITM_bytecount_Pos) /*!< TPIU FIFO0: ITM_bytecount Mask */ + +#define TPIU_FIFO0_ETM_ATVALID_Pos 26U /*!< TPIU FIFO0: ETM_ATVALID Position */ +#define TPIU_FIFO0_ETM_ATVALID_Msk (1UL << TPIU_FIFO0_ETM_ATVALID_Pos) /*!< TPIU FIFO0: ETM_ATVALID Mask */ + +#define TPIU_FIFO0_ETM_bytecount_Pos 24U /*!< TPIU FIFO0: ETM_bytecount Position */ +#define TPIU_FIFO0_ETM_bytecount_Msk (0x3UL << TPIU_FIFO0_ETM_bytecount_Pos) /*!< TPIU FIFO0: ETM_bytecount Mask */ + +#define TPIU_FIFO0_ETM2_Pos 16U /*!< TPIU FIFO0: ETM2 Position */ +#define TPIU_FIFO0_ETM2_Msk (0xFFUL << TPIU_FIFO0_ETM2_Pos) /*!< TPIU FIFO0: ETM2 Mask */ + +#define TPIU_FIFO0_ETM1_Pos 8U /*!< TPIU FIFO0: ETM1 Position */ +#define TPIU_FIFO0_ETM1_Msk (0xFFUL << TPIU_FIFO0_ETM1_Pos) /*!< TPIU FIFO0: ETM1 Mask */ + +#define TPIU_FIFO0_ETM0_Pos 0U /*!< TPIU FIFO0: ETM0 Position */ +#define TPIU_FIFO0_ETM0_Msk (0xFFUL /*<< TPIU_FIFO0_ETM0_Pos*/) /*!< TPIU FIFO0: ETM0 Mask */ + +/** \brief TPIU ITATBCTR2 Register Definitions */ +#define TPIU_ITATBCTR2_ATREADY2_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2 Position */ +#define TPIU_ITATBCTR2_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2 Mask */ + +#define TPIU_ITATBCTR2_ATREADY1_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1 Position */ +#define TPIU_ITATBCTR2_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1 Mask */ + +/** \brief TPIU Integration ITM Data Register Definitions (FIFO1) */ +#define TPIU_FIFO1_ITM_ATVALID_Pos 29U /*!< TPIU FIFO1: ITM_ATVALID Position */ +#define TPIU_FIFO1_ITM_ATVALID_Msk (1UL << TPIU_FIFO1_ITM_ATVALID_Pos) /*!< TPIU FIFO1: ITM_ATVALID Mask */ + +#define TPIU_FIFO1_ITM_bytecount_Pos 27U /*!< TPIU FIFO1: 
ITM_bytecount Position */ +#define TPIU_FIFO1_ITM_bytecount_Msk (0x3UL << TPIU_FIFO1_ITM_bytecount_Pos) /*!< TPIU FIFO1: ITM_bytecount Mask */ + +#define TPIU_FIFO1_ETM_ATVALID_Pos 26U /*!< TPIU FIFO1: ETM_ATVALID Position */ +#define TPIU_FIFO1_ETM_ATVALID_Msk (1UL << TPIU_FIFO1_ETM_ATVALID_Pos) /*!< TPIU FIFO1: ETM_ATVALID Mask */ + +#define TPIU_FIFO1_ETM_bytecount_Pos 24U /*!< TPIU FIFO1: ETM_bytecount Position */ +#define TPIU_FIFO1_ETM_bytecount_Msk (0x3UL << TPIU_FIFO1_ETM_bytecount_Pos) /*!< TPIU FIFO1: ETM_bytecount Mask */ + +#define TPIU_FIFO1_ITM2_Pos 16U /*!< TPIU FIFO1: ITM2 Position */ +#define TPIU_FIFO1_ITM2_Msk (0xFFUL << TPIU_FIFO1_ITM2_Pos) /*!< TPIU FIFO1: ITM2 Mask */ + +#define TPIU_FIFO1_ITM1_Pos 8U /*!< TPIU FIFO1: ITM1 Position */ +#define TPIU_FIFO1_ITM1_Msk (0xFFUL << TPIU_FIFO1_ITM1_Pos) /*!< TPIU FIFO1: ITM1 Mask */ + +#define TPIU_FIFO1_ITM0_Pos 0U /*!< TPIU FIFO1: ITM0 Position */ +#define TPIU_FIFO1_ITM0_Msk (0xFFUL /*<< TPIU_FIFO1_ITM0_Pos*/) /*!< TPIU FIFO1: ITM0 Mask */ + +/** \brief TPIU ITATBCTR0 Register Definitions */ +#define TPIU_ITATBCTR0_ATREADY2_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2 Position */ +#define TPIU_ITATBCTR0_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2 Mask */ + +#define TPIU_ITATBCTR0_ATREADY1_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1 Position */ +#define TPIU_ITATBCTR0_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1 Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID 
Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_MinBufSz_Pos 6U /*!< TPIU DEVID: MinBufSz Position */ +#define TPIU_DEVID_MinBufSz_Msk (0x7UL << TPIU_DEVID_MinBufSz_Pos) /*!< TPIU DEVID: MinBufSz Mask */ + +#define TPIU_DEVID_AsynClkIn_Pos 5U /*!< TPIU DEVID: AsynClkIn Position */ +#define TPIU_DEVID_AsynClkIn_Msk (1UL << TPIU_DEVID_AsynClkIn_Pos) /*!< TPIU DEVID: AsynClkIn Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define 
MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/** \brief MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << 
MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << 
FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode 
bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPShortvec_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_FPShortvec_Msk (0xFUL << FPU_MVFR0_FPShortvec_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPExceptrap_Pos 12U /*!< MVFR0: Exception trapping bits Position */ +#define FPU_MVFR0_FPExceptrap_Msk (0xFUL << FPU_MVFR0_FPExceptrap_Pos) /*!< MVFR0: Exception trapping bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN 
mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< 
DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ 
+ +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define 
DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ +#define DCB_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ +#define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated 
symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define 
CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) + +#endif // 
CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ 
+#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "m-profile/armv7m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + #include "m-profile/armv7m_cachel1.h" +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions 
+ \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. 
+ \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_cm85.h b/src/third_party/cmsis/CMSIS/Core/Include/core_cm85.h new file mode 100644 index 0000000..5440850 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_cm85.h @@ -0,0 +1,4949 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Cortex-M85 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM85_H_GENERIC +#define __CORE_CM85_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M85 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS CM85 definitions */ + +#define __CORTEX_M (85U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && 
(__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U 
+ #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM85_H_DEPENDANT +#define __CORE_CM85_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM85_REV + #define __CM85_REV 0x0001U + #warning "__CM85_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" 
*/ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M85 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core EWIC Interrupt Status Access Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ 
+#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:1; /*!< bit: 20 Reserved */ + uint32_t B:1; /*!< bit: 21 BTI active (read 0) */ + uint32_t _reserved2:2; /*!< bit: 22..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << 
xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_B_Pos 21U /*!< xPSR: B Position */ +#define xPSR_B_Msk (1UL << xPSR_B_Pos) /*!< xPSR: B Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t BTI_EN:1; /*!< bit: 4 Privileged branch target identification enable */ + uint32_t UBTI_EN:1; /*!< bit: 5 Unprivileged branch target identification enable */ + uint32_t PAC_EN:1; /*!< bit: 6 Privileged pointer authentication enable */ + uint32_t UPAC_EN:1; /*!< bit: 7 Unprivileged pointer authentication enable */ + uint32_t _reserved1:24; /*!< bit: 8..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_UPAC_EN_Pos 7U /*!< CONTROL: UPAC_EN Position */ +#define CONTROL_UPAC_EN_Msk (1UL << CONTROL_UPAC_EN_Pos) /*!< CONTROL: UPAC_EN Mask */ + +#define CONTROL_PAC_EN_Pos 6U /*!< CONTROL: PAC_EN Position */ +#define CONTROL_PAC_EN_Msk (1UL << CONTROL_PAC_EN_Pos) /*!< CONTROL: PAC_EN Mask */ + +#define CONTROL_UBTI_EN_Pos 5U /*!< CONTROL: UBTI_EN Position */ +#define CONTROL_UBTI_EN_Msk (1UL << CONTROL_UBTI_EN_Pos) /*!< CONTROL: UBTI_EN Mask */ + +#define CONTROL_BTI_EN_Pos 4U /*!< CONTROL: BTI_EN Position */ +#define CONTROL_BTI_EN_Msk (1UL << CONTROL_BTI_EN_Pos) /*!< CONTROL: BTI_EN Mask */ + +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< 
CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access 
Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define 
SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ 
+#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + 
+#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << 
SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB 
SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << 
SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< 
SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define 
SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< 
SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/** \brief SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << 
SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << 
SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/** \brief SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB 
DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/** \brief ICB Coprocessor Power Control Register Definitions */ +#define ICB_CPPWR_SUS11_Pos 23U /*!< CPPWR: SUS11 Position */ +#define ICB_CPPWR_SUS11_Msk (1UL << ICB_CPPWR_SUS11_Pos) /*!< CPPWR: SUS11 Mask */ + +#define ICB_CPPWR_SU11_Pos 22U /*!< CPPWR: SU11 Position */ +#define ICB_CPPWR_SU11_Msk (1UL << ICB_CPPWR_SU11_Pos) /*!< CPPWR: SU11 Mask */ + +#define ICB_CPPWR_SUS10_Pos 21U /*!< CPPWR: SUS10 Position */ +#define ICB_CPPWR_SUS10_Msk (1UL << ICB_CPPWR_SUS10_Pos) /*!< CPPWR: SUS10 Mask */ + +#define ICB_CPPWR_SU10_Pos 20U /*!< CPPWR: SU10 Position */ +#define ICB_CPPWR_SU10_Msk (1UL << ICB_CPPWR_SU10_Pos) /*!< CPPWR: SU10 Mask */ + +/** \brief ICB Auxiliary Control Register Definitions */ +#define 
ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +/** \brief ICB Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: 
FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable 
bit Mask */ + +/** \brief ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/** \brief ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/** \brief ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + __IOM uint32_t VMASK1; /*!< Offset: 0x03C (R/W) Comparator Value Mask 1 */ + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + __IOM uint32_t VMASK3; /*!< Offset: 0x05C (R/W) Comparator Value Mask 3 */ + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP6; /*!< Offset: 
0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED14[968U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define 
DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ 
+#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH 
Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/** \brief MemSysCtl Memory System Control Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (1UL << 
MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define MEMSYSCTL_MSCR_TECCCHKDIS_Msk (1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/** \brief MemSysCtl Prefetcher Control Register Definitions */ +#define MEMSYSCTL_PFCR_DIS_NLP_Pos 7U /*!< MEMSYSCTL PFCR: DIS_NLP Position */ +#define MEMSYSCTL_PFCR_DIS_NLP_Msk (1UL << MEMSYSCTL_PFCR_DIS_NLP_Pos) /*!< MEMSYSCTL PFCR: DIS_NLP Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/** \brief MemSysCtl ITCM Control Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/** \brief MemSysCtl DTCM Control Register Definitions */ +#define 
MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/** \brief MemSysCtl P-AHB Control Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/** \brief MemSysCtl ITGU Control Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl ITGU Configuration Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/** \brief MemSysCtl DTGU Control Registers Definitions */ 
+#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/** \brief MemSysCtl DTGU Configuration Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). 
+ */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/** \brief PwrModCtl Core Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/** \brief PwrModCtl Debug Power Domain Low Power State Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
+ */ +typedef struct +{ + __IOM uint32_t EWIC_CR; /*!< Offset: 0x000 (R/W) EWIC Control Register */ + __IOM uint32_t EWIC_ASCR; /*!< Offset: 0x004 (R/W) EWIC Automatic Sequence Control Register */ + __OM uint32_t EWIC_CLRMASK; /*!< Offset: 0x008 ( /W) EWIC Clear Mask Register */ + __IM uint32_t EWIC_NUMID; /*!< Offset: 0x00C (R/ ) EWIC Event Number ID Register */ + uint32_t RESERVED0[124U]; + __IOM uint32_t EWIC_MASKA; /*!< Offset: 0x200 (R/W) EWIC MaskA Register */ + __IOM uint32_t EWIC_MASKn[15]; /*!< Offset: 0x204 (R/W) EWIC Maskn Registers */ + uint32_t RESERVED1[112U]; + __IM uint32_t EWIC_PENDA; /*!< Offset: 0x400 (R/ ) EWIC PendA Event Register */ + __IOM uint32_t EWIC_PENDn[15]; /*!< Offset: 0x404 (R/W) EWIC Pendn Event Registers */ + uint32_t RESERVED2[112U]; + __IM uint32_t EWIC_PSR; /*!< Offset: 0x600 (R/ ) EWIC Pend Summary Register */ +} EWIC_Type; + +/** \brief EWIC Control Register Definitions */ +#define EWIC_EWIC_CR_EN_Pos 0U /*!< EWIC EWIC_CR: EN Position */ +#define EWIC_EWIC_CR_EN_Msk (1UL /*<< EWIC_EWIC_CR_EN_Pos*/) /*!< EWIC EWIC_CR: EN Mask */ + +/** \brief EWIC Automatic Sequence Control Register Definitions */ +#define EWIC_EWIC_ASCR_ASPU_Pos 1U /*!< EWIC EWIC_ASCR: ASPU Position */ +#define EWIC_EWIC_ASCR_ASPU_Msk (1UL << EWIC_EWIC_ASCR_ASPU_Pos) /*!< EWIC EWIC_ASCR: ASPU Mask */ + +#define EWIC_EWIC_ASCR_ASPD_Pos 0U /*!< EWIC EWIC_ASCR: ASPD Position */ +#define EWIC_EWIC_ASCR_ASPD_Msk (1UL /*<< EWIC_EWIC_ASCR_ASPD_Pos*/) /*!< EWIC EWIC_ASCR: ASPD Mask */ + +/** \brief EWIC Event Number ID Register Definitions */ +#define EWIC_EWIC_NUMID_NUMEVENT_Pos 0U /*!< EWIC_NUMID: NUMEVENT Position */ +#define EWIC_EWIC_NUMID_NUMEVENT_Msk (0xFFFFUL /*<< EWIC_EWIC_NUMID_NUMEVENT_Pos*/) /*!< EWIC_NUMID: NUMEVENT Mask */ + +/** \brief EWIC Mask A Register Definitions */ +#define EWIC_EWIC_MASKA_EDBGREQ_Pos 2U /*!< EWIC EWIC_MASKA: EDBGREQ Position */ +#define EWIC_EWIC_MASKA_EDBGREQ_Msk (1UL << EWIC_EWIC_MASKA_EDBGREQ_Pos) /*!< EWIC EWIC_MASKA: EDBGREQ 
Mask */ + +#define EWIC_EWIC_MASKA_NMI_Pos 1U /*!< EWIC EWIC_MASKA: NMI Position */ +#define EWIC_EWIC_MASKA_NMI_Msk (1UL << EWIC_EWIC_MASKA_NMI_Pos) /*!< EWIC EWIC_MASKA: NMI Mask */ + +#define EWIC_EWIC_MASKA_EVENT_Pos 0U /*!< EWIC EWIC_MASKA: EVENT Position */ +#define EWIC_EWIC_MASKA_EVENT_Msk (1UL /*<< EWIC_EWIC_MASKA_EVENT_Pos*/) /*!< EWIC EWIC_MASKA: EVENT Mask */ + +/** \brief EWIC Mask n Register Definitions */ +#define EWIC_EWIC_MASKn_IRQ_Pos 0U /*!< EWIC EWIC_MASKn: IRQ Position */ +#define EWIC_EWIC_MASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_MASKn_IRQ_Pos*/) /*!< EWIC EWIC_MASKn: IRQ Mask */ + +/** \brief EWIC Pend A Register Definitions */ +#define EWIC_EWIC_PENDA_EDBGREQ_Pos 2U /*!< EWIC EWIC_PENDA: EDBGREQ Position */ +#define EWIC_EWIC_PENDA_EDBGREQ_Msk (1UL << EWIC_EWIC_PENDA_EDBGREQ_Pos) /*!< EWIC EWIC_PENDA: EDBGREQ Mask */ + +#define EWIC_EWIC_PENDA_NMI_Pos 1U /*!< EWIC EWIC_PENDA: NMI Position */ +#define EWIC_EWIC_PENDA_NMI_Msk (1UL << EWIC_EWIC_PENDA_NMI_Pos) /*!< EWIC EWIC_PENDA: NMI Mask */ + +#define EWIC_EWIC_PENDA_EVENT_Pos 0U /*!< EWIC EWIC_PENDA: EVENT Position */ +#define EWIC_EWIC_PENDA_EVENT_Msk (1UL /*<< EWIC_EWIC_PENDA_EVENT_Pos*/) /*!< EWIC EWIC_PENDA: EVENT Mask */ + +/** \brief EWIC Pend n Register Definitions */ +#define EWIC_EWIC_PENDn_IRQ_Pos 0U /*!< EWIC EWIC_PENDn: IRQ Position */ +#define EWIC_EWIC_PENDn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EWIC_PENDn_IRQ_Pos*/) /*!< EWIC EWIC_PENDn: IRQ Mask */ + +/** \brief EWIC Pend Summary Register Definitions */ +#define EWIC_EWIC_PSR_NZ_Pos 1U /*!< EWIC EWIC_PSR: NZ Position */ +#define EWIC_EWIC_PSR_NZ_Msk (0x7FFFUL << EWIC_EWIC_PSR_NZ_Pos) /*!< EWIC EWIC_PSR: NZ Mask */ + +#define EWIC_EWIC_PSR_NZA_Pos 0U /*!< EWIC EWIC_PSR: NZA Position */ +#define EWIC_EWIC_PSR_NZA_Msk (1UL /*<< EWIC_EWIC_PSR_NZA_Pos*/) /*!< EWIC EWIC_PSR: NZA Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_ISA_Type External Wakeup Interrupt Controller (EWIC) 
interrupt status access registers + \brief Type definitions for the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller interrupt status access registers (EWIC_ISA). + */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/ ) Event Mask A Register */ + __IM uint32_t EVENTMASKn[15]; /*!< Offset: 0x084 (R/ ) Event Mask Register */ +} EWIC_ISA_Type; + +/** \brief EWIC_ISA Event Set Pending Register Definitions */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTSPR: EDBGREQ Position */ +#define EWIC_ISA_EVENTSPR_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTSPR_EDBGREQ_Pos) /*!< EWIC_ISA EVENTSPR: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTSPR_NMI_Pos 1U /*!< EWIC_ISA EVENTSPR: NMI Position */ +#define EWIC_ISA_EVENTSPR_NMI_Msk (1UL << EWIC_ISA_EVENTSPR_NMI_Pos) /*!< EWIC_ISA EVENTSPR: NMI Mask */ + +#define EWIC_ISA_EVENTSPR_EVENT_Pos 0U /*!< EWIC_ISA EVENTSPR: EVENT Position */ +#define EWIC_ISA_EVENTSPR_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTSPR_EVENT_Pos*/) /*!< EWIC_ISA EVENTSPR: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask A Register Definitions */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC_ISA EVENTMASKA: EDBGREQ Position */ +#define EWIC_ISA_EVENTMASKA_EDBGREQ_Msk (1UL << EWIC_ISA_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC_ISA EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_ISA_EVENTMASKA_NMI_Pos 1U /*!< EWIC_ISA EVENTMASKA: NMI Position */ +#define EWIC_ISA_EVENTMASKA_NMI_Msk (1UL << EWIC_ISA_EVENTMASKA_NMI_Pos) /*!< EWIC_ISA EVENTMASKA: NMI Mask */ + +#define EWIC_ISA_EVENTMASKA_EVENT_Pos 0U /*!< EWIC_ISA EVENTMASKA: EVENT Position */ +#define EWIC_ISA_EVENTMASKA_EVENT_Msk (1UL /*<< EWIC_ISA_EVENTMASKA_EVENT_Pos*/) /*!< EWIC_ISA EVENTMASKA: EVENT Mask */ + +/** \brief EWIC_ISA Event Mask n Register Definitions */ +#define 
EWIC_ISA_EVENTMASKn_IRQ_Pos 0U /*!< EWIC_ISA EVENTMASKn: IRQ Position */ +#define EWIC_ISA_EVENTMASKn_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_ISA_EVENTMASKn_IRQ_Pos*/) /*!< EWIC_ISA EVENTMASKn: IRQ Mask */ + +/*@}*/ /* end of group EWIC_ISA_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). + */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/** \brief ErrBnk Instruction Cache Error Bank Register 0 Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ 
+#define ERRBNK_IEBR0_VALID_Msk (1UL /*<< ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/** \brief ErrBnk Instruction Cache Error Bank Register 1 Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (1UL /*<< ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/** \brief ErrBnk Data Cache Error Bank Register 0 Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ 
+ +#define ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (1UL /*<< ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/** \brief ErrBnk Data Cache Error Bank Register 1 Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (1UL /*<< ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/** \brief ErrBnk TCM Error Bank Register 0 Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x7UL << 
ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (1UL /*<< ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/** \brief ErrBnk TCM Error Bank Register 1 Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x7UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define ERRBNK_TEBR1_VALID_Msk (1UL /*<< ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type 
Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/** \brief PrcCfgInf Processor Configuration Information Selection Register Definitions */ + +/** \brief PrcCfgInf Processor Configuration Information Read Data Register Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup STL_Type Software Test Library Observation Registers + \brief Type definitions for the Software Test Library Observation Registerss (STL) + @{ + */ + +/** + \brief Structure type to access the Software Test Library Observation Registerss (STL). 
+ */ +typedef struct +{ + __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */ + __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sample Register */ + __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */ + __IM uint32_t STLD0MPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register 0 */ + __IM uint32_t STLD1MPUOR; /*!< Offset: 0x01C (R/ ) MPU Memory Attributes Register 1 */ + __IM uint32_t STLD2MPUOR; /*!< Offset: 0x020 (R/ ) MPU Memory Attributes Register 2 */ + __IM uint32_t STLD3MPUOR; /*!< Offset: 0x024 (R/ ) MPU Memory Attributes Register 3 */ + __IOM uint32_t STLSTBSLOTSR; /*!< Offset: 0x028 (R/W) STB Control Register */ + __IOM uint32_t STLLFDENTRYSR; /*!< Offset: 0x02C (R/W) LFD Control Register */ +} STL_Type; + +/** \brief STL NVIC Pending Priority Tree Register Definitions */ +#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */ +#define STL_STLNVICPENDOR_VALID_Msk (1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */ + +#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */ +#define STL_STLNVICPENDOR_TARGET_Msk (1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */ + +#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */ +#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */ + +#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */ +#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */ + +/** \brief STL NVIC Active Priority Tree Register Definitions */ +#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */ +#define 
STL_STLNVICACTVOR_VALID_Msk (1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */ + +#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */ +#define STL_STLNVICACTVOR_TARGET_Msk (1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */ + +#define STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/** \brief STL MPU Sample Register Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/** \brief STL MPU Region Hit Register Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 0 Definitions */ +#define STL_STLD0MPUOR_HITREGION_Pos 9U /*!< STL STLD0MPUOR: HITREGION Position */ +#define STL_STLD0MPUOR_HITREGION_Msk (0xFFUL << STL_STLD0MPUOR_HITREGION_Pos) /*!< STL 
STLD0MPUOR: HITREGION Mask */ + +#define STL_STLD0MPUOR_ATTR_Pos 0U /*!< STL STLD0MPUOR: ATTR Position */ +#define STL_STLD0MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD0MPUOR_ATTR_Pos*/) /*!< STL STLD0MPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 1 Definitions */ +#define STL_STLD1MPUOR_HITREGION_Pos 9U /*!< STL STLD1MPUOR: HITREGION Position */ +#define STL_STLD1MPUOR_HITREGION_Msk (0xFFUL << STL_STLD1MPUOR_HITREGION_Pos) /*!< STL STLD1MPUOR: HITREGION Mask */ + +#define STL_STLD1MPUOR_ATTR_Pos 0U /*!< STL STLD1MPUOR: ATTR Position */ +#define STL_STLD1MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD1MPUOR_ATTR_Pos*/) /*!< STL STLD1MPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 2 Definitions */ +#define STL_STLD2MPUOR_HITREGION_Pos 9U /*!< STL STLD2MPUOR: HITREGION Position */ +#define STL_STLD2MPUOR_HITREGION_Msk (0xFFUL << STL_STLD2MPUOR_HITREGION_Pos) /*!< STL STLD2MPUOR: HITREGION Mask */ + +#define STL_STLD2MPUOR_ATTR_Pos 0U /*!< STL STLD2MPUOR: ATTR Position */ +#define STL_STLD2MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD2MPUOR_ATTR_Pos*/) /*!< STL STLD2MPUOR: ATTR Mask */ + +/** \brief STL MPU Memory Attributes Register 3 Definitions */ +#define STL_STLD3MPUOR_HITREGION_Pos 9U /*!< STL STLD3MPUOR: HITREGION Position */ +#define STL_STLD3MPUOR_HITREGION_Msk (0xFFUL << STL_STLD3MPUOR_HITREGION_Pos) /*!< STL STLD3MPUOR: HITREGION Mask */ + +#define STL_STLD3MPUOR_ATTR_Pos 0U /*!< STL STLD3MPUOR: ATTR Position */ +#define STL_STLD3MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD3MPUOR_ATTR_Pos*/) /*!< STL STLD3MPUOR: ATTR Mask */ + +/** \brief STL STB Control Register Definitions */ +#define STL_STLSTBSLOTSR_VALID_Pos 4U /*!< STL STLSTBSLOTSR: VALID Position */ +#define STL_STLSTBSLOTSR_VALID_Msk (1UL << STL_STLSTBSLOTSR_VALID_Pos) /*!< STL STLSTBSLOTSR: VALID Mask */ + +#define STL_STLSTBSLOTSR_STBSLOTNUM_Pos 0U /*!< STL STLSTBSLOTSR: STBSLOTNUM Position */ +#define STL_STLSTBSLOTSR_STBSLOTNUM_Msk (0xFUL /*<< STL_STLSTBSLOTSR_STBSLOTNUM_Pos*/) /*!< 
STL STLSTBSLOTSR: STBSLOTNUM Mask */ + +/** \brief STL LFD Control Register Definitions */ +#define STL_STLLFDENTRYSR_VALID_Pos 4U /*!< STL STLLFDENTRYSR: VALID Position */ +#define STL_STLLFDENTRYSR_VALID_Msk (1UL << STL_STLLFDENTRYSR_VALID_Pos) /*!< STL STLLFDENTRYSR: VALID Mask */ + +#define STL_STLLFDENTRYSR_LFDENTRYNUM_Pos 0U /*!< STL STLLFDENTRYSR: LFDENTRYNUM Position */ +#define STL_STLLFDENTRYSR_LFDENTRYNUM_Msk (0xFUL /*<< STL_STLLFDENTRYSR_LFDENTRYNUM_Pos*/) /*!< STL STLLFDENTRYSR: LFDENTRYNUM Mask */ +/*@}*/ /* end of group STL_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) 
Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define 
TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: PSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk
(0xFFUL << TPIU_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: AFVALID2S Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1S Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2
byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2S Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1S Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define
TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU Claim Tag Set Register Definitions */ +#define TPIU_CLAIMSET_SET_Pos 0U /*!< TPIU CLAIMSET: SET Position */ +#define TPIU_CLAIMSET_SET_Msk (0xFUL /*<< TPIU_CLAIMSET_SET_Pos*/) /*!< TPIU CLAIMSET: SET Mask */ + +/** \brief TPIU Claim Tag Clear Register Definitions */ +#define TPIU_CLAIMCLR_CLR_Pos 0U /*!< TPIU CLAIMCLR: CLR Position */ +#define TPIU_CLAIMCLR_CLR_Msk (0xFUL /*<< TPIU_CLAIMCLR_CLR_Pos*/) /*!< TPIU CLAIMCLR: CLR Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + 
+#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). + */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) Type Register */ + __IOM uint32_t CTRL; /*!< 
Offset: 0xE04 (R/W) Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) Device Type Register */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << 
PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< 
PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << 
PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ +#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event
Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << 
PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 
Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< 
PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set 
Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: 
Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk 
(1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL << PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt 
Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable
Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << 
PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event 
Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt 
Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + 
+#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< 
PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ 
+#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define 
PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ +#define PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define
PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position */ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define 
PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 
Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 
Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define 
PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << 
PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control Register Definitions */ +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define 
PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure 
Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define 
MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (1UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute 
Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define 
SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating 
Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< 
FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 
25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U 
/*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug 
enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define 
DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor 
request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector 
Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (1UL << 
DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select 
Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + uint32_t RESERVED1[3U]; + __IM uint32_t DDEVTYPE; /*!< Offset: 0x01C (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged 
Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: 
Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_ISA_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller interrupt status access Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define EWIC_BASE (0xE0047000UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking 
configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC_ISA ((EWIC_ISA_Type *) EWIC_ISA_BASE ) /*!< EWIC interrupt status access struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define 
SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; + __OM uint32_t DSCEMCR; + __IOM uint32_t DAUTHCTRL; + __IOM uint32_t DSCSR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos DCB_DHCSR_S_RESTART_ST_Pos +#define CoreDebug_DHCSR_S_RESTART_ST_Msk DCB_DHCSR_S_RESTART_ST_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_FPD_Pos DCB_DHCSR_S_FPD_Pos +#define CoreDebug_DHCSR_S_FPD_Msk DCB_DHCSR_S_FPD_Msk + +#define CoreDebug_DHCSR_S_SUIDE_Pos DCB_DHCSR_S_SUIDE_Pos +#define CoreDebug_DHCSR_S_SUIDE_Msk DCB_DHCSR_S_SUIDE_Msk + +#define CoreDebug_DHCSR_S_NSUIDE_Pos DCB_DHCSR_S_NSUIDE_Pos +#define CoreDebug_DHCSR_S_NSUIDE_Msk DCB_DHCSR_S_NSUIDE_Msk + +#define CoreDebug_DHCSR_S_SDE_Pos DCB_DHCSR_S_SDE_Pos +#define CoreDebug_DHCSR_S_SDE_Msk DCB_DHCSR_S_SDE_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define 
CoreDebug_DHCSR_C_PMOV_Pos DCB_DHCSR_C_PMOV_Pos +#define CoreDebug_DHCSR_C_PMOV_Msk DCB_DHCSR_C_PMOV_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 
DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos DCB_DSCEMCR_CLR_MON_REQ_Pos +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk DCB_DSCEMCR_CLR_MON_REQ_Msk + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos DCB_DSCEMCR_CLR_MON_PEND_Pos +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk DCB_DSCEMCR_CLR_MON_PEND_Msk + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos DCB_DSCEMCR_SET_MON_REQ_Pos +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk DCB_DSCEMCR_SET_MON_REQ_Msk + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos DCB_DSCEMCR_SET_MON_PEND_Pos +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk DCB_DSCEMCR_SET_MON_PEND_Msk + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos DCB_DAUTHCTRL_UIDEN_Pos +#define CoreDebug_DAUTHCTRL_UIDEN_Msk DCB_DAUTHCTRL_UIDEN_Msk + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos DCB_DAUTHCTRL_UIDAPEN_Pos +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk DCB_DAUTHCTRL_UIDAPEN_Msk + +#define CoreDebug_DAUTHCTRL_FSDMA_Pos DCB_DAUTHCTRL_FSDMA_Pos +#define CoreDebug_DAUTHCTRL_FSDMA_Msk DCB_DAUTHCTRL_FSDMA_Msk + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos DCB_DAUTHCTRL_INTSPNIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk DCB_DAUTHCTRL_INTSPNIDEN_Msk + 
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos DCB_DAUTHCTRL_SPNIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk DCB_DAUTHCTRL_SPNIDENSEL_Msk + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos DCB_DAUTHCTRL_INTSPIDEN_Pos +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk DCB_DAUTHCTRL_INTSPIDEN_Msk + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos DCB_DAUTHCTRL_SPIDENSEL_Pos +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk DCB_DAUTHCTRL_SPIDENSEL_Msk + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos DCB_DSCSR_CDS_Pos +#define CoreDebug_DSCSR_CDS_Msk DCB_DSCSR_CDS_Msk + +#define CoreDebug_DSCSR_SBRSEL_Pos DCB_DSCSR_SBRSEL_Pos +#define CoreDebug_DSCSR_SBRSEL_Msk DCB_DSCSR_SBRSEL_Msk + +#define CoreDebug_DSCSR_SBRSELEN_Pos DCB_DSCSR_SBRSELEN_Pos +#define CoreDebug_DSCSR_SBRSELEN_Msk DCB_DSCSR_SBRSELEN_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define CoreDebug_NS ((CoreDebug_Type *) DCB_BASE_NS) +#endif + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define 
EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. 
+ \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. 
+ \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "m-profile/armv8m_pmu.h" + +/** + \brief Cortex-M85 PMU events + \note Architectural PMU events can be found in armv8m_pmu.h +*/ + +#define ARMCM85_PMU_ECC_ERR 0xC000 /*!< One or more Error Correcting Code (ECC) errors detected */ +#define ARMCM85_PMU_ECC_ERR_MBIT 0xC001 /*!< One or more multi-bit ECC errors detected */ +#define ARMCM85_PMU_ECC_ERR_DCACHE 0xC010 /*!< One or more ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_ICACHE 0xC011 /*!< One or more ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DCACHE 0xC012 /*!< One or more multi-bit ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ICACHE 0xC013 /*!< One or more multi-bit ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_DTCM 0xC020 /*!< One or more ECC errors in the Data Tightly Coupled Memory (DTCM) */ +#define ARMCM85_PMU_ECC_ERR_ITCM 0xC021 /*!< One or more ECC errors in the Instruction Tightly Coupled Memory (ITCM) */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DTCM 0xC022 /*!< One or more multi-bit ECC errors in the DTCM */ 
+#define ARMCM85_PMU_ECC_ERR_MBIT_ITCM 0xC023 /*!< One or more multi-bit ECC errors in the ITCM */ +#define ARMCM85_PMU_PF_LINEFILL 0xC100 /*!< The prefetcher starts a line-fill */ +#define ARMCM85_PMU_PF_CANCEL 0xC101 /*!< The prefetcher stops prefetching */ +#define ARMCM85_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM85_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM85_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM85_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM85_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access on the P-AHB write interface */ +#define ARMCM85_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM85_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM85_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM85_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ +#define ARMCM85_PMU_FUSED_INST_RETIRED 0xC500 /*!< Fused instructions architecturally executed */ +#define ARMCM85_PMU_BR_INDIRECT 0xC501 /*!< Indirect branch instruction architecturally executed */ +#define ARMCM85_PMU_BTAC_HIT 0xC502 /*!< BTAC branch predictor hit */ +#define ARMCM85_PMU_BTAC_HIT_RETURNS 0xC503 /*!< Return branch hits BTAC */ +#define ARMCM85_PMU_BTAC_HIT_CALLS 0xC504 /*!< Call branch hits BTAC */ +#define ARMCM85_PMU_BTAC_HIT_INDIRECT 0xC505 /*!< Indirect branch hits BTACT */ +#define ARMCM85_PMU_BTAC_NEW_ALLOC 0xC506 /*!< New allocation to BTAC */ +#define ARMCM85_PMU_BR_IND_MIS_PRED 0xC507 /*!< Indirect branch mis-predicted */ +#define ARMCM85_PMU_BR_RETURN_MIS_PRED 0xC508 
/*!< Return branch mis-predicted */ +#define ARMCM85_PMU_BR_BTAC_OFFSET_OVERFLOW 0xC509 /*!< Branch does not allocate in BTAC due to offset overflow */ +#define ARMCM85_PMU_STB_FULL_STALL_AXI 0xC50A /*!< STore Buffer (STB) full with AXI requests causing CPU to stall */ +#define ARMCM85_PMU_STB_FULL_STALL_TCM 0xC50B /*!< STB full with TCM requests causing CPU to stall */ +#define ARMCM85_PMU_CPU_STALLED_AHBS 0xC50C /*!< CPU is stalled because TCM access through AHBS */ +#define ARMCM85_PMU_AHBS_STALLED_CPU 0xC50D /*!< AHBS is stalled due to TCM access by CPU */ +#define ARMCM85_PMU_BR_INTERSTATING_MIS_PRED 0xC50E /*!< Inter-stating branch is mis-predicted. */ +#define ARMCM85_PMU_DWT_STALL 0xC50F /*!< Data Watchpoint and Trace (DWT) stall */ +#define ARMCM85_PMU_DWT_FLUSH 0xC510 /*!< DWT flush */ +#define ARMCM85_PMU_ETM_STALL 0xC511 /*!< Embedded Trace Macrocell (ETM) stall */ +#define ARMCM85_PMU_ETM_FLUSH 0xC512 /*!< ETM flush */ +#define ARMCM85_PMU_ADDRESS_BANK_CONFLICT 0xC513 /*!< Bank conflict prevents memory instruction dual issue */ +#define ARMCM85_PMU_BLOCKED_DUAL_ISSUE 0xC514 /*!< Dual instruction issuing is prevented */ +#define ARMCM85_PMU_FP_CONTEXT_TRIGGER 0xC515 /*!< Floating Point Context is created */ +#define ARMCM85_PMU_TAIL_CHAIN 0xC516 /*!< New exception is handled without first unstacking */ +#define ARMCM85_PMU_LATE_ARRIVAL 0xC517 /*!< Late-arriving exception taken during exception entry */ +#define ARMCM85_PMU_INT_STALL_FAULT 0xC518 /*!< Delayed exception entry due to ongoing fault processing */ +#define ARMCM85_PMU_INT_STALL_DEV 0xC519 /*!< Delayed exception entry due to outstanding device access */ +#define ARMCM85_PMU_PAC_STALL 0xC51A /*!< Stall caused by authentication code computation */ +#define ARMCM85_PMU_PAC_RETIRED 0xC51B /*!< PAC instruction architecturally executed */ +#define ARMCM85_PMU_AUT_RETIRED 0xC51C /*!< AUT instruction architecturally executed */ +#define ARMCM85_PMU_BTI_RETIRED 0xC51D /*!< BTI instruction 
architecturally executed */ +#define ARMCM85_PMU_PF_NL_MODE 0xC51E /*!< Prefetch in next line mode */ +#define ARMCM85_PMU_PF_STREAM_MODE 0xC51F /*!< Prefetch in stream mode */ +#define ARMCM85_PMU_PF_BUFF_CACHE_HIT 0xC520 /*!< Prefetch request that hit in the cache */ +#define ARMCM85_PMU_PF_REQ_LFB_HIT 0xC521 /*!< Prefetch request that hit in line fill buffers */ +#define ARMCM85_PMU_PF_BUFF_FULL 0xC522 /*!< Number of times prefetch buffer is full */ +#define ARMCM85_PMU_PF_REQ_DCACHE_HIT 0xC523 /*!< Generated prefetch request address that hit in D-Cache */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. 
+ @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + #include "m-profile/armv7m_cachel1.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################### PAC Key functions ########################### */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) +#include "m-profile/armv81m_pac.h" +#endif + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_sc000.h b/src/third_party/cmsis/CMSIS/Core/Include/core_sc000.h new file mode 100644 index 0000000..9bf2159 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_sc000.h @@ -0,0 +1,1055 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS SC000 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_SC000_H_GENERIC +#define __CORE_SC000_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC000 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS SC000 definitions */ + +#define __CORTEX_SC (000U) /*!< Cortex Secure Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC000_H_DEPENDANT +#define __CORE_SC000_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC000_REV + #define __SC000_REV 0x0000U + #warning "__SC000_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group SC000 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IPR[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + uint32_t RESERVED1[154U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << 
SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB 
AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup 
CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ 
+#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define 
MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/** \brief MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + 
+/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the SC000 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. + @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +#endif // CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( 
(((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M0 and M0+ do not require the architectural barrier - assume SC000 is the same */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "m-profile/armv7m_mpu.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. 
In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_sc300.h b/src/third_party/cmsis/CMSIS/Core/Include/core_sc300.h new file mode 100644 index 0000000..13d6b9b --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_sc300.h @@ -0,0 +1,2028 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * CMSIS SC300 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_SC300_H_GENERIC +#define __CORE_SC300_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC3000 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* CMSIS SC300 definitions */ + +#define __CORTEX_SC (300U) /*!< Cortex Secure Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ti__) + #if defined (__ARM_FP) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include 
"third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC300_H_DEPENDANT +#define __CORE_SC300_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC300_REV + #define __SC300_REV 0x0000U + #warning "__SC300_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group SC300 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). 
+ */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: 
ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IPR[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED1[129U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ 
+#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ 
+#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: 
SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< 
SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << 
SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << 
SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL 
<< SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register 
Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/** \brief SCnSCB Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Lock Status Register */ +} ITM_Type; + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 
10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Lock Status Register Definitions */ +#define ITM_LSR_BYTEACC_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_BYTEACC_Msk (1UL << ITM_LSR_BYTEACC_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_ACCESS_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_ACCESS_Msk (1UL << ITM_LSR_ACCESS_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_PRESENT_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_PRESENT_Msk (1UL /*<< ITM_LSR_PRESENT_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define 
DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL 
<< DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U 
/*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup 
CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) 
/*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ +#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration ETM Data Register Definitions (FIFO0) */ +#define TPIU_FIFO0_ITM_ATVALID_Pos 29U /*!< TPIU FIFO0: ITM_ATVALID Position */ +#define TPIU_FIFO0_ITM_ATVALID_Msk (1UL << TPIU_FIFO0_ITM_ATVALID_Pos) /*!< TPIU FIFO0: ITM_ATVALID Mask */ + +#define TPIU_FIFO0_ITM_bytecount_Pos 27U /*!< TPIU FIFO0: 
ITM_bytecount Position */ +#define TPIU_FIFO0_ITM_bytecount_Msk (0x3UL << TPIU_FIFO0_ITM_bytecount_Pos) /*!< TPIU FIFO0: ITM_bytecount Mask */ + +#define TPIU_FIFO0_ETM_ATVALID_Pos 26U /*!< TPIU FIFO0: ETM_ATVALID Position */ +#define TPIU_FIFO0_ETM_ATVALID_Msk (1UL << TPIU_FIFO0_ETM_ATVALID_Pos) /*!< TPIU FIFO0: ETM_ATVALID Mask */ + +#define TPIU_FIFO0_ETM_bytecount_Pos 24U /*!< TPIU FIFO0: ETM_bytecount Position */ +#define TPIU_FIFO0_ETM_bytecount_Msk (0x3UL << TPIU_FIFO0_ETM_bytecount_Pos) /*!< TPIU FIFO0: ETM_bytecount Mask */ + +#define TPIU_FIFO0_ETM2_Pos 16U /*!< TPIU FIFO0: ETM2 Position */ +#define TPIU_FIFO0_ETM2_Msk (0xFFUL << TPIU_FIFO0_ETM2_Pos) /*!< TPIU FIFO0: ETM2 Mask */ + +#define TPIU_FIFO0_ETM1_Pos 8U /*!< TPIU FIFO0: ETM1 Position */ +#define TPIU_FIFO0_ETM1_Msk (0xFFUL << TPIU_FIFO0_ETM1_Pos) /*!< TPIU FIFO0: ETM1 Mask */ + +#define TPIU_FIFO0_ETM0_Pos 0U /*!< TPIU FIFO0: ETM0 Position */ +#define TPIU_FIFO0_ETM0_Msk (0xFFUL /*<< TPIU_FIFO0_ETM0_Pos*/) /*!< TPIU FIFO0: ETM0 Mask */ + +/** \brief TPIU ITATBCTR2 Register Definitions */ +#define TPIU_ITATBCTR2_ATREADY2_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2 Position */ +#define TPIU_ITATBCTR2_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2 Mask */ + +#define TPIU_ITATBCTR2_ATREADY1_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1 Position */ +#define TPIU_ITATBCTR2_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1 Mask */ + +/** \brief TPIU Integration ITM Data Register Definitions (FIFO1) */ +#define TPIU_FIFO1_ITM_ATVALID_Pos 29U /*!< TPIU FIFO1: ITM_ATVALID Position */ +#define TPIU_FIFO1_ITM_ATVALID_Msk (1UL << TPIU_FIFO1_ITM_ATVALID_Pos) /*!< TPIU FIFO1: ITM_ATVALID Mask */ + +#define TPIU_FIFO1_ITM_bytecount_Pos 27U /*!< TPIU FIFO1: ITM_bytecount Position */ +#define TPIU_FIFO1_ITM_bytecount_Msk (0x3UL << TPIU_FIFO1_ITM_bytecount_Pos) /*!< TPIU FIFO1: ITM_bytecount Mask */ + +#define TPIU_FIFO1_ETM_ATVALID_Pos 26U /*!< TPIU FIFO1: 
ETM_ATVALID Position */ +#define TPIU_FIFO1_ETM_ATVALID_Msk (1UL << TPIU_FIFO1_ETM_ATVALID_Pos) /*!< TPIU FIFO1: ETM_ATVALID Mask */ + +#define TPIU_FIFO1_ETM_bytecount_Pos 24U /*!< TPIU FIFO1: ETM_bytecount Position */ +#define TPIU_FIFO1_ETM_bytecount_Msk (0x3UL << TPIU_FIFO1_ETM_bytecount_Pos) /*!< TPIU FIFO1: ETM_bytecount Mask */ + +#define TPIU_FIFO1_ITM2_Pos 16U /*!< TPIU FIFO1: ITM2 Position */ +#define TPIU_FIFO1_ITM2_Msk (0xFFUL << TPIU_FIFO1_ITM2_Pos) /*!< TPIU FIFO1: ITM2 Mask */ + +#define TPIU_FIFO1_ITM1_Pos 8U /*!< TPIU FIFO1: ITM1 Position */ +#define TPIU_FIFO1_ITM1_Msk (0xFFUL << TPIU_FIFO1_ITM1_Pos) /*!< TPIU FIFO1: ITM1 Mask */ + +#define TPIU_FIFO1_ITM0_Pos 0U /*!< TPIU FIFO1: ITM0 Position */ +#define TPIU_FIFO1_ITM0_Msk (0xFFUL /*<< TPIU_FIFO1_ITM0_Pos*/) /*!< TPIU FIFO1: ITM0 Mask */ + +/** \brief TPIU ITATBCTR0 Register Definitions */ +#define TPIU_ITATBCTR0_ATREADY2_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2 Position */ +#define TPIU_ITATBCTR0_ATREADY2_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY2_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2 Mask */ + +#define TPIU_ITATBCTR0_ATREADY1_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1 Position */ +#define TPIU_ITATBCTR0_ATREADY1_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1 Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define 
TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_MinBufSz_Pos 6U /*!< TPIU DEVID: MinBufSz Position */ +#define TPIU_DEVID_MinBufSz_Msk (0x7UL << TPIU_DEVID_MinBufSz_Pos) /*!< TPIU DEVID: MinBufSz Mask */ + +#define TPIU_DEVID_AsynClkIn_Pos 5U /*!< TPIU DEVID: AsynClkIn Position */ +#define TPIU_DEVID_AsynClkIn_Msk (1UL << TPIU_DEVID_AsynClkIn_Pos) /*!< TPIU DEVID: AsynClkIn Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define 
MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/** \brief MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << 
MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ 
+#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL 
/*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U 
/*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ +#define DCB_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPIU ((TPIU_Type *) TPIU_BASE ) /*!< TPIU configuration struct */ +#define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + +/** + \defgroup CMSIS_deprecated_aliases Backwards Compatibility Aliases + \brief Alias definitions present for backwards compatibility for deprecated symbols. 
+ @{ + */ + +#ifndef CMSIS_DISABLE_DEPRECATED + +#define SCB_AIRCR_ENDIANESS_Pos SCB_AIRCR_ENDIANNESS_Pos +#define SCB_AIRCR_ENDIANESS_Msk SCB_AIRCR_ENDIANNESS_Msk + +/* deprecated, CMSIS_5 backward compatibility */ +typedef struct +{ + __IOM uint32_t DHCSR; + __OM uint32_t DCRSR; + __IOM uint32_t DCRDR; + __IOM uint32_t DEMCR; +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos DCB_DHCSR_DBGKEY_Pos +#define CoreDebug_DHCSR_DBGKEY_Msk DCB_DHCSR_DBGKEY_Msk + +#define CoreDebug_DHCSR_S_RESET_ST_Pos DCB_DHCSR_S_RESET_ST_Pos +#define CoreDebug_DHCSR_S_RESET_ST_Msk DCB_DHCSR_S_RESET_ST_Msk + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos DCB_DHCSR_S_RETIRE_ST_Pos +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk DCB_DHCSR_S_RETIRE_ST_Msk + +#define CoreDebug_DHCSR_S_LOCKUP_Pos DCB_DHCSR_S_LOCKUP_Pos +#define CoreDebug_DHCSR_S_LOCKUP_Msk DCB_DHCSR_S_LOCKUP_Msk + +#define CoreDebug_DHCSR_S_SLEEP_Pos DCB_DHCSR_S_SLEEP_Pos +#define CoreDebug_DHCSR_S_SLEEP_Msk DCB_DHCSR_S_SLEEP_Msk + +#define CoreDebug_DHCSR_S_HALT_Pos DCB_DHCSR_S_HALT_Pos +#define CoreDebug_DHCSR_S_HALT_Msk DCB_DHCSR_S_HALT_Msk + +#define CoreDebug_DHCSR_S_REGRDY_Pos DCB_DHCSR_S_REGRDY_Pos +#define CoreDebug_DHCSR_S_REGRDY_Msk DCB_DHCSR_S_REGRDY_Msk + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos DCB_DHCSR_C_SNAPSTALL_Pos +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk DCB_DHCSR_C_SNAPSTALL_Msk + +#define CoreDebug_DHCSR_C_MASKINTS_Pos DCB_DHCSR_C_MASKINTS_Pos +#define CoreDebug_DHCSR_C_MASKINTS_Msk DCB_DHCSR_C_MASKINTS_Msk + +#define CoreDebug_DHCSR_C_STEP_Pos DCB_DHCSR_C_STEP_Pos +#define CoreDebug_DHCSR_C_STEP_Msk DCB_DHCSR_C_STEP_Msk + +#define CoreDebug_DHCSR_C_HALT_Pos DCB_DHCSR_C_HALT_Pos +#define CoreDebug_DHCSR_C_HALT_Msk DCB_DHCSR_C_HALT_Msk + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos DCB_DHCSR_C_DEBUGEN_Pos +#define CoreDebug_DHCSR_C_DEBUGEN_Msk DCB_DHCSR_C_DEBUGEN_Msk + +/* Debug Core Register Selector Register Definitions */ +#define 
CoreDebug_DCRSR_REGWnR_Pos DCB_DCRSR_REGWnR_Pos +#define CoreDebug_DCRSR_REGWnR_Msk DCB_DCRSR_REGWnR_Msk + +#define CoreDebug_DCRSR_REGSEL_Pos DCB_DCRSR_REGSEL_Pos +#define CoreDebug_DCRSR_REGSEL_Msk DCB_DCRSR_REGSEL_Msk + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos DCB_DEMCR_TRCENA_Pos +#define CoreDebug_DEMCR_TRCENA_Msk DCB_DEMCR_TRCENA_Msk + +#define CoreDebug_DEMCR_MON_REQ_Pos DCB_DEMCR_MON_REQ_Pos +#define CoreDebug_DEMCR_MON_REQ_Msk DCB_DEMCR_MON_REQ_Msk + +#define CoreDebug_DEMCR_MON_STEP_Pos DCB_DEMCR_MON_STEP_Pos +#define CoreDebug_DEMCR_MON_STEP_Msk DCB_DEMCR_MON_STEP_Msk + +#define CoreDebug_DEMCR_MON_PEND_Pos DCB_DEMCR_MON_PEND_Pos +#define CoreDebug_DEMCR_MON_PEND_Msk DCB_DEMCR_MON_PEND_Msk + +#define CoreDebug_DEMCR_MON_EN_Pos DCB_DEMCR_MON_EN_Pos +#define CoreDebug_DEMCR_MON_EN_Msk DCB_DEMCR_MON_EN_Msk + +#define CoreDebug_DEMCR_VC_HARDERR_Pos DCB_DEMCR_VC_HARDERR_Pos +#define CoreDebug_DEMCR_VC_HARDERR_Msk DCB_DEMCR_VC_HARDERR_Msk + +#define CoreDebug_DEMCR_VC_INTERR_Pos DCB_DEMCR_VC_INTERR_Pos +#define CoreDebug_DEMCR_VC_INTERR_Msk DCB_DEMCR_VC_INTERR_Msk + +#define CoreDebug_DEMCR_VC_BUSERR_Pos DCB_DEMCR_VC_BUSERR_Pos +#define CoreDebug_DEMCR_VC_BUSERR_Msk DCB_DEMCR_VC_BUSERR_Msk + +#define CoreDebug_DEMCR_VC_STATERR_Pos DCB_DEMCR_VC_STATERR_Pos +#define CoreDebug_DEMCR_VC_STATERR_Msk DCB_DEMCR_VC_STATERR_Msk + +#define CoreDebug_DEMCR_VC_CHKERR_Pos DCB_DEMCR_VC_CHKERR_Pos +#define CoreDebug_DEMCR_VC_CHKERR_Msk DCB_DEMCR_VC_CHKERR_Msk + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos DCB_DEMCR_VC_NOCPERR_Pos +#define CoreDebug_DEMCR_VC_NOCPERR_Msk DCB_DEMCR_VC_NOCPERR_Msk + +#define CoreDebug_DEMCR_VC_MMERR_Pos DCB_DEMCR_VC_MMERR_Pos +#define CoreDebug_DEMCR_VC_MMERR_Msk DCB_DEMCR_VC_MMERR_Msk + +#define CoreDebug_DEMCR_VC_CORERESET_Pos DCB_DEMCR_VC_CORERESET_Pos +#define CoreDebug_DEMCR_VC_CORERESET_Msk DCB_DEMCR_VC_CORERESET_Msk + +#define CoreDebug ((CoreDebug_Type *) DCB_BASE) + +#endif // 
CMSIS_DISABLE_DEPRECATED + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ 
+#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. 
+ \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "m-profile/armv7m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. 
In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/core_starmc1.h b/src/third_party/cmsis/CMSIS/Core/Include/core_starmc1.h new file mode 100644 index 0000000..0bac12b --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/core_starmc1.h @@ -0,0 +1,3673 @@ +/* + * Copyright (c) 2009-2024 Arm Limited. + * Copyright (c) 2018-2022 Arm China. + * All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS ArmChina STAR-MC1 Core Peripheral Access Layer Header File + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_STAR_H_GENERIC +#define __CORE_STAR_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup STAR-MC1 + @{ + */ + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_version.h" + +/* Macro Define for STAR-MC1 */ + +#define __STAR_MC (1U) /*!< STAR-MC Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ti__) + #if defined 
(__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined 
(__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "third_party/cmsis/CMSIS/Core/Include/cmsis_compiler.h" + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_STAR_H_DEPENDANT +#define __CORE_STAR_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __STAR_REV + #define __STAR_REV 0x0000U + #warning "__STAR_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" 
+ #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group STAR-MC1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for STAR-MC1 processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/** \brief APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q 
Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/** \brief IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/** \brief xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ 
+#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/** \brief CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + 
+/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/** \brief NVIC Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 
(R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t RESERVED_ADD1[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: F00-D00=0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +typedef struct +{ + __IOM uint32_t CACR; /*!< Offset: 0x0 (R/W) L1 Cache Control Register */ + uint32_t RESERVED0[3U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x10 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x14 (R/W) Data Tightly-Coupled Memory Control Registers */ +} EMSS_Type; + +/** \brief SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: 
IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/** \brief SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET 
Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/** \brief SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/** \brief SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANNESS_Pos 15U /*!< SCB AIRCR: 
ENDIANNESS Position */ +#define SCB_AIRCR_ENDIANNESS_Msk (1UL << SCB_AIRCR_ENDIANNESS_Pos) /*!< SCB AIRCR: ENDIANNESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/** \brief SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/** \brief SCB Configuration Control Register Definitions */ 
+#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/** \brief SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define 
SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk 
(1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/** \brief SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/** \brief SCB MemManage Fault Status Register Definitions (part of SCB Configurable Fault 
Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/** \brief SCB BusFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + 
+#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/** \brief SCB UsageFault Status Register Definitions (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/** \brief SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/** \brief SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/** \brief SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask 
*/ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/** \brief SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +#define SCB_CLIDR_IC_Pos 0U /*!< SCB CLIDR: IC Position */ +#define SCB_CLIDR_IC_Msk (1UL << SCB_CLIDR_IC_Pos) /*!< SCB CLIDR: IC Mask */ + +#define SCB_CLIDR_DC_Pos 1U /*!< SCB CLIDR: DC Position */ +#define SCB_CLIDR_DC_Msk (1UL << SCB_CLIDR_DC_Pos) /*!< SCB CLIDR: DC Mask */ + +/** \brief SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/** \brief SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB 
CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/** \brief SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/** \brief SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/** \brief SCB D-Cache line Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_LEVEL_Pos 1U /*!< SCB DCISW: Level Position */ +#define SCB_DCISW_LEVEL_Msk (7UL << SCB_DCISW_LEVEL_Pos) /*!< SCB DCISW: Level Mask */ + +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set 
Position */ +#define SCB_DCISW_SET_Msk (0xFFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/** \brief SCB D-Cache Clean line by Set-way Register Definitions */ +#define SCB_DCCSW_LEVEL_Pos 1U /*!< SCB DCCSW: Level Position */ +#define SCB_DCCSW_LEVEL_Msk (7UL << SCB_DCCSW_LEVEL_Pos) /*!< SCB DCCSW: Level Mask */ + +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0xFFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/** \brief SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_LEVEL_Pos 1U /*!< SCB DCCISW: Level Position */ +#define SCB_DCCISW_LEVEL_Msk (7UL << SCB_DCCISW_LEVEL_Pos) /*!< SCB DCCISW: Level Mask */ + +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0xFFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* ArmChina: Implementation Defined */ +/** \brief Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/** \brief Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/** \brief L1 Cache Control Register 
Definitions */ +#define SCB_CACR_DCCLEAN_Pos 16U /*!< SCB CACR: DCCLEAN Position */ +#define SCB_CACR_DCCLEAN_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCCLEAN Mask */ + +#define SCB_CACR_ICACTIVE_Pos 13U /*!< SCB CACR: ICACTIVE Position */ +#define SCB_CACR_ICACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: ICACTIVE Mask */ + +#define SCB_CACR_DCACTIVE_Pos 12U /*!< SCB CACR: DCACTIVE Position */ +#define SCB_CACR_DCACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCACTIVE Mask */ + +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/** \brief SCnSCB Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/** \brief SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/** \brief SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/** \brief SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/** \brief SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << 
SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} ITM_Type; + +/** \brief ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/** \brief ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK 
Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/** \brief ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/** \brief ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define 
ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) 
Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; 
+ __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/** \brief DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA 
Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ 
+ +/** \brief DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/** \brief DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/** \brief DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/** \brief DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/** \brief DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/** \brief DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT 
FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_BPU Breakpoint Unit (BPU) + \brief Type definitions for the Breakpoint Unit (BPU) + @{ + */ + +/** + \brief Structure type to access the Breakpoint Unit Register (BPU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED1; + __IOM uint32_t COMP0; /*!< Offset: 0x008 (R/W) Comparator Register 0 */ + __IOM uint32_t COMP1; /*!< Offset: 0x00C (R/W) Comparator Register 1 */ + __IOM uint32_t COMP2; /*!< Offset: 0x010 (R/W) Comparator Register 2 */ + __IOM uint32_t COMP3; /*!< Offset: 0x014 (R/W) Comparator Register 3 */ + __IOM uint32_t COMP4; /*!< Offset: 0x018 (R/W) Comparator Register 0 */ + __IOM uint32_t COMP5; /*!< Offset: 0x01C (R/W) Comparator Register 0 */ + __IOM uint32_t COMP6; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t COMP7; /*!< Offset: 0x024 (R/W) Comparator Register 0 */ + uint32_t RESERVED2[997]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Register */ + uint32_t RESERVED3[3]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Architecture Register */ +} BPU_Type; + +/** \brief BPU Control Register Definitions */ +#define BPU_CTRL_REV_Pos 28U /*!< BPU CTRL: REV Position */ +#define BPU_CTRL_REV_Msk (0xFUL << BPU_CTRL_REV_Pos) /*!< BPU CTRL: REV Mask */ + +#define BPU_CTRL_NUM_CODE_H_Pos 12U /*!< BPU CTRL: NUM_CODE_H Position */ +#define BPU_CTRL_NUM_CODE_H_Msk (0x7UL << BPU_CTRL_NUM_CODE_H_Pos) /*!< BPU CTRL: NUM_CODE_H Mask */ + +#define BPU_CTRL_NUM_LIT_Pos 8U /*!< BPU CTRL: NUM_LIT Position */ +#define BPU_CTRL_NUM_LIT_Msk (0xFUL << BPU_CTRL_NUM_LIT_Pos) /*!< BPU CTRL: NUM_LIT Mask */ + +#define BPU_CTRL_NUM_CODE_L_Pos 4U /*!< BPU CTRL: NUM_CODE_L Position */ +#define BPU_CTRL_NUM_CODE_L_Msk (0xFUL << BPU_CTRL_NUM_CODE_L_Pos) 
/*!< BPU CTRL: NUM_CODE_L Mask */ + +#define BPU_CTRL_KEY_Pos 1U /*!< BPU CTRL: KEY Position */ +#define BPU_CTRL_KEY_Msk (0x1UL << BPU_CTRL_KEY_Pos) /*!< BPU CTRL: KEY Mask */ + +#define BPU_CTRL_ENABLE_Pos 0U /*!< BPU CTRL: ENABLE Position */ +#define BPU_CTRL_ENABLE_Msk (0x1UL << BPU_CTRL_ENABLE_Pos) /*!< BPU CTRL: ENABLE Mask */ + +/** \brief BPU Comparator Register Definitions */ +#define BPU_COMP_BPADDR_Pos 1U /*!< BPU COMP: BPADDR Position */ +#define BPU_COMP_BPADDR_Msk (0x7FFFFFFFUL << BPU_COMP_BPADDR_Pos) /*!< BPU COMP: BPADDR Mask */ + +#define BPU_COMP_BE_Pos 0U /*!< BPU COMP: BE Position */ +#define BPU_COMP_BE_Msk (0x1UL << BPU_COMP_BE_Pos) /*!< BPU COMP: BE Mask */ + +/*@}*/ /* end of group CMSIS_BPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPIU Trace Port Interface Unit (TPIU) + \brief Type definitions for the Trace Port Interface Unit (TPIU) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Unit Register (TPIU). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ 
+ uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPIU_Type; + +/** \brief TPIU Asynchronous Clock Prescaler Register Definitions */ +#define TPIU_ACPR_PRESCALER_Pos 0U /*!< TPIU ACPR: PRESCALER Position */ +#define TPIU_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPIU_ACPR_PRESCALER_Pos*/) /*!< TPIU ACPR: PRESCALER Mask */ + +/** \brief TPIU Selected Pin Protocol Register Definitions */ +#define TPIU_SPPR_TXMODE_Pos 0U /*!< TPIU SPPR: TXMODE Position */ +#define TPIU_SPPR_TXMODE_Msk (0x3UL /*<< TPIU_SPPR_TXMODE_Pos*/) /*!< TPIU SPPR: TXMODE Mask */ + +/** \brief TPIU Formatter and Flush Status Register Definitions */ +#define TPIU_FFSR_FtNonStop_Pos 3U /*!< TPIU FFSR: FtNonStop Position */ +#define TPIU_FFSR_FtNonStop_Msk (1UL << TPIU_FFSR_FtNonStop_Pos) /*!< TPIU FFSR: FtNonStop Mask */ + +#define TPIU_FFSR_TCPresent_Pos 2U /*!< TPIU FFSR: TCPresent Position */ +#define TPIU_FFSR_TCPresent_Msk (1UL << TPIU_FFSR_TCPresent_Pos) /*!< TPIU FFSR: TCPresent Mask */ + +#define TPIU_FFSR_FtStopped_Pos 1U /*!< TPIU FFSR: FtStopped Position */ +#define TPIU_FFSR_FtStopped_Msk (1UL << TPIU_FFSR_FtStopped_Pos) /*!< TPIU FFSR: FtStopped Mask */ + +#define TPIU_FFSR_FlInProg_Pos 0U /*!< TPIU FFSR: FlInProg Position */ +#define TPIU_FFSR_FlInProg_Msk (1UL /*<< TPIU_FFSR_FlInProg_Pos*/) /*!< TPIU FFSR: FlInProg Mask */ + +/** \brief TPIU Formatter and Flush Control Register Definitions */ 
+#define TPIU_FFCR_TrigIn_Pos 8U /*!< TPIU FFCR: TrigIn Position */ +#define TPIU_FFCR_TrigIn_Msk (1UL << TPIU_FFCR_TrigIn_Pos) /*!< TPIU FFCR: TrigIn Mask */ + +#define TPIU_FFCR_FOnMan_Pos 6U /*!< TPIU FFCR: FOnMan Position */ +#define TPIU_FFCR_FOnMan_Msk (1UL << TPIU_FFCR_FOnMan_Pos) /*!< TPIU FFCR: FOnMan Mask */ + +#define TPIU_FFCR_EnFCont_Pos 1U /*!< TPIU FFCR: EnFCont Position */ +#define TPIU_FFCR_EnFCont_Msk (1UL << TPIU_FFCR_EnFCont_Pos) /*!< TPIU FFCR: EnFCont Mask */ + +/** \brief TPIU Periodic Synchronization Control Register Definitions */ +#define TPIU_PSCR_PSCount_Pos 0U /*!< TPIU PSCR: PSCount Position */ +#define TPIU_PSCR_PSCount_Msk (0x1FUL /*<< TPIU_PSCR_PSCount_Pos*/) /*!< TPIU PSCR: TPSCount Mask */ + +/** \brief TPIU TRIGGER Register Definitions */ +#define TPIU_TRIGGER_TRIGGER_Pos 0U /*!< TPIU TRIGGER: TRIGGER Position */ +#define TPIU_TRIGGER_TRIGGER_Msk (1UL /*<< TPIU_TRIGGER_TRIGGER_Pos*/) /*!< TPIU TRIGGER: TRIGGER Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 0 Register Definitions */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPIU_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPIU ITFTTD0: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD0: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD0: ATB 
Interface 1 byte countt Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPIU ITFTTD0: ATB Interface 1 data2 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPIU ITFTTD0: ATB Interface 1 data1 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPIU_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPIU ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPIU_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPIU ITFTTD0: ATB Interface 1 data0 Position */ +#define TPIU_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPIU_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPIU ITFTTD0: ATB Interface 1 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 2 Register Definitions */ +#define TPIU_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID2S Position */ +#define TPIU_ITATBCTR2_AFVALID2S_Msk (1UL << TPIU_ITATBCTR2_AFVALID2S_Pos) /*!< TPIU ITATBCTR2: AFVALID2SS Mask */ + +#define TPIU_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR2: AFVALID1S Position */ +#define TPIU_ITATBCTR2_AFVALID1S_Msk (1UL << TPIU_ITATBCTR2_AFVALID1S_Pos) /*!< TPIU ITATBCTR2: AFVALID1SS Mask */ + +#define TPIU_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY2S Position */ +#define TPIU_ITATBCTR2_ATREADY2S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY2S Mask */ + +#define TPIU_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR2: ATREADY1S Position */ +#define TPIU_ITATBCTR2_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR2: ATREADY1S Mask */ + +/** \brief TPIU Integration Test FIFO Test Data 1 Register Definitions */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPIU 
ITFTTD1: ATB Interface 2 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPIU_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPIU ITFTTD1: ATB Interface 1 byte count Position */ +#define TPIU_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPIU_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPIU ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPIU ITFTTD1: ATB Interface 2 data2 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPIU ITFTTD1: ATB Interface 2 data1 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPIU_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPIU ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPIU_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPIU ITFTTD1: ATB Interface 2 data0 Position */ +#define TPIU_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPIU_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPIU ITFTTD1: ATB Interface 2 data0 Mask */ + +/** \brief TPIU Integration Test ATB Control Register 0 Definitions */ +#define TPIU_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID2S Position */ +#define TPIU_ITATBCTR0_AFVALID2S_Msk (1UL << TPIU_ITATBCTR0_AFVALID2S_Pos) /*!< TPIU ITATBCTR0: AFVALID2S Mask */ + +#define TPIU_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPIU ITATBCTR0: AFVALID1S Position */ +#define TPIU_ITATBCTR0_AFVALID1S_Msk (1UL << TPIU_ITATBCTR0_AFVALID1S_Pos) /*!< TPIU ITATBCTR0: AFVALID1S Mask */ + +#define TPIU_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY2S Position */ +#define TPIU_ITATBCTR0_ATREADY2S_Msk (1UL /*<< 
TPIU_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY2S Mask */ + +#define TPIU_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPIU ITATBCTR0: ATREADY1S Position */ +#define TPIU_ITATBCTR0_ATREADY1S_Msk (1UL /*<< TPIU_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPIU ITATBCTR0: ATREADY1S Mask */ + +/** \brief TPIU Integration Mode Control Register Definitions */ +#define TPIU_ITCTRL_Mode_Pos 0U /*!< TPIU ITCTRL: Mode Position */ +#define TPIU_ITCTRL_Mode_Msk (0x3UL /*<< TPIU_ITCTRL_Mode_Pos*/) /*!< TPIU ITCTRL: Mode Mask */ + +/** \brief TPIU DEVID Register Definitions */ +#define TPIU_DEVID_NRZVALID_Pos 11U /*!< TPIU DEVID: NRZVALID Position */ +#define TPIU_DEVID_NRZVALID_Msk (1UL << TPIU_DEVID_NRZVALID_Pos) /*!< TPIU DEVID: NRZVALID Mask */ + +#define TPIU_DEVID_MANCVALID_Pos 10U /*!< TPIU DEVID: MANCVALID Position */ +#define TPIU_DEVID_MANCVALID_Msk (1UL << TPIU_DEVID_MANCVALID_Pos) /*!< TPIU DEVID: MANCVALID Mask */ + +#define TPIU_DEVID_PTINVALID_Pos 9U /*!< TPIU DEVID: PTINVALID Position */ +#define TPIU_DEVID_PTINVALID_Msk (1UL << TPIU_DEVID_PTINVALID_Pos) /*!< TPIU DEVID: PTINVALID Mask */ + +#define TPIU_DEVID_FIFOSZ_Pos 6U /*!< TPIU DEVID: FIFOSZ Position */ +#define TPIU_DEVID_FIFOSZ_Msk (0x7UL << TPIU_DEVID_FIFOSZ_Pos) /*!< TPIU DEVID: FIFOSZ Mask */ + +#define TPIU_DEVID_NrTraceInput_Pos 0U /*!< TPIU DEVID: NrTraceInput Position */ +#define TPIU_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPIU_DEVID_NrTraceInput_Pos*/) /*!< TPIU DEVID: NrTraceInput Mask */ + +/** \brief TPIU DEVTYPE Register Definitions */ +#define TPIU_DEVTYPE_SubType_Pos 4U /*!< TPIU DEVTYPE: SubType Position */ +#define TPIU_DEVTYPE_SubType_Msk (0xFUL /*<< TPIU_DEVTYPE_SubType_Pos*/) /*!< TPIU DEVTYPE: SubType Mask */ + +#define TPIU_DEVTYPE_MajorType_Pos 0U /*!< TPIU DEVTYPE: MajorType Position */ +#define TPIU_DEVTYPE_MajorType_Msk (0xFUL << TPIU_DEVTYPE_MajorType_Pos) /*!< TPIU DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPIU */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** 
+ \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/** \brief MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define 
MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/** \brief MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/** \brief MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/** \brief MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/** \brief MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) 
/*!< MPU RLAR: Region enable bit Mask */ + +/** \brief MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/** \brief MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/** \brief SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/** \brief SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/** \brief SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/** \brief SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/** \brief SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define 
SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/** \brief SAU Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating 
Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/** \brief FPU Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< 
FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/** \brief FPU Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/** \brief FPU Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 
25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/** \brief FPU Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: Rounding modes bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: Rounding modes bits Mask */ + +#define FPU_MVFR0_FPShortvec_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_FPShortvec_Msk (0xFUL << FPU_MVFR0_FPShortvec_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPExceptrap_Pos 12U /*!< MVFR0: Exception trapping bits Position */ +#define FPU_MVFR0_FPExceptrap_Msk (0xFUL << FPU_MVFR0_FPExceptrap_Pos) /*!< MVFR0: Exception trapping bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMD registers bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMD registers bits Mask */ + +/** \brief FPU Media and VFP 
Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: Fused MAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: Fused MAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL /*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/** \brief FPU Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/** \brief DCB Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB 
DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/** \brief DCB Debug Core Register Selector Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/** \brief DCB Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk 
(0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/** \brief DCB Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (1UL 
<< DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/** \brief DCB Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable 
select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/** \brief DCB Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/** \brief DIB SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/** \brief DIB SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/** \brief DIB Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: 
Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/** \brief DIB SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/** \brief DIB SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). 
+ @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define BPU_BASE (0xE0002000UL) /*!< BPU Base Address */ + #define TPIU_BASE (0xE0040000UL) /*!< TPIU Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define EMSS_BASE (0xE001E000UL) /*!AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? 
(uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *) ((uintptr_t) SCB->VTOR); + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/** + \brief Software Reset + \details Initiates a system reset request to reset the CPU. + */ +__NO_RETURN __STATIC_INLINE void __SW_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_BFHFNMINS_Msk) | /* Keep BFHFNMINS unchanged. Use this Reset function in case your case need to keep it */ + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | /* Keep priority group unchanged */ + SCB_AIRCR_SYSRESETREQ_Msk ); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. 
+ \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + + #include "m-profile/armv8m_mpu.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#define __SCB_DCACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). 
See also register SCB_CCSIDR */ +#define __SCB_ICACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. 
+ \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate 
D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined 
(__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned. 
+ \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. 
+ \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/cmsis/CMSIS/Core/Include/tz_context.h b/src/third_party/cmsis/CMSIS/Core/Include/tz_context.h new file mode 100644 index 0000000..e095956 --- /dev/null +++ b/src/third_party/cmsis/CMSIS/Core/Include/tz_context.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * CMSIS Core(M) Context Management for Armv8-M TrustZone + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef TZ_CONTEXT_H +#define TZ_CONTEXT_H + +#include + +#ifndef TZ_MODULEID_T +#define TZ_MODULEID_T +/// \details Data type that identifies secure software modules called by a process. +typedef uint32_t TZ_ModuleId_t; +#endif + +/// \details TZ Memory ID identifies an allocated memory slot. 
+typedef uint32_t TZ_MemoryId_t; + +/// Initialize secure context memory system +/// \return execution status (1: success, 0: error) +uint32_t TZ_InitContextSystem_S (void); + +/// Allocate context memory for calling secure software modules in TrustZone +/// \param[in] module identifies software modules called from non-secure mode +/// \return value != 0 id TrustZone memory slot identifier +/// \return value 0 no memory available or internal error +TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module); + +/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id); + +/// Load secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_LoadContext_S (TZ_MemoryId_t id); + +/// Store secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_StoreContext_S (TZ_MemoryId_t id); + +#endif // TZ_CONTEXT_H diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/arm_common_tables.h b/src/third_party/cmsis/CMSIS/DSP/Include/arm_common_tables.h deleted file mode 100644 index 40b351b..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/arm_common_tables.h +++ /dev/null @@ -1,529 +0,0 @@ -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: arm_common_tables.h - * Description: Extern declaration for common tables - * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _ARM_COMMON_TABLES_H -#define _ARM_COMMON_TABLES_H - -#include "arm_math_types.h" -#include "dsp/fast_math_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) - /* Double Precision Float CFFT twiddles */ - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) - extern const uint16_t armBitRevTable[1024]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_16) - extern const uint64_t twiddleCoefF64_16[32]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_32) - extern const uint64_t twiddleCoefF64_32[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_64) - extern const uint64_t twiddleCoefF64_64[128]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_128) - extern const uint64_t twiddleCoefF64_128[256]; - 
#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_256) - extern const uint64_t twiddleCoefF64_256[512]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_512) - extern const uint64_t twiddleCoefF64_512[1024]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_1024) - extern const uint64_t twiddleCoefF64_1024[2048]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_2048) - extern const uint64_t twiddleCoefF64_2048[4096]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_4096) - extern const uint64_t twiddleCoefF64_4096[8192]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_16) - extern const float32_t twiddleCoef_16[32]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_32) - extern const float32_t twiddleCoef_32[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_64) - extern const float32_t twiddleCoef_64[128]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if 
!defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_128) - extern const float32_t twiddleCoef_128[256]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_256) - extern const float32_t twiddleCoef_256[512]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_512) - extern const float32_t twiddleCoef_512[1024]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_1024) - extern const float32_t twiddleCoef_1024[2048]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_2048) - extern const float32_t twiddleCoef_2048[4096]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_4096) - extern const float32_t twiddleCoef_4096[8192]; - #define twiddleCoef twiddleCoef_4096 - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - /* Q31 */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_16) - extern const q31_t twiddleCoef_16_q31[24]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_32) - extern const q31_t twiddleCoef_32_q31[48]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || 
defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_64) - extern const q31_t twiddleCoef_64_q31[96]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_128) - extern const q31_t twiddleCoef_128_q31[192]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_256) - extern const q31_t twiddleCoef_256_q31[384]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_512) - extern const q31_t twiddleCoef_512_q31[768]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_1024) - extern const q31_t twiddleCoef_1024_q31[1536]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_2048) - extern const q31_t twiddleCoef_2048_q31[3072]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096) - extern const q31_t twiddleCoef_4096_q31[6144]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_16) - extern const q15_t twiddleCoef_16_q15[24]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_32) - extern const q15_t 
twiddleCoef_32_q15[48]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_64) - extern const q15_t twiddleCoef_64_q15[96]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_128) - extern const q15_t twiddleCoef_128_q15[192]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_256) - extern const q15_t twiddleCoef_256_q15[384]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_512) - extern const q15_t twiddleCoef_512_q15[768]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_1024) - extern const q15_t twiddleCoef_1024_q15[1536]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_2048) - extern const q15_t twiddleCoef_2048_q15[3072]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096) - extern const q15_t twiddleCoef_4096_q15[6144]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - /* Double Precision Float RFFT twiddles */ - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_32) - extern const uint64_t twiddleCoefF64_rfft_32[32]; - #endif /* 
!defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_64) - extern const uint64_t twiddleCoefF64_rfft_64[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_128) - extern const uint64_t twiddleCoefF64_rfft_128[128]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_256) - extern const uint64_t twiddleCoefF64_rfft_256[256]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_512) - extern const uint64_t twiddleCoefF64_rfft_512[512]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_1024) - extern const uint64_t twiddleCoefF64_rfft_1024[1024]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_2048) - extern const uint64_t twiddleCoefF64_rfft_2048[2048]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_4096) - extern const uint64_t twiddleCoefF64_rfft_4096[4096]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_32) - extern const float32_t twiddleCoef_rfft_32[32]; - #endif 
/* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_64) - extern const float32_t twiddleCoef_rfft_64[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_128) - extern const float32_t twiddleCoef_rfft_128[128]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_256) - extern const float32_t twiddleCoef_rfft_256[256]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_512) - extern const float32_t twiddleCoef_rfft_512[512]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_1024) - extern const float32_t twiddleCoef_rfft_1024[1024]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_2048) - extern const float32_t twiddleCoef_rfft_2048[2048]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_4096) - extern const float32_t twiddleCoef_rfft_4096[4096]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - - /* Double precision floating-point bit reversal tables */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_16) - #define 
ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH ((uint16_t)12) - extern const uint16_t armBitRevIndexTableF64_16[ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_32) - #define ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH ((uint16_t)24) - extern const uint16_t armBitRevIndexTableF64_32[ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_64) - #define ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH ((uint16_t)56) - extern const uint16_t armBitRevIndexTableF64_64[ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_128) - #define ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH ((uint16_t)112) - extern const uint16_t armBitRevIndexTableF64_128[ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_256) - #define ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH ((uint16_t)240) - extern const uint16_t armBitRevIndexTableF64_256[ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_512) - #define ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH ((uint16_t)480) - extern const uint16_t armBitRevIndexTableF64_512[ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if 
!defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_1024) - #define ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH ((uint16_t)992) - extern const uint16_t armBitRevIndexTableF64_1024[ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_2048) - #define ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH ((uint16_t)1984) - extern const uint16_t armBitRevIndexTableF64_2048[ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_4096) - #define ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH ((uint16_t)4032) - extern const uint16_t armBitRevIndexTableF64_4096[ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - /* floating-point bit reversal tables */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_16) - #define ARMBITREVINDEXTABLE_16_TABLE_LENGTH ((uint16_t)20) - extern const uint16_t armBitRevIndexTable16[ARMBITREVINDEXTABLE_16_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_32) - #define ARMBITREVINDEXTABLE_32_TABLE_LENGTH ((uint16_t)48) - extern const uint16_t armBitRevIndexTable32[ARMBITREVINDEXTABLE_32_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_64) - #define ARMBITREVINDEXTABLE_64_TABLE_LENGTH ((uint16_t)56) - extern const uint16_t 
armBitRevIndexTable64[ARMBITREVINDEXTABLE_64_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_128) - #define ARMBITREVINDEXTABLE_128_TABLE_LENGTH ((uint16_t)208) - extern const uint16_t armBitRevIndexTable128[ARMBITREVINDEXTABLE_128_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_256) - #define ARMBITREVINDEXTABLE_256_TABLE_LENGTH ((uint16_t)440) - extern const uint16_t armBitRevIndexTable256[ARMBITREVINDEXTABLE_256_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_512) - #define ARMBITREVINDEXTABLE_512_TABLE_LENGTH ((uint16_t)448) - extern const uint16_t armBitRevIndexTable512[ARMBITREVINDEXTABLE_512_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_1024) - #define ARMBITREVINDEXTABLE_1024_TABLE_LENGTH ((uint16_t)1800) - extern const uint16_t armBitRevIndexTable1024[ARMBITREVINDEXTABLE_1024_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_2048) - #define ARMBITREVINDEXTABLE_2048_TABLE_LENGTH ((uint16_t)3808) - extern const uint16_t armBitRevIndexTable2048[ARMBITREVINDEXTABLE_2048_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_4096) - #define ARMBITREVINDEXTABLE_4096_TABLE_LENGTH 
((uint16_t)4032) - extern const uint16_t armBitRevIndexTable4096[ARMBITREVINDEXTABLE_4096_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - - /* fixed-point bit reversal tables */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_16) - #define ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH ((uint16_t)12) - extern const uint16_t armBitRevIndexTable_fixed_16[ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_32) - #define ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH ((uint16_t)24) - extern const uint16_t armBitRevIndexTable_fixed_32[ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_64) - #define ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH ((uint16_t)56) - extern const uint16_t armBitRevIndexTable_fixed_64[ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_128) - #define ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH ((uint16_t)112) - extern const uint16_t armBitRevIndexTable_fixed_128[ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_256) - #define ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH ((uint16_t)240) - extern const uint16_t armBitRevIndexTable_fixed_256[ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - 
- #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_512) - #define ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH ((uint16_t)480) - extern const uint16_t armBitRevIndexTable_fixed_512[ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_1024) - #define ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH ((uint16_t)992) - extern const uint16_t armBitRevIndexTable_fixed_1024[ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_2048) - #define ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH ((uint16_t)1984) - extern const uint16_t armBitRevIndexTable_fixed_2048[ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) - #define ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH ((uint16_t)4032) - extern const uint16_t armBitRevIndexTable_fixed_4096[ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_F32) - extern const float32_t realCoefA[8192]; - extern const float32_t realCoefB[8192]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q31) - extern const q31_t realCoefAQ31[8192]; - extern const q31_t realCoefBQ31[8192]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q15) - extern const q15_t realCoefAQ15[8192]; - extern const 
q15_t realCoefBQ15[8192]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_128) - extern const float32_t Weights_128[256]; - extern const float32_t cos_factors_128[128]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_512) - extern const float32_t Weights_512[1024]; - extern const float32_t cos_factors_512[512]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_2048) - extern const float32_t Weights_2048[4096]; - extern const float32_t cos_factors_2048[2048]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_8192) - extern const float32_t Weights_8192[16384]; - extern const float32_t cos_factors_8192[8192]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_128) - extern const q15_t WeightsQ15_128[256]; - extern const q15_t cos_factorsQ15_128[128]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_512) - extern const q15_t WeightsQ15_512[1024]; - extern const q15_t cos_factorsQ15_512[512]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_2048) - extern const q15_t WeightsQ15_2048[4096]; - extern const q15_t cos_factorsQ15_2048[2048]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_8192) - extern const q15_t WeightsQ15_8192[16384]; - extern const q15_t cos_factorsQ15_8192[8192]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_128) - extern const q31_t WeightsQ31_128[256]; - extern const q31_t cos_factorsQ31_128[128]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_512) 
- extern const q31_t WeightsQ31_512[1024]; - extern const q31_t cos_factorsQ31_512[512]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_2048) - extern const q31_t WeightsQ31_2048[4096]; - extern const q31_t cos_factorsQ31_2048[2048]; - #endif - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_8192) - extern const q31_t WeightsQ31_8192[16384]; - extern const q31_t cos_factorsQ31_8192[8192]; - #endif - -#endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_TABLES) */ - -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_ALLOW_TABLES) - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q15) - extern const q15_t armRecipTableQ15[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q31) - extern const q31_t armRecipTableQ31[64]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - - /* Tables for Fast Math Sine and Cosine */ - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_F32) - extern const float32_t sinTable_f32[FAST_MATH_TABLE_SIZE + 1]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q31) - extern const q31_t sinTable_q31[FAST_MATH_TABLE_SIZE + 1]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q15) - extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - - #if defined(ARM_MATH_MVEI) - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || 
defined(ARM_TABLE_FAST_SQRT_Q31_MVE) - extern const q31_t sqrtTable_Q31[256]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - #endif - - #if defined(ARM_MATH_MVEI) - #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE) - extern const q15_t sqrtTable_Q15[256]; - #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ - #endif - -#endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_TABLES) */ - -#if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) - extern const float32_t exp_tab[8]; - extern const float32_t __logf_lut_f32[8]; -#endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) */ - -#if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) -extern const unsigned char hwLUT[256]; -#endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */ - -#ifdef __cplusplus -} -#endif - -#endif /* ARM_COMMON_TABLES_H */ - diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/arm_helium_utils.h b/src/third_party/cmsis/CMSIS/DSP/Include/arm_helium_utils.h deleted file mode 100755 index 915ad7c..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/arm_helium_utils.h +++ /dev/null @@ -1,748 +0,0 @@ -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: arm_helium_utils.h - * Description: Utility functions for Helium development - * - * $Date: 09. September 2019 - * $Revision: V.1.5.1 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _ARM_UTILS_HELIUM_H_ -#define _ARM_UTILS_HELIUM_H_ - - -#ifdef __cplusplus -extern "C" -{ -#endif -/*************************************** - -Definitions available for MVEF and MVEI - -***************************************/ -#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) - -#define INACTIVELANE 0 /* inactive lane content */ - - -#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */ - -/*************************************** - -Definitions available for MVEF only - -***************************************/ -#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) - -__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in) -{ - float32_t acc; - - acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) + - vgetq_lane(in, 2) + vgetq_lane(in, 3); - - return acc; -} - -__STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in) -{ - float16x8_t tmpVec; - _Float16 acc; - - tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in); - in = vaddq_f16(tmpVec, in); - tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in); - in = vaddq_f16(tmpVec, in); - acc = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4); - - return acc; -} - - -/* newton initial guess */ -#define INVSQRT_MAGIC_F32 0x5f3759df -#define INV_NEWTON_INIT_F32 0x7EF127EA - - -#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)\ -{ \ - float32x4_t tmp; \ - \ - /* tmp = xhalf * x * x */ \ - tmp = vmulq(xStart, xStart); \ - tmp = vmulq(tmp, xHalf); \ - /* (1.5f - xhalf * x * x) */ \ - tmp = 
vsubq(vdupq_n_f32(1.5f), tmp); \ - /* x = x*(1.5f-xhalf*x*x); */ \ - invSqrt = vmulq(tmp, xStart); \ -} -#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */ - - -/*************************************** - -Definitions available for f16 datatype with HW acceleration only - -***************************************/ -#if defined (ARM_MATH_MVE_FLOAT16) -__STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16( - float16x8_t vecIn) -{ - float16x8_t vecTmp, vecOut; - uint32_t tmp; - - vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn); - // TO TRACK : using canonical addition leads to unefficient code generation for f16 - // vecTmp = vecTmp + vecAccCpx0; - /* - * Compute - * re0+re1 | im0+im1 | re0+re1 | im0+im1 - * re2+re3 | im2+im3 | re2+re3 | im2+im3 - */ - vecTmp = vaddq(vecTmp, vecIn); - vecOut = vecTmp; - /* - * shift left, random tmp insertion in bottom - */ - vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut) , &tmp, 32)); - /* - * Compute: - * DONTCARE | DONTCARE | re0+re1+re0+re1 |im0+im1+im0+im1 - * re0+re1+re2+re3 | im0+im1+im2+im3 | re2+re3+re2+re3 |im2+im3+im2+im3 - */ - vecOut = vaddq(vecOut, vecTmp); - /* - * Cmplx sum is in 4rd & 5th f16 elt - * return full vector - */ - return vecOut; -} - - -#define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im) \ -{ \ - float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vec); \ - Re = vgetq_lane(vecOut, 4); \ - Im = vgetq_lane(vecOut, 5); \ -} - -__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16( - float16x8_t vecIn, - float16_t *pOut) -{ - float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn); - /* - * Cmplx sum is in 4rd & 5th f16 elt - * use 32-bit extraction - */ - *(float32_t *) pOut = ((float32x4_t) vecOut)[2]; -} - - -#define INVSQRT_MAGIC_F16 0x59ba /* ( 0x1ba = 0x3759df >> 13) */ - -/* canonical version of INVSQRT_NEWTON_MVE_F16 leads to bad performance */ -#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart) \ -{ \ - float16x8_t tmp; \ - \ - /* tmp = xhalf * x * 
x */ \ - tmp = vmulq(xStart, xStart); \ - tmp = vmulq(tmp, xHalf); \ - /* (1.5f - xhalf * x * x) */ \ - tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp); \ - /* x = x*(1.5f-xhalf*x*x); */ \ - invSqrt = vmulq(tmp, xStart); \ -} - -#endif - -/*************************************** - -Definitions available for MVEI and MVEF only - -***************************************/ -#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) -/* Following functions are used to transpose matrix in f32 and q31 cases */ -__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve( - uint32_t * pDataSrc, - uint32_t * pDataDest) -{ - static const uint32x4_t vecOffs = { 0, 2, 1, 3 }; - /* - * - * | 0 1 | => | 0 2 | - * | 2 3 | | 1 3 | - * - */ - uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc); - vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn); - - return (ARM_MATH_SUCCESS); -} - -__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve( - uint32_t * pDataSrc, - uint32_t * pDataDest) -{ - const uint32x4_t vecOffs1 = { 0, 3, 6, 1}; - const uint32x4_t vecOffs2 = { 4, 7, 2, 5}; - /* - * - * | 0 1 2 | | 0 3 6 | 4 x 32 flattened version | 0 3 6 1 | - * | 3 4 5 | => | 1 4 7 | => | 4 7 2 5 | - * | 6 7 8 | | 2 5 8 | (row major) | 8 . . . 
| - * - */ - uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc); - uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]); - - vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1); - vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2); - - pDataDest[8] = pDataSrc[8]; - - return (ARM_MATH_SUCCESS); -} - -__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest) -{ - /* - * 4x4 Matrix transposition - * is 4 x de-interleave operation - * - * 0 1 2 3 0 4 8 12 - * 4 5 6 7 1 5 9 13 - * 8 9 10 11 2 6 10 14 - * 12 13 14 15 3 7 11 15 - */ - - uint32x4x4_t vecIn; - - vecIn = vld4q((uint32_t const *) pDataSrc); - vstrwq(pDataDest, vecIn.val[0]); - pDataDest += 4; - vstrwq(pDataDest, vecIn.val[1]); - pDataDest += 4; - vstrwq(pDataDest, vecIn.val[2]); - pDataDest += 4; - vstrwq(pDataDest, vecIn.val[3]); - - return (ARM_MATH_SUCCESS); -} - - -__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve( - uint16_t srcRows, - uint16_t srcCols, - uint32_t * pDataSrc, - uint32_t * pDataDest) -{ - uint32x4_t vecOffs; - uint32_t i; - uint32_t blkCnt; - uint32_t const *pDataC; - uint32_t *pDataDestR; - uint32x4_t vecIn; - - vecOffs = vidupq_u32((uint32_t)0, 1); - vecOffs = vecOffs * srcCols; - - i = srcCols; - do - { - pDataC = (uint32_t const *) pDataSrc; - pDataDestR = pDataDest; - - blkCnt = srcRows >> 2; - while (blkCnt > 0U) - { - vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs); - vstrwq(pDataDestR, vecIn); - pDataDestR += 4; - pDataC = pDataC + srcCols * 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * tail - */ - blkCnt = srcRows & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs); - vstrwq_p(pDataDestR, vecIn, p0); - } - - pDataSrc += 1; - pDataDest += srcRows; - } - while (--i); - - return (ARM_MATH_SUCCESS); -} - -__STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit( - uint16_t srcRows, 
- uint16_t srcCols, - uint32_t *pDataSrc, - uint16_t dstRows, - uint16_t dstCols, - uint32_t *pDataDest) -{ - uint32_t i; - uint32_t const *pDataC; - uint32_t *pDataRow; - uint32_t *pDataDestR, *pDataDestRow; - uint32x4_t vecOffsRef, vecOffsCur; - uint32_t blkCnt; - uint32x4_t vecIn; - -#ifdef ARM_MATH_MATRIX_CHECK - /* - * Check for matrix mismatch condition - */ - if ((srcRows != dstCols) || (srcCols != dstRows)) - { - /* - * Set status as ARM_MATH_SIZE_MISMATCH - */ - return = ARM_MATH_SIZE_MISMATCH; - } -#else - (void)dstRows; - (void)dstCols; -#endif - - /* 2x2, 3x3 and 4x4 specialization to be added */ - - vecOffsRef[0] = 0; - vecOffsRef[1] = 1; - vecOffsRef[2] = srcCols << 1; - vecOffsRef[3] = (srcCols << 1) + 1; - - pDataRow = pDataSrc; - pDataDestRow = pDataDest; - i = srcCols; - do - { - pDataC = (uint32_t const *) pDataRow; - pDataDestR = pDataDestRow; - vecOffsCur = vecOffsRef; - - blkCnt = (srcRows * CMPLX_DIM) >> 2; - while (blkCnt > 0U) - { - vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur); - vstrwq(pDataDestR, vecIn); - pDataDestR += 4; - vecOffsCur = vaddq(vecOffsCur, (srcCols << 2)); - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (srcRows * CMPLX_DIM) & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur); - vstrwq_p(pDataDestR, vecIn, p0); - } - - pDataRow += CMPLX_DIM; - pDataDestRow += (srcRows * CMPLX_DIM); - } - while (--i); - - return (ARM_MATH_SUCCESS); -} - -__STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest) -{ - pDataDest[0] = pDataSrc[0]; - pDataDest[3] = pDataSrc[3]; - pDataDest[2] = pDataSrc[1]; - pDataDest[1] = pDataSrc[2]; - - return (ARM_MATH_SUCCESS); -} - -__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest) -{ - static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 
}; - uint16x8_t vecOffs1; - uint16x8_t vecIn1; - /* - * - * | 0 1 2 | | 0 3 6 | 8 x 16 flattened version | 0 3 6 1 4 7 2 5 | - * | 3 4 5 | => | 1 4 7 | => | 8 . . . . . . . | - * | 6 7 8 | | 2 5 8 | (row major) - * - */ - vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33); - vecIn1 = vldrhq_u16((uint16_t const *) pDataSrc); - - vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1); - - pDataDest[8] = pDataSrc[8]; - - return (ARM_MATH_SUCCESS); -} - - -__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest) -{ - static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 }; - static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 }; - uint16x8_t vecOffs1, vecOffs2; - uint16x8_t vecIn1, vecIn2; - uint16_t const * pDataSrcVec = (uint16_t const *) pDataSrc; - - /* - * 4x4 Matrix transposition - * - * | 0 1 2 3 | | 0 4 8 12 | 8 x 16 flattened version - * | 4 5 6 7 | => | 1 5 9 13 | => [0 4 8 12 1 5 9 13] - * | 8 9 10 11 | | 2 6 10 14 | [2 6 10 14 3 7 11 15] - * | 12 13 14 15 | | 3 7 11 15 | - */ - - vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1); - vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2); - vecIn1 = vldrhq_u16(pDataSrcVec); - pDataSrcVec += 8; - vecIn2 = vldrhq_u16(pDataSrcVec); - - vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1); - vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2); - - - return (ARM_MATH_SUCCESS); -} - - - -__STATIC_INLINE arm_status arm_mat_trans_16bit_generic( - uint16_t srcRows, - uint16_t srcCols, - uint16_t * pDataSrc, - uint16_t * pDataDest) -{ - uint16x8_t vecOffs; - uint32_t i; - uint32_t blkCnt; - uint16_t const *pDataC; - uint16_t *pDataDestR; - uint16x8_t vecIn; - - vecOffs = vidupq_u16((uint32_t)0, 1); - vecOffs = vecOffs * srcCols; - - i = srcCols; - while(i > 0U) - { - pDataC = (uint16_t const *) pDataSrc; - pDataDestR = pDataDest; - - blkCnt = srcRows >> 3; - while (blkCnt > 0U) - { - vecIn = 
vldrhq_gather_shifted_offset_u16(pDataC, vecOffs); - vstrhq_u16(pDataDestR, vecIn); - pDataDestR += 8; - pDataC = pDataC + srcCols * 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * tail - */ - blkCnt = srcRows & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs); - vstrhq_p_u16(pDataDestR, vecIn, p0); - } - pDataSrc += 1; - pDataDest += srcRows; - i--; - } - - return (ARM_MATH_SUCCESS); -} - - -__STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit( - uint16_t srcRows, - uint16_t srcCols, - uint16_t *pDataSrc, - uint16_t dstRows, - uint16_t dstCols, - uint16_t *pDataDest) -{ - static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 }; - int i; - uint16x8_t vecOffsRef, vecOffsCur; - uint16_t const *pDataC; - uint16_t *pDataRow; - uint16_t *pDataDestR, *pDataDestRow; - uint32_t blkCnt; - uint16x8_t vecIn; - -#ifdef ARM_MATH_MATRIX_CHECK - /* - * Check for matrix mismatch condition - */ - if ((srcRows != dstCols) || (srcCols != dstRows)) - { - /* - * Set status as ARM_MATH_SIZE_MISMATCH - */ - return = ARM_MATH_SIZE_MISMATCH; - } -#else - (void)dstRows; - (void)dstCols; -#endif - - /* - * 2x2, 3x3 and 4x4 specialization to be added - */ - - - /* - * build [0, 1, 2xcol, 2xcol+1, 4xcol, 4xcol+1, 6xcol, 6xcol+1] - */ - vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol); - vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM)) - + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1); - - pDataRow = pDataSrc; - pDataDestRow = pDataDest; - i = srcCols; - do - { - pDataC = (uint16_t const *) pDataRow; - pDataDestR = pDataDestRow; - vecOffsCur = vecOffsRef; - - blkCnt = (srcRows * CMPLX_DIM) >> 3; - while (blkCnt > 0U) - { - vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur); - vstrhq(pDataDestR, vecIn); - pDataDestR+= 8; // VEC_LANES_U16 - vecOffsCur = vaddq(vecOffsCur, (srcCols << 3)); - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - 
/* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (srcRows * CMPLX_DIM) & 0x7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur); - vstrhq_p(pDataDestR, vecIn, p0); - } - - pDataRow += CMPLX_DIM; - pDataDestRow += (srcRows * CMPLX_DIM); - } - while (--i); - - return (ARM_MATH_SUCCESS); -} -#endif /* MVEF and MVEI */ - -/*************************************** - -Definitions available for MVEI only - -***************************************/ -#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) - -#include "arm_common_tables.h" - -#define MVE_ASRL_SAT16(acc, shift) ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff) -#define MVE_ASRL_SAT32(acc, shift) ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff) - - -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE) -__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn) -{ - q63x2_t vecTmpLL; - q31x4_t vecTmp0, vecTmp1; - q31_t scale; - q63_t tmp64; - q31x4_t vecNrm, vecDst, vecIdx, vecSignBits; - - - vecSignBits = vclsq(vecIn); - vecSignBits = vbicq(vecSignBits, 1); - /* - * in = in << no_of_sign_bits; - */ - vecNrm = vshlq(vecIn, vecSignBits); - /* - * index = in >> 24; - */ - vecIdx = vecNrm >> 24; - vecIdx = vecIdx << 1; - - vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx); - - vecIdx = vecIdx + 1; - - vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx); - - vecTmp1 = vqrdmulhq(vecTmp1, vecNrm); - vecTmp0 = vecTmp0 - vecTmp1; - vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0); - vecTmp1 = vqrdmulhq(vecNrm, vecTmp1); - vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1; - vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1); - vecTmpLL = vmullbq_int(vecNrm, vecTmp0); - - /* - * scale elements 0, 2 - */ - scale = 26 + (vecSignBits[0] >> 1); - tmp64 = asrl(vecTmpLL[0], scale); - vecDst[0] = (q31_t) tmp64; - - scale = 26 + (vecSignBits[2] >> 1); - tmp64 = 
asrl(vecTmpLL[1], scale); - vecDst[2] = (q31_t) tmp64; - - vecTmpLL = vmulltq_int(vecNrm, vecTmp0); - - /* - * scale elements 1, 3 - */ - scale = 26 + (vecSignBits[1] >> 1); - tmp64 = asrl(vecTmpLL[0], scale); - vecDst[1] = (q31_t) tmp64; - - scale = 26 + (vecSignBits[3] >> 1); - tmp64 = asrl(vecTmpLL[1], scale); - vecDst[3] = (q31_t) tmp64; - /* - * set negative values to 0 - */ - vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0)); - - return vecDst; -} -#endif - -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE) -__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn) -{ - q31x4_t vecTmpLev, vecTmpLodd, vecSignL; - q15x8_t vecTmp0, vecTmp1; - q15x8_t vecNrm, vecDst, vecIdx, vecSignBits; - - vecDst = vuninitializedq_s16(); - - vecSignBits = vclsq(vecIn); - vecSignBits = vbicq(vecSignBits, 1); - /* - * in = in << no_of_sign_bits; - */ - vecNrm = vshlq(vecIn, vecSignBits); - - vecIdx = vecNrm >> 8; - vecIdx = vecIdx << 1; - - vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx); - - vecIdx = vecIdx + 1; - - vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx); - - vecTmp1 = vqrdmulhq(vecTmp1, vecNrm); - vecTmp0 = vecTmp0 - vecTmp1; - vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0); - vecTmp1 = vqrdmulhq(vecNrm, vecTmp1); - vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1; - vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1); - - vecSignBits = vecSignBits >> 1; - - vecTmpLev = vmullbq_int(vecNrm, vecTmp0); - vecTmpLodd = vmulltq_int(vecNrm, vecTmp0); - - vecTmp0 = vecSignBits + 10; - /* - * negate sign to apply register based vshl - */ - vecTmp0 = -vecTmp0; - - /* - * shift even elements - */ - vecSignL = vmovlbq(vecTmp0); - vecTmpLev = vshlq(vecTmpLev, vecSignL); - /* - * shift odd elements - */ - vecSignL = vmovltq(vecTmp0); - vecTmpLodd = vshlq(vecTmpLodd, vecSignL); - /* - * merge and narrow odd and even parts - */ - vecDst = vmovnbq_s32(vecDst, vecTmpLev); - vecDst = vmovntq_s32(vecDst, 
vecTmpLodd); - /* - * set negative values to 0 - */ - vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0)); - - return vecDst; -} -#endif - -#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math.h b/src/third_party/cmsis/CMSIS/DSP/Include/arm_math.h deleted file mode 100644 index 404ee91..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math.h +++ /dev/null @@ -1,246 +0,0 @@ -/****************************************************************************** - * @file arm_math.h - * @brief Public header file for CMSIS DSP Library - * @version V1.7.0 - * @date 18. March 2019 - ******************************************************************************/ -/* - * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - \mainpage CMSIS DSP Software Library - * - * \section intro Introduction - * - * This user manual describes the CMSIS DSP software library, - * a suite of common signal processing functions for use on Cortex-M and Cortex-A processor - * based devices. 
- * - * The library is divided into a number of functions each covering a specific category: - * - Basic math functions - * - Fast math functions - * - Complex math functions - * - Filtering functions - * - Matrix functions - * - Transform functions - * - Motor control functions - * - Statistical functions - * - Support functions - * - Interpolation functions - * - Support Vector Machine functions (SVM) - * - Bayes classifier functions - * - Distance functions - * - * The library has generally separate functions for operating on 8-bit integers, 16-bit integers, - * 32-bit integer and 32-bit floating-point values. - * - * \section using Using the Library - * - * The library installer contains prebuilt versions of the libraries in the Lib folder. - * - * Here is the list of pre-built libraries : - * - arm_cortexM7lfdp_math.lib (Cortex-M7, Little endian, Double Precision Floating Point Unit) - * - arm_cortexM7bfdp_math.lib (Cortex-M7, Big endian, Double Precision Floating Point Unit) - * - arm_cortexM7lfsp_math.lib (Cortex-M7, Little endian, Single Precision Floating Point Unit) - * - arm_cortexM7bfsp_math.lib (Cortex-M7, Big endian and Single Precision Floating Point Unit on) - * - arm_cortexM7l_math.lib (Cortex-M7, Little endian) - * - arm_cortexM7b_math.lib (Cortex-M7, Big endian) - * - arm_cortexM4lf_math.lib (Cortex-M4, Little endian, Floating Point Unit) - * - arm_cortexM4bf_math.lib (Cortex-M4, Big endian, Floating Point Unit) - * - arm_cortexM4l_math.lib (Cortex-M4, Little endian) - * - arm_cortexM4b_math.lib (Cortex-M4, Big endian) - * - arm_cortexM3l_math.lib (Cortex-M3, Little endian) - * - arm_cortexM3b_math.lib (Cortex-M3, Big endian) - * - arm_cortexM0l_math.lib (Cortex-M0 / Cortex-M0+, Little endian) - * - arm_cortexM0b_math.lib (Cortex-M0 / Cortex-M0+, Big endian) - * - arm_ARMv8MBLl_math.lib (Armv8-M Baseline, Little endian) - * - arm_ARMv8MMLl_math.lib (Armv8-M Mainline, Little endian) - * - arm_ARMv8MMLlfsp_math.lib (Armv8-M Mainline, Little endian, 
Single Precision Floating Point Unit) - * - arm_ARMv8MMLld_math.lib (Armv8-M Mainline, Little endian, DSP instructions) - * - arm_ARMv8MMLldfsp_math.lib (Armv8-M Mainline, Little endian, DSP instructions, Single Precision Floating Point Unit) - * - * The library functions are declared in the public file arm_math.h which is placed in the Include folder. - * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single - * public header file arm_math.h for Cortex-M cores with little endian and big endian. Same header file will be used for floating point unit(FPU) variants. - * - * - * \section example Examples - * - * The library ships with a number of examples which demonstrate how to use the library functions. - * - * \section toolchain Toolchain Support - * - * The library is now tested on Fast Models building with cmake. - * Core M0, M7, A5 are tested. - * - * - * - * \section building Building the Library - * - * The library installer contains a project file to rebuild libraries on MDK toolchain in the CMSIS\\DSP\\Projects\\ARM folder. - * - arm_cortexM_math.uvprojx - * - * - * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional preprocessor macros detailed above. - * - * There is also a work in progress cmake build. The README file is giving more details. - * - * \section preprocessor Preprocessor Macros - * - * Each library project have different preprocessor macros. - * - * - ARM_MATH_BIG_ENDIAN: - * - * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets. 
- * - * - ARM_MATH_MATRIX_CHECK: - * - * Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices - * - * - ARM_MATH_ROUNDING: - * - * Define macro ARM_MATH_ROUNDING for rounding on support functions - * - * - ARM_MATH_LOOPUNROLL: - * - * Define macro ARM_MATH_LOOPUNROLL to enable manual loop unrolling in DSP functions - * - * - ARM_MATH_NEON: - * - * Define macro ARM_MATH_NEON to enable Neon versions of the DSP functions. - * It is not enabled by default when Neon is available because performances are - * dependent on the compiler and target architecture. - * - * - ARM_MATH_NEON_EXPERIMENTAL: - * - * Define macro ARM_MATH_NEON_EXPERIMENTAL to enable experimental Neon versions of - * of some DSP functions. Experimental Neon versions currently do not have better - * performances than the scalar versions. - * - * - ARM_MATH_HELIUM: - * - * It implies the flags ARM_MATH_MVEF and ARM_MATH_MVEI and ARM_MATH_FLOAT16. - * - * - ARM_MATH_MVEF: - * - * Select Helium versions of the f32 algorithms. - * It implies ARM_MATH_FLOAT16 and ARM_MATH_MVEI. - * - * - ARM_MATH_MVEI: - * - * Select Helium versions of the int and fixed point algorithms. - * - * - ARM_MATH_MVE_FLOAT16: - * - * MVE Float16 implementations of some algorithms (Requires MVE extension). - * - * - DISABLEFLOAT16: - * - * Disable float16 algorithms when __fp16 is not supported for a - * specific compiler / core configuration - * - *
- * \section pack CMSIS-DSP in ARM::CMSIS Pack - * - * The following files relevant to CMSIS-DSP are present in the ARM::CMSIS Pack directories: - * |File/Folder |Content | - * |---------------------------------|------------------------------------------------------------------------| - * |\b CMSIS\\Documentation\\DSP | This documentation | - * |\b CMSIS\\DSP\\DSP_Lib_TestSuite | DSP_Lib deprecated test suite | - * |\b CMSIS\\DSP\\Examples | Example projects demonstrating the usage of the library functions | - * |\b CMSIS\\DSP\\Include | DSP_Lib include files for using and building the lib - * |\b CMSIS\\DSP\\PrivateInclude | DSP_Lib private include files for building the lib | - * |\b CMSIS\\DSP\\Lib | DSP_Lib binaries | - * |\b CMSIS\\DSP\\Projects | Projects to rebuild DSP_Lib binaries | - * |\b CMSIS\\DSP\\Source | DSP_Lib source files | - * - *
- * \section rev Revision History of CMSIS-DSP - * Please refer to \ref ChangeLog_pg. - */ - - - - - - - - - - - -/** - * @defgroup groupExamples Examples - */ - - - - - -#ifndef _ARM_MATH_H -#define _ARM_MATH_H - - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/basic_math_functions.h" -#include "dsp/interpolation_functions.h" -#include "dsp/bayes_functions.h" -#include "dsp/matrix_functions.h" -#include "dsp/complex_math_functions.h" -#include "dsp/statistics_functions.h" -#include "dsp/controller_functions.h" -#include "dsp/support_functions.h" -#include "dsp/distance_functions.h" -#include "dsp/svm_functions.h" -#include "dsp/fast_math_functions.h" -#include "dsp/transform_functions.h" -#include "dsp/filtering_functions.h" - - - -#ifdef __cplusplus -extern "C" -{ -#endif - - - - -//#define TABLE_SPACING_Q31 0x400000 -//#define TABLE_SPACING_Q15 0x80 - - - - - -#ifdef __cplusplus -} -#endif - - -#endif /* _ARM_MATH_H */ - -/** - * - * End of file. - */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_memory.h b/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_memory.h deleted file mode 100755 index c7158dc..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_memory.h +++ /dev/null @@ -1,240 +0,0 @@ -/****************************************************************************** - * @file arm_math_memory.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _ARM_MATH_MEMORY_H_ - -#define _ARM_MATH_MEMORY_H_ - -#include "arm_math_types.h" - - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - @brief definition to read/write two 16 bit values. - @deprecated - */ -#if defined ( __CC_ARM ) - #define __SIMD32_TYPE int32_t __packed -#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) - #define __SIMD32_TYPE int32_t -#elif defined ( __GNUC__ ) - #define __SIMD32_TYPE int32_t -#elif defined ( __ICCARM__ ) - #define __SIMD32_TYPE int32_t __packed -#elif defined ( __TI_ARM__ ) - #define __SIMD32_TYPE int32_t -#elif defined ( __CSMC__ ) - #define __SIMD32_TYPE int32_t -#elif defined ( __TASKING__ ) - #define __SIMD32_TYPE __un(aligned) int32_t -#elif defined(_MSC_VER ) - #define __SIMD32_TYPE int32_t -#else - #error Unknown compiler -#endif - -#define __SIMD32(addr) (*(__SIMD32_TYPE **) & (addr)) -#define __SIMD32_CONST(addr) ( (__SIMD32_TYPE * ) (addr)) -#define _SIMD32_OFFSET(addr) (*(__SIMD32_TYPE * ) (addr)) -#define __SIMD64(addr) (*( int64_t **) & (addr)) - - -/* SIMD replacement */ - - -/** - @brief Read 2 Q15 from Q15 pointer. - @param[in] pQ15 points to input value - @return Q31 value - */ -__STATIC_FORCEINLINE q31_t read_q15x2 ( - q15_t * pQ15) -{ - q31_t val; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, pQ15, 4); -#else - val = (pQ15[1] << 16) | (pQ15[0] & 0x0FFFF) ; -#endif - - return (val); -} - -/** - @brief Read 2 Q15 from Q15 pointer and increment pointer afterwards. 
- @param[in] pQ15 points to input value - @return Q31 value - */ -__STATIC_FORCEINLINE q31_t read_q15x2_ia ( - q15_t ** pQ15) -{ - q31_t val; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ15, 4); -#else - val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); -#endif - - *pQ15 += 2; - return (val); -} - -/** - @brief Read 2 Q15 from Q15 pointer and decrement pointer afterwards. - @param[in] pQ15 points to input value - @return Q31 value - */ -__STATIC_FORCEINLINE q31_t read_q15x2_da ( - q15_t ** pQ15) -{ - q31_t val; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ15, 4); -#else - val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); -#endif - - *pQ15 -= 2; - return (val); -} - -/** - @brief Write 2 Q15 to Q15 pointer and increment pointer afterwards. - @param[in] pQ15 points to input value - @param[in] value Q31 value - @return none - */ -__STATIC_FORCEINLINE void write_q15x2_ia ( - q15_t ** pQ15, - q31_t value) -{ - q31_t val = value; -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (*pQ15, &val, 4); -#else - (*pQ15)[0] = (val & 0x0FFFF); - (*pQ15)[1] = (val >> 16) & 0x0FFFF; -#endif - - *pQ15 += 2; -} - -/** - @brief Write 2 Q15 to Q15 pointer. - @param[in] pQ15 points to input value - @param[in] value Q31 value - @return none - */ -__STATIC_FORCEINLINE void write_q15x2 ( - q15_t * pQ15, - q31_t value) -{ - q31_t val = value; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (pQ15, &val, 4); -#else - pQ15[0] = val & 0x0FFFF; - pQ15[1] = val >> 16; -#endif -} - - -/** - @brief Read 4 Q7 from Q7 pointer and increment pointer afterwards. - @param[in] pQ7 points to input value - @return Q31 value - */ -__STATIC_FORCEINLINE q31_t read_q7x4_ia ( - q7_t ** pQ7) -{ - q31_t val; - - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ7, 4); -#else - val =(((*pQ7)[3] & 0x0FF) << 24) | (((*pQ7)[2] & 0x0FF) << 16) | (((*pQ7)[1] & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); -#endif - - *pQ7 += 4; - - return (val); -} - -/** - @brief Read 4 Q7 from Q7 pointer and decrement pointer afterwards. 
- @param[in] pQ7 points to input value - @return Q31 value - */ -__STATIC_FORCEINLINE q31_t read_q7x4_da ( - q7_t ** pQ7) -{ - q31_t val; -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ7, 4); -#else - val = ((((*pQ7)[3]) & 0x0FF) << 24) | ((((*pQ7)[2]) & 0x0FF) << 16) | ((((*pQ7)[1]) & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); -#endif - *pQ7 -= 4; - - return (val); -} - -/** - @brief Write 4 Q7 to Q7 pointer and increment pointer afterwards. - @param[in] pQ7 points to input value - @param[in] value Q31 value - @return none - */ -__STATIC_FORCEINLINE void write_q7x4_ia ( - q7_t ** pQ7, - q31_t value) -{ - q31_t val = value; -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (*pQ7, &val, 4); -#else - (*pQ7)[0] = val & 0x0FF; - (*pQ7)[1] = (val >> 8) & 0x0FF; - (*pQ7)[2] = (val >> 16) & 0x0FF; - (*pQ7)[3] = (val >> 24) & 0x0FF; - -#endif - *pQ7 += 4; -} - - -#ifdef __cplusplus -} -#endif - -#endif /*ifndef _ARM_MATH_MEMORY_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_types.h b/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_types.h deleted file mode 100755 index 95a17e3..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/arm_math_types.h +++ /dev/null @@ -1,598 +0,0 @@ -/****************************************************************************** - * @file arm_math_types.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _ARM_MATH_TYPES_H_ - -#define _ARM_MATH_TYPES_H_ - -#ifdef __cplusplus -extern "C" -{ -#endif - -/* Compiler specific diagnostic adjustment */ -#if defined ( __CC_ARM ) - -#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) - -#elif defined ( __GNUC__ ) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wsign-conversion" - #pragma GCC diagnostic ignored "-Wconversion" - #pragma GCC diagnostic ignored "-Wunused-parameter" - -#elif defined ( __ICCARM__ ) - -#elif defined ( __TI_ARM__ ) - -#elif defined ( __CSMC__ ) - -#elif defined ( __TASKING__ ) - -#elif defined ( _MSC_VER ) - -#else - #error Unknown compiler -#endif - - -/* Included for instrinsics definitions */ -#if defined (_MSC_VER ) -#include -#define __STATIC_FORCEINLINE static __forceinline -#define __STATIC_INLINE static __inline -#define __ALIGNED(x) __declspec(align(x)) - -#elif defined (__GNUC_PYTHON__) -#include -#define __ALIGNED(x) __attribute__((aligned(x))) -#define __STATIC_FORCEINLINE static __attribute__((inline)) -#define __STATIC_INLINE static __attribute__((inline)) -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wattributes" - -#else -#include "cmsis_compiler.h" -#endif - - - -#include -#include -#include -#include - -/* evaluate ARM DSP feature */ -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) - #define ARM_MATH_DSP 1 -#endif - -#if defined(ARM_MATH_NEON) -#include -#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - #if !defined(ARM_MATH_NEON_FLOAT16) - #define ARM_MATH_NEON_FLOAT16 - #endif 
-#endif -#endif - -#if !defined(ARM_MATH_AUTOVECTORIZE) - -#if __ARM_FEATURE_MVE - #if !defined(ARM_MATH_MVEI) - #define ARM_MATH_MVEI - #endif -#endif - -#if (__ARM_FEATURE_MVE & 2) - #if !defined(ARM_MATH_MVEF) - #define ARM_MATH_MVEF - #endif - #if !defined(ARM_MATH_MVE_FLOAT16) - /* HW Float16 not yet well supported on gcc for M55 */ - #if !defined(__CMSIS_GCC_H) - #define ARM_MATH_MVE_FLOAT16 - #endif - #endif -#endif - -#endif /*!defined(ARM_MATH_AUTOVECTORIZE)*/ - - -#if defined (ARM_MATH_HELIUM) - #if !defined(ARM_MATH_MVEF) - #define ARM_MATH_MVEF - #endif - - #if !defined(ARM_MATH_MVEI) - #define ARM_MATH_MVEI - #endif - - #if !defined(ARM_MATH_MVE_FLOAT16) - /* HW Float16 not yet well supported on gcc for M55 */ - #if !defined(__CMSIS_GCC_H) - #define ARM_MATH_MVE_FLOAT16 - #endif - #endif -#endif - - - -#if defined ( __CC_ARM ) - /* Enter low optimization region - place directly above function definition */ - #if defined( __ARM_ARCH_7EM__ ) - #define LOW_OPTIMIZATION_ENTER \ - _Pragma ("push") \ - _Pragma ("O1") - #else - #define LOW_OPTIMIZATION_ENTER - #endif - - /* Exit low optimization region - place directly after end of function definition */ - #if defined ( __ARM_ARCH_7EM__ ) - #define LOW_OPTIMIZATION_EXIT \ - _Pragma ("pop") - #else - #define LOW_OPTIMIZATION_EXIT - #endif - - /* Enter low optimization region - place directly above function definition */ - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - - /* Exit low optimization region - place directly after end of function definition */ - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined (__ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) - #define LOW_OPTIMIZATION_ENTER - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( __GNUC__ ) - #define LOW_OPTIMIZATION_ENTER \ - __attribute__(( optimize("-O1") )) - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define 
IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( __ICCARM__ ) - /* Enter low optimization region - place directly above function definition */ - #if defined ( __ARM_ARCH_7EM__ ) - #define LOW_OPTIMIZATION_ENTER \ - _Pragma ("optimize=low") - #else - #define LOW_OPTIMIZATION_ENTER - #endif - - /* Exit low optimization region - place directly after end of function definition */ - #define LOW_OPTIMIZATION_EXIT - - /* Enter low optimization region - place directly above function definition */ - #if defined ( __ARM_ARCH_7EM__ ) - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER \ - _Pragma ("optimize=low") - #else - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #endif - - /* Exit low optimization region - place directly after end of function definition */ - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( __TI_ARM__ ) - #define LOW_OPTIMIZATION_ENTER - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( __CSMC__ ) - #define LOW_OPTIMIZATION_ENTER - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( __TASKING__ ) - #define LOW_OPTIMIZATION_ENTER - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT - -#elif defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) - #define LOW_OPTIMIZATION_ENTER - #define LOW_OPTIMIZATION_EXIT - #define IAR_ONLY_LOW_OPTIMIZATION_ENTER - #define IAR_ONLY_LOW_OPTIMIZATION_EXIT -#endif - - - -/* Compiler specific diagnostic adjustment */ -#if defined ( __CC_ARM ) - -#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) - -#elif defined ( __GNUC__ ) -#pragma GCC diagnostic pop - -#elif defined ( __ICCARM__ ) - -#elif defined ( __TI_ARM__ ) - -#elif defined ( __CSMC__ ) - -#elif defined ( __TASKING__ ) - -#elif defined ( _MSC_VER ) - -#else - #error Unknown compiler -#endif - -#ifdef __cplusplus -} -#endif - -#if 
__ARM_FEATURE_MVE -#include -#endif - -#ifdef __cplusplus -extern "C" -{ -#endif - - /** - * @brief 8-bit fractional data type in 1.7 format. - */ - typedef int8_t q7_t; - - /** - * @brief 16-bit fractional data type in 1.15 format. - */ - typedef int16_t q15_t; - - /** - * @brief 32-bit fractional data type in 1.31 format. - */ - typedef int32_t q31_t; - - /** - * @brief 64-bit fractional data type in 1.63 format. - */ - typedef int64_t q63_t; - - /** - * @brief 32-bit floating-point type definition. - */ - typedef float float32_t; - - /** - * @brief 64-bit floating-point type definition. - */ - typedef double float64_t; - - /** - * @brief vector types - */ -#if defined(ARM_MATH_NEON) || defined (ARM_MATH_MVEI) - /** - * @brief 64-bit fractional 128-bit vector data type in 1.63 format - */ - typedef int64x2_t q63x2_t; - - /** - * @brief 32-bit fractional 128-bit vector data type in 1.31 format. - */ - typedef int32x4_t q31x4_t; - - /** - * @brief 16-bit fractional 128-bit vector data type with 16-bit alignement in 1.15 format. - */ - typedef __ALIGNED(2) int16x8_t q15x8_t; - - /** - * @brief 8-bit fractional 128-bit vector data type with 8-bit alignement in 1.7 format. - */ - typedef __ALIGNED(1) int8x16_t q7x16_t; - - /** - * @brief 32-bit fractional 128-bit vector pair data type in 1.31 format. - */ - typedef int32x4x2_t q31x4x2_t; - - /** - * @brief 32-bit fractional 128-bit vector quadruplet data type in 1.31 format. - */ - typedef int32x4x4_t q31x4x4_t; - - /** - * @brief 16-bit fractional 128-bit vector pair data type in 1.15 format. - */ - typedef int16x8x2_t q15x8x2_t; - - /** - * @brief 16-bit fractional 128-bit vector quadruplet data type in 1.15 format. - */ - typedef int16x8x4_t q15x8x4_t; - - /** - * @brief 8-bit fractional 128-bit vector pair data type in 1.7 format. - */ - typedef int8x16x2_t q7x16x2_t; - - /** - * @brief 8-bit fractional 128-bit vector quadruplet data type in 1.7 format. 
- */ - typedef int8x16x4_t q7x16x4_t; - - /** - * @brief 32-bit fractional data type in 9.23 format. - */ - typedef int32_t q23_t; - - /** - * @brief 32-bit fractional 128-bit vector data type in 9.23 format. - */ - typedef int32x4_t q23x4_t; - - /** - * @brief 64-bit status 128-bit vector data type. - */ - typedef int64x2_t status64x2_t; - - /** - * @brief 32-bit status 128-bit vector data type. - */ - typedef int32x4_t status32x4_t; - - /** - * @brief 16-bit status 128-bit vector data type. - */ - typedef int16x8_t status16x8_t; - - /** - * @brief 8-bit status 128-bit vector data type. - */ - typedef int8x16_t status8x16_t; - - -#endif - -#if defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF) /* floating point vector*/ - /** - * @brief 32-bit floating-point 128-bit vector type - */ - typedef float32x4_t f32x4_t; - - /** - * @brief 32-bit floating-point 128-bit vector pair data type - */ - typedef float32x4x2_t f32x4x2_t; - - /** - * @brief 32-bit floating-point 128-bit vector quadruplet data type - */ - typedef float32x4x4_t f32x4x4_t; - - /** - * @brief 32-bit ubiquitous 128-bit vector data type - */ - typedef union _any32x4_t - { - float32x4_t f; - int32x4_t i; - } any32x4_t; - -#endif - -#if defined(ARM_MATH_NEON) - /** - * @brief 32-bit fractional 64-bit vector data type in 1.31 format. - */ - typedef int32x2_t q31x2_t; - - /** - * @brief 16-bit fractional 64-bit vector data type in 1.15 format. - */ - typedef __ALIGNED(2) int16x4_t q15x4_t; - - /** - * @brief 8-bit fractional 64-bit vector data type in 1.7 format. - */ - typedef __ALIGNED(1) int8x8_t q7x8_t; - - /** - * @brief 32-bit float 64-bit vector data type. 
- */ - typedef float32x2_t f32x2_t; - - /** - * @brief 32-bit floating-point 128-bit vector triplet data type - */ - typedef float32x4x3_t f32x4x3_t; - - - /** - * @brief 32-bit fractional 128-bit vector triplet data type in 1.31 format - */ - typedef int32x4x3_t q31x4x3_t; - - /** - * @brief 16-bit fractional 128-bit vector triplet data type in 1.15 format - */ - typedef int16x8x3_t q15x8x3_t; - - /** - * @brief 8-bit fractional 128-bit vector triplet data type in 1.7 format - */ - typedef int8x16x3_t q7x16x3_t; - - /** - * @brief 32-bit floating-point 64-bit vector pair data type - */ - typedef float32x2x2_t f32x2x2_t; - - /** - * @brief 32-bit floating-point 64-bit vector triplet data type - */ - typedef float32x2x3_t f32x2x3_t; - - /** - * @brief 32-bit floating-point 64-bit vector quadruplet data type - */ - typedef float32x2x4_t f32x2x4_t; - - - /** - * @brief 32-bit fractional 64-bit vector pair data type in 1.31 format - */ - typedef int32x2x2_t q31x2x2_t; - - /** - * @brief 32-bit fractional 64-bit vector triplet data type in 1.31 format - */ - typedef int32x2x3_t q31x2x3_t; - - /** - * @brief 32-bit fractional 64-bit vector quadruplet data type in 1.31 format - */ - typedef int32x4x3_t q31x2x4_t; - - /** - * @brief 16-bit fractional 64-bit vector pair data type in 1.15 format - */ - typedef int16x4x2_t q15x4x2_t; - - /** - * @brief 16-bit fractional 64-bit vector triplet data type in 1.15 format - */ - typedef int16x4x2_t q15x4x3_t; - - /** - * @brief 16-bit fractional 64-bit vector quadruplet data type in 1.15 format - */ - typedef int16x4x3_t q15x4x4_t; - - /** - * @brief 8-bit fractional 64-bit vector pair data type in 1.7 format - */ - typedef int8x8x2_t q7x8x2_t; - - /** - * @brief 8-bit fractional 64-bit vector triplet data type in 1.7 format - */ - typedef int8x8x3_t q7x8x3_t; - - /** - * @brief 8-bit fractional 64-bit vector quadruplet data type in 1.7 format - */ - typedef int8x8x4_t q7x8x4_t; - - /** - * @brief 32-bit ubiquitous 64-bit vector 
data type - */ - typedef union _any32x2_t - { - float32x2_t f; - int32x2_t i; - } any32x2_t; - - - /** - * @brief 32-bit status 64-bit vector data type. - */ - typedef int32x4_t status32x2_t; - - /** - * @brief 16-bit status 64-bit vector data type. - */ - typedef int16x8_t status16x4_t; - - /** - * @brief 8-bit status 64-bit vector data type. - */ - typedef int8x16_t status8x8_t; - -#endif - - - - - -#define F64_MAX ((float64_t)DBL_MAX) -#define F32_MAX ((float32_t)FLT_MAX) - - - -#define F64_MIN (-DBL_MAX) -#define F32_MIN (-FLT_MAX) - - - -#define F64_ABSMAX ((float64_t)DBL_MAX) -#define F32_ABSMAX ((float32_t)FLT_MAX) - - - -#define F64_ABSMIN ((float64_t)0.0) -#define F32_ABSMIN ((float32_t)0.0) - - -#define Q31_MAX ((q31_t)(0x7FFFFFFFL)) -#define Q15_MAX ((q15_t)(0x7FFF)) -#define Q7_MAX ((q7_t)(0x7F)) -#define Q31_MIN ((q31_t)(0x80000000L)) -#define Q15_MIN ((q15_t)(0x8000)) -#define Q7_MIN ((q7_t)(0x80)) - -#define Q31_ABSMAX ((q31_t)(0x7FFFFFFFL)) -#define Q15_ABSMAX ((q15_t)(0x7FFF)) -#define Q7_ABSMAX ((q7_t)(0x7F)) -#define Q31_ABSMIN ((q31_t)0) -#define Q15_ABSMIN ((q15_t)0) -#define Q7_ABSMIN ((q7_t)0) - - /* Dimension C vector space */ - #define CMPLX_DIM 2 - - /** - * @brief Error status returned by some functions in the library. 
- */ - - typedef enum - { - ARM_MATH_SUCCESS = 0, /**< No error */ - ARM_MATH_ARGUMENT_ERROR = -1, /**< One or more arguments are incorrect */ - ARM_MATH_LENGTH_ERROR = -2, /**< Length of data buffer is incorrect */ - ARM_MATH_SIZE_MISMATCH = -3, /**< Size of matrices is not compatible with the operation */ - ARM_MATH_NANINF = -4, /**< Not-a-number (NaN) or infinity is generated */ - ARM_MATH_SINGULAR = -5, /**< Input matrix is singular and cannot be inverted */ - ARM_MATH_TEST_FAILURE = -6 /**< Test Failed */ - } arm_status; - - -#ifdef __cplusplus -} -#endif - -#endif /*ifndef _ARM_MATH_TYPES_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/basic_math_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/basic_math_functions.h deleted file mode 100755 index b82a6dd..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/basic_math_functions.h +++ /dev/null @@ -1,699 +0,0 @@ -/****************************************************************************** - * @file basic_math_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef _BASIC_MATH_FUNCTIONS_H_ -#define _BASIC_MATH_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup groupMath Basic Math Functions - */ - - /** - * @brief Q7 vector multiplication. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_mult_q7( - const q7_t * pSrcA, - const q7_t * pSrcB, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q15 vector multiplication. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_mult_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q31 vector multiplication. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_mult_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Floating-point vector multiplication. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_mult_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - float32_t * pDst, - uint32_t blockSize); - - - - /** - * @brief Floating-point vector addition. 
- * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_add_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - float32_t * pDst, - uint32_t blockSize); - - - - /** - * @brief Q7 vector addition. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_add_q7( - const q7_t * pSrcA, - const q7_t * pSrcB, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q15 vector addition. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_add_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q31 vector addition. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_add_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Floating-point vector subtraction. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_sub_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - float32_t * pDst, - uint32_t blockSize); - - - - /** - * @brief Q7 vector subtraction. 
- * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_sub_q7( - const q7_t * pSrcA, - const q7_t * pSrcB, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q15 vector subtraction. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_sub_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q31 vector subtraction. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in each vector - */ - void arm_sub_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Multiplies a floating-point vector by a scalar. - * @param[in] pSrc points to the input vector - * @param[in] scale scale factor to be applied - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_scale_f32( - const float32_t * pSrc, - float32_t scale, - float32_t * pDst, - uint32_t blockSize); - - - - /** - * @brief Multiplies a Q7 vector by a scalar. - * @param[in] pSrc points to the input vector - * @param[in] scaleFract fractional portion of the scale value - * @param[in] shift number of bits to shift the result by - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_scale_q7( - const q7_t * pSrc, - q7_t scaleFract, - int8_t shift, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Multiplies a Q15 vector by a scalar. 
- * @param[in] pSrc points to the input vector - * @param[in] scaleFract fractional portion of the scale value - * @param[in] shift number of bits to shift the result by - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_scale_q15( - const q15_t * pSrc, - q15_t scaleFract, - int8_t shift, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Multiplies a Q31 vector by a scalar. - * @param[in] pSrc points to the input vector - * @param[in] scaleFract fractional portion of the scale value - * @param[in] shift number of bits to shift the result by - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_scale_q31( - const q31_t * pSrc, - q31_t scaleFract, - int8_t shift, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q7 vector absolute value. - * @param[in] pSrc points to the input buffer - * @param[out] pDst points to the output buffer - * @param[in] blockSize number of samples in each vector - */ - void arm_abs_q7( - const q7_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Floating-point vector absolute value. - * @param[in] pSrc points to the input buffer - * @param[out] pDst points to the output buffer - * @param[in] blockSize number of samples in each vector - */ - void arm_abs_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - - - /** - * @brief Q15 vector absolute value. - * @param[in] pSrc points to the input buffer - * @param[out] pDst points to the output buffer - * @param[in] blockSize number of samples in each vector - */ - void arm_abs_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Q31 vector absolute value. 
- * @param[in] pSrc points to the input buffer - * @param[out] pDst points to the output buffer - * @param[in] blockSize number of samples in each vector - */ - void arm_abs_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Dot product of floating-point vectors. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] blockSize number of samples in each vector - * @param[out] result output result returned here - */ - void arm_dot_prod_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - uint32_t blockSize, - float32_t * result); - - - - /** - * @brief Dot product of Q7 vectors. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] blockSize number of samples in each vector - * @param[out] result output result returned here - */ - void arm_dot_prod_q7( - const q7_t * pSrcA, - const q7_t * pSrcB, - uint32_t blockSize, - q31_t * result); - - - /** - * @brief Dot product of Q15 vectors. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] blockSize number of samples in each vector - * @param[out] result output result returned here - */ - void arm_dot_prod_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - uint32_t blockSize, - q63_t * result); - - - /** - * @brief Dot product of Q31 vectors. - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] blockSize number of samples in each vector - * @param[out] result output result returned here - */ - void arm_dot_prod_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - uint32_t blockSize, - q63_t * result); - - - /** - * @brief Shifts the elements of a Q7 vector a specified number of bits. - * @param[in] pSrc points to the input vector - * @param[in] shiftBits number of bits to shift. 
A positive value shifts left; a negative value shifts right. - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_shift_q7( - const q7_t * pSrc, - int8_t shiftBits, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Shifts the elements of a Q15 vector a specified number of bits. - * @param[in] pSrc points to the input vector - * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right. - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_shift_q15( - const q15_t * pSrc, - int8_t shiftBits, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Shifts the elements of a Q31 vector a specified number of bits. - * @param[in] pSrc points to the input vector - * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right. - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_shift_q31( - const q31_t * pSrc, - int8_t shiftBits, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Adds a constant offset to a floating-point vector. - * @param[in] pSrc points to the input vector - * @param[in] offset is the offset to be added - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_offset_f32( - const float32_t * pSrc, - float32_t offset, - float32_t * pDst, - uint32_t blockSize); - - - - /** - * @brief Adds a constant offset to a Q7 vector. 
- * @param[in] pSrc points to the input vector - * @param[in] offset is the offset to be added - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_offset_q7( - const q7_t * pSrc, - q7_t offset, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Adds a constant offset to a Q15 vector. - * @param[in] pSrc points to the input vector - * @param[in] offset is the offset to be added - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_offset_q15( - const q15_t * pSrc, - q15_t offset, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Adds a constant offset to a Q31 vector. - * @param[in] pSrc points to the input vector - * @param[in] offset is the offset to be added - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_offset_q31( - const q31_t * pSrc, - q31_t offset, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Negates the elements of a floating-point vector. - * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_negate_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Negates the elements of a Q7 vector. - * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_negate_q7( - const q7_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Negates the elements of a Q15 vector. 
- * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_negate_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Negates the elements of a Q31 vector. - * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] blockSize number of samples in the vector - */ - void arm_negate_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - -/** - * @brief Compute the logical bitwise AND of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_and_u16( - const uint16_t * pSrcA, - const uint16_t * pSrcB, - uint16_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise AND of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_and_u32( - const uint32_t * pSrcA, - const uint32_t * pSrcB, - uint32_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise AND of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_and_u8( - const uint8_t * pSrcA, - const uint8_t * pSrcB, - uint8_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise OR of two fixed-point vectors. 
- * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_or_u16( - const uint16_t * pSrcA, - const uint16_t * pSrcB, - uint16_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise OR of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_or_u32( - const uint32_t * pSrcA, - const uint32_t * pSrcB, - uint32_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise OR of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_or_u8( - const uint8_t * pSrcA, - const uint8_t * pSrcB, - uint8_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise NOT of a fixed-point vector. - * @param[in] pSrc points to input vector - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_not_u16( - const uint16_t * pSrc, - uint16_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise NOT of a fixed-point vector. - * @param[in] pSrc points to input vector - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_not_u32( - const uint32_t * pSrc, - uint32_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise NOT of a fixed-point vector. 
- * @param[in] pSrc points to input vector - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_not_u8( - const uint8_t * pSrc, - uint8_t * pDst, - uint32_t blockSize); - -/** - * @brief Compute the logical bitwise XOR of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_xor_u16( - const uint16_t * pSrcA, - const uint16_t * pSrcB, - uint16_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise XOR of two fixed-point vectors. - * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_xor_u32( - const uint32_t * pSrcA, - const uint32_t * pSrcB, - uint32_t * pDst, - uint32_t blockSize); - - /** - * @brief Compute the logical bitwise XOR of two fixed-point vectors. 
- * @param[in] pSrcA points to input vector A - * @param[in] pSrcB points to input vector B - * @param[out] pDst points to output vector - * @param[in] blockSize number of samples in each vector - * @return none - */ - void arm_xor_u8( - const uint8_t * pSrcA, - const uint8_t * pSrcB, - uint8_t * pDst, - uint32_t blockSize); - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _BASIC_MATH_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/bayes_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/bayes_functions.h deleted file mode 100755 index 050386c..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/bayes_functions.h +++ /dev/null @@ -1,86 +0,0 @@ -/****************************************************************************** - * @file bayes_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _BAYES_FUNCTIONS_H_ -#define _BAYES_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/statistics_functions.h" - -/** - * @defgroup groupBayes Bayesian estimators - * - * Implement the naive gaussian Bayes estimator. 
- * The training must be done from scikit-learn. - * - * The parameters can be easily - * generated from the scikit-learn object. Some examples are given in - * DSP/Testing/PatternGeneration/Bayes.py - */ - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @brief Instance structure for Naive Gaussian Bayesian estimator. - */ -typedef struct -{ - uint32_t vectorDimension; /**< Dimension of vector space */ - uint32_t numberOfClasses; /**< Number of different classes */ - const float32_t *theta; /**< Mean values for the Gaussians */ - const float32_t *sigma; /**< Variances for the Gaussians */ - const float32_t *classPriors; /**< Class prior probabilities */ - float32_t epsilon; /**< Additive value to variances */ -} arm_gaussian_naive_bayes_instance_f32; - -/** - * @brief Naive Gaussian Bayesian Estimator - * - * @param[in] S points to a naive bayes instance structure - * @param[in] in points to the elements of the input vector. - * @param[in] pBuffer points to a buffer of length numberOfClasses - * @return The predicted class - * - */ - - -uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S, - const float32_t * in, - float32_t *pBuffer); - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _BAYES_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/complex_math_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/complex_math_functions.h deleted file mode 100755 index 4a76532..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/complex_math_functions.h +++ /dev/null @@ -1,294 +0,0 @@ -/****************************************************************************** - * @file complex_math_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _COMPLEX_MATH_FUNCTIONS_H_ -#define _COMPLEX_MATH_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" -#include "dsp/fast_math_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup groupCmplxMath Complex Math Functions - * This set of functions operates on complex data vectors. - * The data in the complex arrays is stored in an interleaved fashion - * (real, imag, real, imag, ...). - * In the API functions, the number of samples in a complex array refers - * to the number of complex values; the array contains twice this number of - * real values. - */ - - /** - * @brief Floating-point complex conjugate. - * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_conj_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t numSamples); - - /** - * @brief Q31 complex conjugate. - * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_conj_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q15 complex conjugate. 
- * @param[in] pSrc points to the input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_conj_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t numSamples); - - - /** - * @brief Floating-point complex magnitude squared - * @param[in] pSrc points to the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_squared_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q31 complex magnitude squared - * @param[in] pSrc points to the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_squared_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q15 complex magnitude squared - * @param[in] pSrc points to the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_squared_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t numSamples); - - -/** - * @brief Floating-point complex magnitude - * @param[in] pSrc points to the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q31 complex magnitude - * @param[in] pSrc points to the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q15 complex magnitude - * @param[in] pSrc points to 
the complex input vector - * @param[out] pDst points to the real output vector - * @param[in] numSamples number of complex samples in the input vector - */ - void arm_cmplx_mag_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q15 complex dot product - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] numSamples number of complex samples in each vector - * @param[out] realResult real part of the result returned here - * @param[out] imagResult imaginary part of the result returned here - */ - void arm_cmplx_dot_prod_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - uint32_t numSamples, - q31_t * realResult, - q31_t * imagResult); - - - /** - * @brief Q31 complex dot product - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] numSamples number of complex samples in each vector - * @param[out] realResult real part of the result returned here - * @param[out] imagResult imaginary part of the result returned here - */ - void arm_cmplx_dot_prod_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - uint32_t numSamples, - q63_t * realResult, - q63_t * imagResult); - - - /** - * @brief Floating-point complex dot product - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] numSamples number of complex samples in each vector - * @param[out] realResult real part of the result returned here - * @param[out] imagResult imaginary part of the result returned here - */ - void arm_cmplx_dot_prod_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - uint32_t numSamples, - float32_t * realResult, - float32_t * imagResult); - - - /** - * @brief Q15 complex-by-real multiplication - * @param[in] pSrcCmplx points to the complex input vector - * @param[in] pSrcReal points to the real input vector - * @param[out] pCmplxDst points to 
the complex output vector - * @param[in] numSamples number of samples in each vector - */ - void arm_cmplx_mult_real_q15( - const q15_t * pSrcCmplx, - const q15_t * pSrcReal, - q15_t * pCmplxDst, - uint32_t numSamples); - - - /** - * @brief Q31 complex-by-real multiplication - * @param[in] pSrcCmplx points to the complex input vector - * @param[in] pSrcReal points to the real input vector - * @param[out] pCmplxDst points to the complex output vector - * @param[in] numSamples number of samples in each vector - */ - void arm_cmplx_mult_real_q31( - const q31_t * pSrcCmplx, - const q31_t * pSrcReal, - q31_t * pCmplxDst, - uint32_t numSamples); - - - /** - * @brief Floating-point complex-by-real multiplication - * @param[in] pSrcCmplx points to the complex input vector - * @param[in] pSrcReal points to the real input vector - * @param[out] pCmplxDst points to the complex output vector - * @param[in] numSamples number of samples in each vector - */ - void arm_cmplx_mult_real_f32( - const float32_t * pSrcCmplx, - const float32_t * pSrcReal, - float32_t * pCmplxDst, - uint32_t numSamples); - - /** - * @brief Q15 complex-by-complex multiplication - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_mult_cmplx_q15( - const q15_t * pSrcA, - const q15_t * pSrcB, - q15_t * pDst, - uint32_t numSamples); - - - /** - * @brief Q31 complex-by-complex multiplication - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_mult_cmplx_q31( - const q31_t * pSrcA, - const q31_t * pSrcB, - q31_t * pDst, - uint32_t numSamples); - - - /** - * @brief Floating-point complex-by-complex multiplication - * @param[in] 
pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[out] pDst points to the output vector - * @param[in] numSamples number of complex samples in each vector - */ - void arm_cmplx_mult_cmplx_f32( - const float32_t * pSrcA, - const float32_t * pSrcB, - float32_t * pDst, - uint32_t numSamples); - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _COMPLEX_MATH_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/controller_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/controller_functions.h deleted file mode 100755 index 1de68b4..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/controller_functions.h +++ /dev/null @@ -1,790 +0,0 @@ -/****************************************************************************** - * @file controller_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef _CONTROLLER_FUNCTIONS_H_ -#define _CONTROLLER_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - /** - * @brief Macros required for SINE and COSINE Controller functions - */ - -#define CONTROLLER_Q31_SHIFT (32 - 9) - /* 1.31(q31) Fixed value of 2/360 */ - /* -1 to +1 is divided into 360 values so total spacing is (2/360) */ -#define INPUT_SPACING 0xB60B61 - -/** - * @defgroup groupController Controller Functions - */ - - - /** - * @ingroup groupController - */ - - /** - * @addtogroup SinCos - * @{ - */ - -/** - * @brief Floating-point sin_cos function. - * @param[in] theta input value in degrees - * @param[out] pSinVal points to the processed sine output. - * @param[out] pCosVal points to the processed cos output. - */ - void arm_sin_cos_f32( - float32_t theta, - float32_t * pSinVal, - float32_t * pCosVal); - - - /** - * @brief Q31 sin_cos function. - * @param[in] theta scaled input value in degrees - * @param[out] pSinVal points to the processed sine output. - * @param[out] pCosVal points to the processed cosine output. - */ - void arm_sin_cos_q31( - q31_t theta, - q31_t * pSinVal, - q31_t * pCosVal); - - /** - * @} end of SinCos group - */ - - /** - * @ingroup groupController - */ - -/** - * @defgroup PID PID Motor Control - * - * A Proportional Integral Derivative (PID) controller is a generic feedback control - * loop mechanism widely used in industrial control systems. - * A PID controller is the most commonly used type of feedback controller. - * - * This set of functions implements (PID) controllers - * for Q15, Q31, and floating-point data types. The functions operate on a single sample - * of data and each call to the function returns a single processed value. - * S points to an instance of the PID control data structure. in - * is the input sample value. The functions return the output value. - * - * \par Algorithm: - *
-   *    y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2]
-   *    A0 = Kp + Ki + Kd
-   *    A1 = (-Kp ) - (2 * Kd )
-   *    A2 = Kd
-   * 
- * - * \par - * where \c Kp is proportional constant, \c Ki is Integral constant and \c Kd is Derivative constant - * - * \par - * \image html PID.gif "Proportional Integral Derivative Controller" - * - * \par - * The PID controller calculates an "error" value as the difference between - * the measured output and the reference input. - * The controller attempts to minimize the error by adjusting the process control inputs. - * The proportional value determines the reaction to the current error, - * the integral value determines the reaction based on the sum of recent errors, - * and the derivative value determines the reaction based on the rate at which the error has been changing. - * - * \par Instance Structure - * The Gains A0, A1, A2 and state variables for a PID controller are stored together in an instance data structure. - * A separate instance structure must be defined for each PID Controller. - * There are separate instance structure declarations for each of the 3 supported data types. - * - * \par Reset Functions - * There is also an associated reset function for each data type which clears the state array. - * - * \par Initialization Functions - * There is also an associated initialization function for each data type. - * The initialization function performs the following operations: - * - Initializes the Gains A0, A1, A2 from Kp,Ki, Kd gains. - * - Zeros out the values in the state buffer. - * - * \par - * Instance structure cannot be placed into a const data section and it is recommended to use the initialization function. - * - * \par Fixed-Point Behavior - * Care must be taken when using the fixed-point versions of the PID Controller functions. - * In particular, the overflow and saturation behavior of the accumulator used in each function must be considered. - * Refer to the function specific documentation below for usage guidelines. - */ - - - /** - * @brief Instance structure for the Q15 PID Control. 
- */ - typedef struct - { - q15_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */ -#if !defined (ARM_MATH_DSP) - q15_t A1; /**< The derived gain A1 = -Kp - 2Kd */ - q15_t A2; /**< The derived gain A1 = Kd. */ -#else - q31_t A1; /**< The derived gain A1 = -Kp - 2Kd | Kd.*/ -#endif - q15_t state[3]; /**< The state array of length 3. */ - q15_t Kp; /**< The proportional gain. */ - q15_t Ki; /**< The integral gain. */ - q15_t Kd; /**< The derivative gain. */ - } arm_pid_instance_q15; - - /** - * @brief Instance structure for the Q31 PID Control. - */ - typedef struct - { - q31_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */ - q31_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */ - q31_t A2; /**< The derived gain, A2 = Kd . */ - q31_t state[3]; /**< The state array of length 3. */ - q31_t Kp; /**< The proportional gain. */ - q31_t Ki; /**< The integral gain. */ - q31_t Kd; /**< The derivative gain. */ - } arm_pid_instance_q31; - - /** - * @brief Instance structure for the floating-point PID Control. - */ - typedef struct - { - float32_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */ - float32_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */ - float32_t A2; /**< The derived gain, A2 = Kd . */ - float32_t state[3]; /**< The state array of length 3. */ - float32_t Kp; /**< The proportional gain. */ - float32_t Ki; /**< The integral gain. */ - float32_t Kd; /**< The derivative gain. */ - } arm_pid_instance_f32; - - - - /** - * @brief Initialization function for the floating-point PID Control. - * @param[in,out] S points to an instance of the PID structure. - * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state. - */ - void arm_pid_init_f32( - arm_pid_instance_f32 * S, - int32_t resetStateFlag); - - - /** - * @brief Reset function for the floating-point PID Control. 
- * @param[in,out] S is an instance of the floating-point PID Control structure - */ - void arm_pid_reset_f32( - arm_pid_instance_f32 * S); - - - /** - * @brief Initialization function for the Q31 PID Control. - * @param[in,out] S points to an instance of the Q15 PID structure. - * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state. - */ - void arm_pid_init_q31( - arm_pid_instance_q31 * S, - int32_t resetStateFlag); - - - /** - * @brief Reset function for the Q31 PID Control. - * @param[in,out] S points to an instance of the Q31 PID Control structure - */ - - void arm_pid_reset_q31( - arm_pid_instance_q31 * S); - - - /** - * @brief Initialization function for the Q15 PID Control. - * @param[in,out] S points to an instance of the Q15 PID structure. - * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state. - */ - void arm_pid_init_q15( - arm_pid_instance_q15 * S, - int32_t resetStateFlag); - - - /** - * @brief Reset function for the Q15 PID Control. - * @param[in,out] S points to an instance of the q15 PID Control structure - */ - void arm_pid_reset_q15( - arm_pid_instance_q15 * S); - - - - /** - * @addtogroup PID - * @{ - */ - - /** - * @brief Process function for the floating-point PID Control. - * @param[in,out] S is an instance of the floating-point PID Control structure - * @param[in] in input sample to process - * @return processed output sample. - */ - __STATIC_FORCEINLINE float32_t arm_pid_f32( - arm_pid_instance_f32 * S, - float32_t in) - { - float32_t out; - - /* y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2] */ - out = (S->A0 * in) + - (S->A1 * S->state[0]) + (S->A2 * S->state[1]) + (S->state[2]); - - /* Update state */ - S->state[1] = S->state[0]; - S->state[0] = in; - S->state[2] = out; - - /* return to application */ - return (out); - - } - -/** - @brief Process function for the Q31 PID Control. 
- @param[in,out] S points to an instance of the Q31 PID Control structure - @param[in] in input sample to process - @return processed output sample. - - \par Scaling and Overflow Behavior - The function is implemented using an internal 64-bit accumulator. - The accumulator has a 2.62 format and maintains full precision of the intermediate multiplication results but provides only a single guard bit. - Thus, if the accumulator result overflows it wraps around rather than clip. - In order to avoid overflows completely the input signal must be scaled down by 2 bits as there are four additions. - After all multiply-accumulates are performed, the 2.62 accumulator is truncated to 1.32 format and then saturated to 1.31 format. - */ -__STATIC_FORCEINLINE q31_t arm_pid_q31( - arm_pid_instance_q31 * S, - q31_t in) - { - q63_t acc; - q31_t out; - - /* acc = A0 * x[n] */ - acc = (q63_t) S->A0 * in; - - /* acc += A1 * x[n-1] */ - acc += (q63_t) S->A1 * S->state[0]; - - /* acc += A2 * x[n-2] */ - acc += (q63_t) S->A2 * S->state[1]; - - /* convert output to 1.31 format to add y[n-1] */ - out = (q31_t) (acc >> 31U); - - /* out += y[n-1] */ - out += S->state[2]; - - /* Update state */ - S->state[1] = S->state[0]; - S->state[0] = in; - S->state[2] = out; - - /* return to application */ - return (out); - } - - -/** - @brief Process function for the Q15 PID Control. - @param[in,out] S points to an instance of the Q15 PID Control structure - @param[in] in input sample to process - @return processed output sample. - - \par Scaling and Overflow Behavior - The function is implemented using a 64-bit internal accumulator. - Both Gains and state variables are represented in 1.15 format and multiplications yield a 2.30 result. - The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format. - There is no risk of internal overflow with this approach and the full precision of intermediate multiplications is preserved. 
- After all additions have been performed, the accumulator is truncated to 34.15 format by discarding low 15 bits. - Lastly, the accumulator is saturated to yield a result in 1.15 format. - */ -__STATIC_FORCEINLINE q15_t arm_pid_q15( - arm_pid_instance_q15 * S, - q15_t in) - { - q63_t acc; - q15_t out; - -#if defined (ARM_MATH_DSP) - /* Implementation of PID controller */ - - /* acc = A0 * x[n] */ - acc = (q31_t) __SMUAD((uint32_t)S->A0, (uint32_t)in); - - /* acc += A1 * x[n-1] + A2 * x[n-2] */ - acc = (q63_t)__SMLALD((uint32_t)S->A1, (uint32_t)read_q15x2 (S->state), (uint64_t)acc); -#else - /* acc = A0 * x[n] */ - acc = ((q31_t) S->A0) * in; - - /* acc += A1 * x[n-1] + A2 * x[n-2] */ - acc += (q31_t) S->A1 * S->state[0]; - acc += (q31_t) S->A2 * S->state[1]; -#endif - - /* acc += y[n-1] */ - acc += (q31_t) S->state[2] << 15; - - /* saturate the output */ - out = (q15_t) (__SSAT((q31_t)(acc >> 15), 16)); - - /* Update state */ - S->state[1] = S->state[0]; - S->state[0] = in; - S->state[2] = out; - - /* return to application */ - return (out); - } - - /** - * @} end of PID group - */ - - /** - * @ingroup groupController - */ - - /** - * @defgroup park Vector Park Transform - * - * Forward Park transform converts the input two-coordinate vector to flux and torque components. - * The Park transform can be used to realize the transformation of the Ialpha and the Ibeta currents - * from the stationary to the moving reference frame and control the spatial relationship between - * the stator vector current and rotor flux vector. - * If we consider the d axis aligned with the rotor flux, the diagram below shows the - * current vector and the relationship from the two reference frames: - * \image html park.gif "Stator current space vector and its component in (a,b) and in the d,q rotating reference frame" - * - * The function operates on a single sample of data and each call to the function returns the processed output. 
- * The library provides separate functions for Q31 and floating-point data types. - * \par Algorithm - * \image html parkFormula.gif - * where Ialpha and Ibeta are the stator vector components, - * pId and pIq are rotor vector components and cosVal and sinVal are the - * cosine and sine values of theta (rotor flux position). - * \par Fixed-Point Behavior - * Care must be taken when using the Q31 version of the Park transform. - * In particular, the overflow and saturation behavior of the accumulator used must be considered. - * Refer to the function specific documentation below for usage guidelines. - */ - - /** - * @addtogroup park - * @{ - */ - - /** - * @brief Floating-point Park transform - * @param[in] Ialpha input two-phase vector coordinate alpha - * @param[in] Ibeta input two-phase vector coordinate beta - * @param[out] pId points to output rotor reference frame d - * @param[out] pIq points to output rotor reference frame q - * @param[in] sinVal sine value of rotation angle theta - * @param[in] cosVal cosine value of rotation angle theta - * @return none - * - * The function implements the forward Park transform. 
- * - */ - __STATIC_FORCEINLINE void arm_park_f32( - float32_t Ialpha, - float32_t Ibeta, - float32_t * pId, - float32_t * pIq, - float32_t sinVal, - float32_t cosVal) - { - /* Calculate pId using the equation, pId = Ialpha * cosVal + Ibeta * sinVal */ - *pId = Ialpha * cosVal + Ibeta * sinVal; - - /* Calculate pIq using the equation, pIq = - Ialpha * sinVal + Ibeta * cosVal */ - *pIq = -Ialpha * sinVal + Ibeta * cosVal; - } - - -/** - @brief Park transform for Q31 version - @param[in] Ialpha input two-phase vector coordinate alpha - @param[in] Ibeta input two-phase vector coordinate beta - @param[out] pId points to output rotor reference frame d - @param[out] pIq points to output rotor reference frame q - @param[in] sinVal sine value of rotation angle theta - @param[in] cosVal cosine value of rotation angle theta - @return none - - \par Scaling and Overflow Behavior - The function is implemented using an internal 32-bit accumulator. - The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. - There is saturation on the addition and subtraction, hence there is no risk of overflow. 
- */ -__STATIC_FORCEINLINE void arm_park_q31( - q31_t Ialpha, - q31_t Ibeta, - q31_t * pId, - q31_t * pIq, - q31_t sinVal, - q31_t cosVal) - { - q31_t product1, product2; /* Temporary variables used to store intermediate results */ - q31_t product3, product4; /* Temporary variables used to store intermediate results */ - - /* Intermediate product is calculated by (Ialpha * cosVal) */ - product1 = (q31_t) (((q63_t) (Ialpha) * (cosVal)) >> 31); - - /* Intermediate product is calculated by (Ibeta * sinVal) */ - product2 = (q31_t) (((q63_t) (Ibeta) * (sinVal)) >> 31); - - - /* Intermediate product is calculated by (Ialpha * sinVal) */ - product3 = (q31_t) (((q63_t) (Ialpha) * (sinVal)) >> 31); - - /* Intermediate product is calculated by (Ibeta * cosVal) */ - product4 = (q31_t) (((q63_t) (Ibeta) * (cosVal)) >> 31); - - /* Calculate pId by adding the two intermediate products 1 and 2 */ - *pId = __QADD(product1, product2); - - /* Calculate pIq by subtracting the two intermediate products 3 from 4 */ - *pIq = __QSUB(product4, product3); - } - - /** - * @} end of park group - */ - - - /** - * @ingroup groupController - */ - - /** - * @defgroup inv_park Vector Inverse Park transform - * Inverse Park transform converts the input flux and torque components to two-coordinate vector. - * - * The function operates on a single sample of data and each call to the function returns the processed output. - * The library provides separate functions for Q31 and floating-point data types. - * \par Algorithm - * \image html parkInvFormula.gif - * where pIalpha and pIbeta are the stator vector components, - * Id and Iq are rotor vector components and cosVal and sinVal are the - * cosine and sine values of theta (rotor flux position). - * \par Fixed-Point Behavior - * Care must be taken when using the Q31 version of the Park transform. - * In particular, the overflow and saturation behavior of the accumulator used must be considered. 
- * Refer to the function specific documentation below for usage guidelines. - */ - - /** - * @addtogroup inv_park - * @{ - */ - - /** - * @brief Floating-point Inverse Park transform - * @param[in] Id input coordinate of rotor reference frame d - * @param[in] Iq input coordinate of rotor reference frame q - * @param[out] pIalpha points to output two-phase orthogonal vector axis alpha - * @param[out] pIbeta points to output two-phase orthogonal vector axis beta - * @param[in] sinVal sine value of rotation angle theta - * @param[in] cosVal cosine value of rotation angle theta - * @return none - */ - __STATIC_FORCEINLINE void arm_inv_park_f32( - float32_t Id, - float32_t Iq, - float32_t * pIalpha, - float32_t * pIbeta, - float32_t sinVal, - float32_t cosVal) - { - /* Calculate pIalpha using the equation, pIalpha = Id * cosVal - Iq * sinVal */ - *pIalpha = Id * cosVal - Iq * sinVal; - - /* Calculate pIbeta using the equation, pIbeta = Id * sinVal + Iq * cosVal */ - *pIbeta = Id * sinVal + Iq * cosVal; - } - - -/** - @brief Inverse Park transform for Q31 version - @param[in] Id input coordinate of rotor reference frame d - @param[in] Iq input coordinate of rotor reference frame q - @param[out] pIalpha points to output two-phase orthogonal vector axis alpha - @param[out] pIbeta points to output two-phase orthogonal vector axis beta - @param[in] sinVal sine value of rotation angle theta - @param[in] cosVal cosine value of rotation angle theta - @return none - - @par Scaling and Overflow Behavior - The function is implemented using an internal 32-bit accumulator. - The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. - There is saturation on the addition, hence there is no risk of overflow. 
- */ -__STATIC_FORCEINLINE void arm_inv_park_q31( - q31_t Id, - q31_t Iq, - q31_t * pIalpha, - q31_t * pIbeta, - q31_t sinVal, - q31_t cosVal) - { - q31_t product1, product2; /* Temporary variables used to store intermediate results */ - q31_t product3, product4; /* Temporary variables used to store intermediate results */ - - /* Intermediate product is calculated by (Id * cosVal) */ - product1 = (q31_t) (((q63_t) (Id) * (cosVal)) >> 31); - - /* Intermediate product is calculated by (Iq * sinVal) */ - product2 = (q31_t) (((q63_t) (Iq) * (sinVal)) >> 31); - - - /* Intermediate product is calculated by (Id * sinVal) */ - product3 = (q31_t) (((q63_t) (Id) * (sinVal)) >> 31); - - /* Intermediate product is calculated by (Iq * cosVal) */ - product4 = (q31_t) (((q63_t) (Iq) * (cosVal)) >> 31); - - /* Calculate pIalpha by using the two intermediate products 1 and 2 */ - *pIalpha = __QSUB(product1, product2); - - /* Calculate pIbeta by using the two intermediate products 3 and 4 */ - *pIbeta = __QADD(product4, product3); - } - - /** - * @} end of Inverse park group - */ - -/** - * @ingroup groupController - */ - - /** - * @defgroup clarke Vector Clarke Transform - * Forward Clarke transform converts the instantaneous stator phases into a two-coordinate time invariant vector. - * Generally the Clarke transform uses three-phase currents Ia, Ib and Ic to calculate currents - * in the two-phase orthogonal stator axis Ialpha and Ibeta. - * When Ialpha is superposed with Ia as shown in the figure below - * \image html clarke.gif Stator current space vector and its components in (a,b). - * and Ia + Ib + Ic = 0, in this condition Ialpha and Ibeta - * can be calculated using only Ia and Ib. - * - * The function operates on a single sample of data and each call to the function returns the processed output. - * The library provides separate functions for Q31 and floating-point data types. 
- * \par Algorithm - * \image html clarkeFormula.gif - * where Ia and Ib are the instantaneous stator phases and - * pIalpha and pIbeta are the two coordinates of time invariant vector. - * \par Fixed-Point Behavior - * Care must be taken when using the Q31 version of the Clarke transform. - * In particular, the overflow and saturation behavior of the accumulator used must be considered. - * Refer to the function specific documentation below for usage guidelines. - */ - - /** - * @addtogroup clarke - * @{ - */ - - /** - * - * @brief Floating-point Clarke transform - * @param[in] Ia input three-phase coordinate a - * @param[in] Ib input three-phase coordinate b - * @param[out] pIalpha points to output two-phase orthogonal vector axis alpha - * @param[out] pIbeta points to output two-phase orthogonal vector axis beta - * @return none - */ - __STATIC_FORCEINLINE void arm_clarke_f32( - float32_t Ia, - float32_t Ib, - float32_t * pIalpha, - float32_t * pIbeta) - { - /* Calculate pIalpha using the equation, pIalpha = Ia */ - *pIalpha = Ia; - - /* Calculate pIbeta using the equation, pIbeta = (1/sqrt(3)) * Ia + (2/sqrt(3)) * Ib */ - *pIbeta = (0.57735026919f * Ia + 1.15470053838f * Ib); - } - - -/** - @brief Clarke transform for Q31 version - @param[in] Ia input three-phase coordinate a - @param[in] Ib input three-phase coordinate b - @param[out] pIalpha points to output two-phase orthogonal vector axis alpha - @param[out] pIbeta points to output two-phase orthogonal vector axis beta - @return none - - \par Scaling and Overflow Behavior - The function is implemented using an internal 32-bit accumulator. - The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. - There is saturation on the addition, hence there is no risk of overflow. 
- */ -__STATIC_FORCEINLINE void arm_clarke_q31( - q31_t Ia, - q31_t Ib, - q31_t * pIalpha, - q31_t * pIbeta) - { - q31_t product1, product2; /* Temporary variables used to store intermediate results */ - - /* Calculating pIalpha from Ia by equation pIalpha = Ia */ - *pIalpha = Ia; - - /* Intermediate product is calculated by (1/(sqrt(3)) * Ia) */ - product1 = (q31_t) (((q63_t) Ia * 0x24F34E8B) >> 30); - - /* Intermediate product is calculated by (2/sqrt(3) * Ib) */ - product2 = (q31_t) (((q63_t) Ib * 0x49E69D16) >> 30); - - /* pIbeta is calculated by adding the intermediate products */ - *pIbeta = __QADD(product1, product2); - } - - /** - * @} end of clarke group - */ - - - /** - * @ingroup groupController - */ - - /** - * @defgroup inv_clarke Vector Inverse Clarke Transform - * Inverse Clarke transform converts the two-coordinate time invariant vector into instantaneous stator phases. - * - * The function operates on a single sample of data and each call to the function returns the processed output. - * The library provides separate functions for Q31 and floating-point data types. - * \par Algorithm - * \image html clarkeInvFormula.gif - * where pIa and pIb are the instantaneous stator phases and - * Ialpha and Ibeta are the two coordinates of time invariant vector. - * \par Fixed-Point Behavior - * Care must be taken when using the Q31 version of the Clarke transform. - * In particular, the overflow and saturation behavior of the accumulator used must be considered. - * Refer to the function specific documentation below for usage guidelines. 
- */ - - /** - * @addtogroup inv_clarke - * @{ - */ - - /** - * @brief Floating-point Inverse Clarke transform - * @param[in] Ialpha input two-phase orthogonal vector axis alpha - * @param[in] Ibeta input two-phase orthogonal vector axis beta - * @param[out] pIa points to output three-phase coordinate a - * @param[out] pIb points to output three-phase coordinate b - * @return none - */ - __STATIC_FORCEINLINE void arm_inv_clarke_f32( - float32_t Ialpha, - float32_t Ibeta, - float32_t * pIa, - float32_t * pIb) - { - /* Calculating pIa from Ialpha by equation pIa = Ialpha */ - *pIa = Ialpha; - - /* Calculating pIb from Ialpha and Ibeta by equation pIb = -(1/2) * Ialpha + (sqrt(3)/2) * Ibeta */ - *pIb = -0.5f * Ialpha + 0.8660254039f * Ibeta; - } - - -/** - @brief Inverse Clarke transform for Q31 version - @param[in] Ialpha input two-phase orthogonal vector axis alpha - @param[in] Ibeta input two-phase orthogonal vector axis beta - @param[out] pIa points to output three-phase coordinate a - @param[out] pIb points to output three-phase coordinate b - @return none - - \par Scaling and Overflow Behavior - The function is implemented using an internal 32-bit accumulator. - The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. - There is saturation on the subtraction, hence there is no risk of overflow. 
- */ -__STATIC_FORCEINLINE void arm_inv_clarke_q31( - q31_t Ialpha, - q31_t Ibeta, - q31_t * pIa, - q31_t * pIb) - { - q31_t product1, product2; /* Temporary variables used to store intermediate results */ - - /* Calculating pIa from Ialpha by equation pIa = Ialpha */ - *pIa = Ialpha; - - /* Intermediate product is calculated by (1/(2*sqrt(3)) * Ia) */ - product1 = (q31_t) (((q63_t) (Ialpha) * (0x40000000)) >> 31); - - /* Intermediate product is calculated by (1/sqrt(3) * pIb) */ - product2 = (q31_t) (((q63_t) (Ibeta) * (0x6ED9EBA1)) >> 31); - - /* pIb is calculated by subtracting the products */ - *pIb = __QSUB(product2, product1); - } - - /** - * @} end of inv_clarke group - */ - - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _CONTROLLER_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/distance_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/distance_functions.h deleted file mode 100755 index d58c6c0..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/distance_functions.h +++ /dev/null @@ -1,296 +0,0 @@ -/****************************************************************************** - * @file distance_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _DISTANCE_FUNCTIONS_H_ -#define _DISTANCE_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/statistics_functions.h" -#include "dsp/basic_math_functions.h" -#include "dsp/fast_math_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - -/** - * @defgroup groupDistance Distance functions - * - * Distance functions for use with clustering algorithms. - * There are distance functions for float vectors and boolean vectors. - * - */ - -/* 6.14 bug */ -#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) && (__ARMCC_VERSION < 6150001) - -__attribute__((weak)) float __powisf2(float a, int b); - -#endif - -/** - * @brief Euclidean distance between two vectors - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ - -float32_t arm_euclidean_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - -/** - * @brief Bray-Curtis distance between two vectors - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ -float32_t arm_braycurtis_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - -/** - * @brief Canberra distance between two vectors - * - * This function may divide by zero when samples pA[i] and pB[i] are both zero. - * The result of the computation will be correct. So the division per zero may be - * ignored. 
- * - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ -float32_t arm_canberra_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - - -/** - * @brief Chebyshev distance between two vectors - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ -float32_t arm_chebyshev_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - - -/** - * @brief Cityblock (Manhattan) distance between two vectors - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ -float32_t arm_cityblock_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - -/** - * @brief Correlation distance between two vectors - * - * The input vectors are modified in place ! - * - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ -float32_t arm_correlation_distance_f32(float32_t *pA,float32_t *pB, uint32_t blockSize); - -/** - * @brief Cosine distance between two vectors - * - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ - -float32_t arm_cosine_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); - -/** - * @brief Jensen-Shannon distance between two vectors - * - * This function is assuming that elements of second vector are > 0 - * and 0 only when the corresponding element of first vector is 0. - * Otherwise the result of the computation does not make sense - * and for speed reasons, the cases returning NaN or Infinity are not - * managed. - * - * When the function is computing x log (x / y) with x 0 and y 0, - * it will compute the right value (0) but a division per zero will occur - * and shoudl be ignored in client code. 
- * - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] blockSize vector length - * @return distance - * - */ - -float32_t arm_jensenshannon_distance_f32(const float32_t *pA,const float32_t *pB,uint32_t blockSize); - -/** - * @brief Minkowski distance between two vectors - * - * @param[in] pA First vector - * @param[in] pB Second vector - * @param[in] n Norm order (>= 2) - * @param[in] blockSize vector length - * @return distance - * - */ - - - -float32_t arm_minkowski_distance_f32(const float32_t *pA,const float32_t *pB, int32_t order, uint32_t blockSize); - -/** - * @brief Dice distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] order Distance order - * @param[in] blockSize Number of samples - * @return distance - * - */ - - -float32_t arm_dice_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Hamming distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_hamming_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Jaccard distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_jaccard_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Kulsinski distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_kulsinski_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Roger 
Stanimoto distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_rogerstanimoto_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Russell-Rao distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_russellrao_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Sokal-Michener distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_sokalmichener_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Sokal-Sneath distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_sokalsneath_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - -/** - * @brief Yule distance between two vectors - * - * @param[in] pA First vector of packed booleans - * @param[in] pB Second vector of packed booleans - * @param[in] numberOfBools Number of booleans - * @return distance - * - */ - -float32_t arm_yule_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _DISTANCE_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/fast_math_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/fast_math_functions.h deleted file mode 100755 index 4de7159..0000000 --- 
a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/fast_math_functions.h +++ /dev/null @@ -1,287 +0,0 @@ -/****************************************************************************** - * @file fast_math_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _FAST_MATH_FUNCTIONS_H_ -#define _FAST_MATH_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - /** - * @brief Macros required for SINE and COSINE Fast math approximations - */ - -#define FAST_MATH_TABLE_SIZE 512 -#define FAST_MATH_Q31_SHIFT (32 - 10) -#define FAST_MATH_Q15_SHIFT (16 - 10) - -#ifndef PI - #define PI 3.14159265358979f -#endif - - -/** - * @defgroup groupFastMath Fast Math Functions - * This set of functions provides a fast approximation to sine, cosine, and square root. - * As compared to most of the other functions in the CMSIS math library, the fast math functions - * operate on individual values and not arrays. - * There are separate functions for Q15, Q31, and floating-point data. 
- * - */ - - /** - * @ingroup groupFastMath - */ - - -/** - @addtogroup sin - @{ - */ - -/** - * @brief Fast approximation to the trigonometric sine function for floating-point data. - * @param[in] x input value in radians. - * @return sin(x). - */ - float32_t arm_sin_f32( - float32_t x); - - - /** - * @brief Fast approximation to the trigonometric sine function for Q31 data. - * @param[in] x Scaled input value in radians. - * @return sin(x). - */ - q31_t arm_sin_q31( - q31_t x); - - - /** - * @brief Fast approximation to the trigonometric sine function for Q15 data. - * @param[in] x Scaled input value in radians. - * @return sin(x). - */ - q15_t arm_sin_q15( - q15_t x); - -/** - @} end of sin group - */ - -/** - @addtogroup cos - @{ - */ - - /** - * @brief Fast approximation to the trigonometric cosine function for floating-point data. - * @param[in] x input value in radians. - * @return cos(x). - */ - float32_t arm_cos_f32( - float32_t x); - - - /** - * @brief Fast approximation to the trigonometric cosine function for Q31 data. - * @param[in] x Scaled input value in radians. - * @return cos(x). - */ - q31_t arm_cos_q31( - q31_t x); - - - /** - * @brief Fast approximation to the trigonometric cosine function for Q15 data. - * @param[in] x Scaled input value in radians. - * @return cos(x). - */ - q15_t arm_cos_q15( - q15_t x); - -/** - @} end of cos group - */ - - -/** - @brief Floating-point vector of log values. - @param[in] pSrc points to the input vector - @param[out] pDst points to the output vector - @param[in] blockSize number of samples in each vector - @return none - */ - void arm_vlog_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - -/** - @brief Floating-point vector of exp values. 
- @param[in] pSrc points to the input vector - @param[out] pDst points to the output vector - @param[in] blockSize number of samples in each vector - @return none - */ - void arm_vexp_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - /** - * @defgroup SQRT Square Root - * - * Computes the square root of a number. - * There are separate functions for Q15, Q31, and floating-point data types. - * The square root function is computed using the Newton-Raphson algorithm. - * This is an iterative algorithm of the form: - *
-   *      x1 = x0 - f(x0)/f'(x0)
-   * 
- * where x1 is the current estimate, - * x0 is the previous estimate, and - * f'(x0) is the derivative of f() evaluated at x0. - * For the square root function, the algorithm reduces to: - *
-   *     x0 = in/2                         [initial guess]
-   *     x1 = 1/2 * ( x0 + in / x0)        [each iteration]
-   * 
- */ - - - /** - * @addtogroup SQRT - * @{ - */ - -/** - @brief Floating-point square root function. - @param[in] in input value - @param[out] pOut square root of input value - @return execution status - - \ref ARM_MATH_SUCCESS : input value is positive - - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 - */ -__STATIC_FORCEINLINE arm_status arm_sqrt_f32( - float32_t in, - float32_t * pOut) - { - if (in >= 0.0f) - { -#if defined ( __CC_ARM ) - #if defined __TARGET_FPU_VFP - *pOut = __sqrtf(in); - #else - *pOut = sqrtf(in); - #endif - -#elif defined ( __ICCARM__ ) - #if defined __ARMVFP__ - __ASM("VSQRT.F32 %0,%1" : "=t"(*pOut) : "t"(in)); - #else - *pOut = sqrtf(in); - #endif - -#else - *pOut = sqrtf(in); -#endif - - return (ARM_MATH_SUCCESS); - } - else - { - *pOut = 0.0f; - return (ARM_MATH_ARGUMENT_ERROR); - } - } - - -/** - @brief Q31 square root function. - @param[in] in input value. The range of the input value is [0 +1) or 0x00000000 to 0x7FFFFFFF - @param[out] pOut points to square root of input value - @return execution status - - \ref ARM_MATH_SUCCESS : input value is positive - - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 - */ -arm_status arm_sqrt_q31( - q31_t in, - q31_t * pOut); - - -/** - @brief Q15 square root function. - @param[in] in input value. The range of the input value is [0 +1) or 0x0000 to 0x7FFF - @param[out] pOut points to square root of input value - @return execution status - - \ref ARM_MATH_SUCCESS : input value is positive - - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 - */ -arm_status arm_sqrt_q15( - q15_t in, - q15_t * pOut); - - /** - * @brief Vector Floating-point square root function. - * @param[in] pIn input vector. - * @param[out] pOut vector of square roots of input elements. - * @param[in] len length of input vector. 
- * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if - * in is negative value and returns zero output for negative values. - */ - void arm_vsqrt_f32( - float32_t * pIn, - float32_t * pOut, - uint16_t len); - - void arm_vsqrt_q31( - q31_t * pIn, - q31_t * pOut, - uint16_t len); - - void arm_vsqrt_q15( - q15_t * pIn, - q15_t * pOut, - uint16_t len); - - /** - * @} end of SQRT group - */ - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _FAST_MATH_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/filtering_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/filtering_functions.h deleted file mode 100755 index 0cbd7cf..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/filtering_functions.h +++ /dev/null @@ -1,2439 +0,0 @@ -/****************************************************************************** - * @file filtering_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef _FILTERING_FUNCTIONS_H_ -#define _FILTERING_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/support_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -#define DELTA_Q31 ((q31_t)(0x100)) -#define DELTA_Q15 ((q15_t)0x5) - -/** - * @defgroup groupFilters Filtering Functions - */ - - /** - * @brief Instance structure for the Q7 FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of filter coefficients in the filter. */ - q7_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - const q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - } arm_fir_instance_q7; - - /** - * @brief Instance structure for the Q15 FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of filter coefficients in the filter. */ - q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - } arm_fir_instance_q15; - - /** - * @brief Instance structure for the Q31 FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of filter coefficients in the filter. */ - q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - } arm_fir_instance_q31; - - /** - * @brief Instance structure for the floating-point FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of filter coefficients in the filter. */ - float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. 
*/ - } arm_fir_instance_f32; - - /** - * @brief Processing function for the Q7 FIR filter. - * @param[in] S points to an instance of the Q7 FIR filter structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_q7( - const arm_fir_instance_q7 * S, - const q7_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the Q7 FIR filter. - * @param[in,out] S points to an instance of the Q7 FIR structure. - * @param[in] numTaps Number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of samples that are processed. - * - * For the MVE version, the coefficient length must be a multiple of 16. - * You can pad with zeros if you have less coefficients. - */ - void arm_fir_init_q7( - arm_fir_instance_q7 * S, - uint16_t numTaps, - const q7_t * pCoeffs, - q7_t * pState, - uint32_t blockSize); - - /** - * @brief Processing function for the Q15 FIR filter. - * @param[in] S points to an instance of the Q15 FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_q15( - const arm_fir_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - /** - * @brief Processing function for the fast Q15 FIR filter (fast version). - * @param[in] S points to an instance of the Q15 FIR filter structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. 
- */ - void arm_fir_fast_q15( - const arm_fir_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the Q15 FIR filter. - * @param[in,out] S points to an instance of the Q15 FIR filter structure. - * @param[in] numTaps Number of filter coefficients in the filter. Must be even and greater than or equal to 4. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of samples that are processed at a time. - * @return The function returns either - * ARM_MATH_SUCCESS if initialization was successful or - * ARM_MATH_ARGUMENT_ERROR if numTaps is not a supported value. - * - * For the MVE version, the coefficient length must be a multiple of 8. - * You can pad with zeros if you have less coefficients. - * - */ - arm_status arm_fir_init_q15( - arm_fir_instance_q15 * S, - uint16_t numTaps, - const q15_t * pCoeffs, - q15_t * pState, - uint32_t blockSize); - - /** - * @brief Processing function for the Q31 FIR filter. - * @param[in] S points to an instance of the Q31 FIR filter structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_q31( - const arm_fir_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - /** - * @brief Processing function for the fast Q31 FIR filter (fast version). - * @param[in] S points to an instance of the Q31 FIR filter structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_fast_q31( - const arm_fir_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the Q31 FIR filter. 
- * @param[in,out] S points to an instance of the Q31 FIR structure. - * @param[in] numTaps Number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of samples that are processed at a time. - * - * For the MVE version, the coefficient length must be a multiple of 4. - * You can pad with zeros if you have less coefficients. - */ - void arm_fir_init_q31( - arm_fir_instance_q31 * S, - uint16_t numTaps, - const q31_t * pCoeffs, - q31_t * pState, - uint32_t blockSize); - - /** - * @brief Processing function for the floating-point FIR filter. - * @param[in] S points to an instance of the floating-point FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_f32( - const arm_fir_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the floating-point FIR filter. - * @param[in,out] S points to an instance of the floating-point FIR filter structure. - * @param[in] numTaps Number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of samples that are processed at a time. - */ - void arm_fir_init_f32( - arm_fir_instance_f32 * S, - uint16_t numTaps, - const float32_t * pCoeffs, - float32_t * pState, - uint32_t blockSize); - - /** - * @brief Instance structure for the Q15 Biquad cascade filter. - */ - typedef struct - { - int8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - q15_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ - const q15_t *pCoeffs; /**< Points to the array of coefficients. 
The array is of length 5*numStages. */ - int8_t postShift; /**< Additional shift, in bits, applied to each output sample. */ - } arm_biquad_casd_df1_inst_q15; - - /** - * @brief Instance structure for the Q31 Biquad cascade filter. - */ - typedef struct - { - uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - q31_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ - const q31_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */ - uint8_t postShift; /**< Additional shift, in bits, applied to each output sample. */ - } arm_biquad_casd_df1_inst_q31; - - /** - * @brief Instance structure for the floating-point Biquad cascade filter. - */ - typedef struct - { - uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - float32_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ - const float32_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */ - } arm_biquad_casd_df1_inst_f32; - -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) - /** - * @brief Instance structure for the modified Biquad coefs required by vectorized code. - */ - typedef struct - { - float32_t coeffs[8][4]; /**< Points to the array of modified coefficients. The array is of length 32. There is one per stage */ - } arm_biquad_mod_coef_f32; -#endif - - /** - * @brief Processing function for the Q15 Biquad cascade filter. - * @param[in] S points to an instance of the Q15 Biquad cascade structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. 
- */ - void arm_biquad_cascade_df1_q15( - const arm_biquad_casd_df1_inst_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the Q15 Biquad cascade filter. - * @param[in,out] S points to an instance of the Q15 Biquad cascade structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format - */ - void arm_biquad_cascade_df1_init_q15( - arm_biquad_casd_df1_inst_q15 * S, - uint8_t numStages, - const q15_t * pCoeffs, - q15_t * pState, - int8_t postShift); - - /** - * @brief Fast but less precise processing function for the Q15 Biquad cascade filter for Cortex-M3 and Cortex-M4. - * @param[in] S points to an instance of the Q15 Biquad cascade structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df1_fast_q15( - const arm_biquad_casd_df1_inst_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - /** - * @brief Processing function for the Q31 Biquad cascade filter - * @param[in] S points to an instance of the Q31 Biquad cascade structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df1_q31( - const arm_biquad_casd_df1_inst_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - /** - * @brief Fast but less precise processing function for the Q31 Biquad cascade filter for Cortex-M3 and Cortex-M4. - * @param[in] S points to an instance of the Q31 Biquad cascade structure. - * @param[in] pSrc points to the block of input data. 
- * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df1_fast_q31( - const arm_biquad_casd_df1_inst_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the Q31 Biquad cascade filter. - * @param[in,out] S points to an instance of the Q31 Biquad cascade structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format - */ - void arm_biquad_cascade_df1_init_q31( - arm_biquad_casd_df1_inst_q31 * S, - uint8_t numStages, - const q31_t * pCoeffs, - q31_t * pState, - int8_t postShift); - - /** - * @brief Processing function for the floating-point Biquad cascade filter. - * @param[in] S points to an instance of the floating-point Biquad cascade structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df1_f32( - const arm_biquad_casd_df1_inst_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the floating-point Biquad cascade filter. - * @param[in,out] S points to an instance of the floating-point Biquad cascade structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pCoeffsMod points to the modified filter coefficients (only MVE version). - * @param[in] pState points to the state buffer. 
- */ -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) - void arm_biquad_cascade_df1_mve_init_f32( - arm_biquad_casd_df1_inst_f32 * S, - uint8_t numStages, - const float32_t * pCoeffs, - arm_biquad_mod_coef_f32 * pCoeffsMod, - float32_t * pState); -#endif - - void arm_biquad_cascade_df1_init_f32( - arm_biquad_casd_df1_inst_f32 * S, - uint8_t numStages, - const float32_t * pCoeffs, - float32_t * pState); - - -/** - * @brief Convolution of floating-point sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the location where the output result is written. Length srcALen+srcBLen-1. - */ - void arm_conv_f32( - const float32_t * pSrcA, - uint32_t srcALen, - const float32_t * pSrcB, - uint32_t srcBLen, - float32_t * pDst); - - - /** - * @brief Convolution of Q15 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). - */ - void arm_conv_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - q15_t * pScratch1, - q15_t * pScratch2); - - -/** - * @brief Convolution of Q15 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. 
- * @param[out] pDst points to the location where the output result is written. Length srcALen+srcBLen-1. - */ - void arm_conv_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst); - - - /** - * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - */ - void arm_conv_fast_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst); - - - /** - * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). - */ - void arm_conv_fast_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - q15_t * pScratch1, - q15_t * pScratch2); - - - /** - * @brief Convolution of Q31 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. 
- */ - void arm_conv_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst); - - - /** - * @brief Convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - */ - void arm_conv_fast_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst); - - - /** - * @brief Convolution of Q7 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). - */ - void arm_conv_opt_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst, - q15_t * pScratch1, - q15_t * pScratch2); - - - /** - * @brief Convolution of Q7 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. - */ - void arm_conv_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst); - - - /** - * @brief Partial convolution of floating-point sequences. 
- * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_f32( - const float32_t * pSrcA, - uint32_t srcALen, - const float32_t * pSrcB, - uint32_t srcBLen, - float32_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Partial convolution of Q15 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - uint32_t firstIndex, - uint32_t numPoints, - q15_t * pScratch1, - q15_t * pScratch2); - - - /** - * @brief Partial convolution of Q15 sequences. 
- * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_fast_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. 
- * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_fast_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - uint32_t firstIndex, - uint32_t numPoints, - q15_t * pScratch1, - q15_t * pScratch2); - - - /** - * @brief Partial convolution of Q31 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Partial convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. 
- * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_fast_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Partial convolution of Q7 sequences - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_opt_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst, - uint32_t firstIndex, - uint32_t numPoints, - q15_t * pScratch1, - q15_t * pScratch2); - - -/** - * @brief Partial convolution of Q7 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. 
- * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data - * @param[in] firstIndex is the first output sample to start with. - * @param[in] numPoints is the number of output points to be computed. - * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. - */ - arm_status arm_conv_partial_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst, - uint32_t firstIndex, - uint32_t numPoints); - - - /** - * @brief Instance structure for the Q15 FIR decimator. - */ - typedef struct - { - uint8_t M; /**< decimation factor. */ - uint16_t numTaps; /**< number of coefficients in the filter. */ - const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - } arm_fir_decimate_instance_q15; - - /** - * @brief Instance structure for the Q31 FIR decimator. - */ - typedef struct - { - uint8_t M; /**< decimation factor. */ - uint16_t numTaps; /**< number of coefficients in the filter. */ - const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - } arm_fir_decimate_instance_q31; - -/** - @brief Instance structure for floating-point FIR decimator. - */ -typedef struct - { - uint8_t M; /**< decimation factor. */ - uint16_t numTaps; /**< number of coefficients in the filter. */ - const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - } arm_fir_decimate_instance_f32; - - -/** - @brief Processing function for floating-point FIR decimator. 
- @param[in] S points to an instance of the floating-point FIR decimator structure - @param[in] pSrc points to the block of input data - @param[out] pDst points to the block of output data - @param[in] blockSize number of samples to process - */ -void arm_fir_decimate_f32( - const arm_fir_decimate_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - -/** - @brief Initialization function for the floating-point FIR decimator. - @param[in,out] S points to an instance of the floating-point FIR decimator structure - @param[in] numTaps number of coefficients in the filter - @param[in] M decimation factor - @param[in] pCoeffs points to the filter coefficients - @param[in] pState points to the state buffer - @param[in] blockSize number of input samples to process per call - @return execution status - - \ref ARM_MATH_SUCCESS : Operation successful - - \ref ARM_MATH_LENGTH_ERROR : blockSize is not a multiple of M - */ -arm_status arm_fir_decimate_init_f32( - arm_fir_decimate_instance_f32 * S, - uint16_t numTaps, - uint8_t M, - const float32_t * pCoeffs, - float32_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q15 FIR decimator. - * @param[in] S points to an instance of the Q15 FIR decimator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_decimate_q15( - const arm_fir_decimate_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q15 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4. - * @param[in] S points to an instance of the Q15 FIR decimator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of input samples to process per call. 
- */ - void arm_fir_decimate_fast_q15( - const arm_fir_decimate_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q15 FIR decimator. - * @param[in,out] S points to an instance of the Q15 FIR decimator structure. - * @param[in] numTaps number of coefficients in the filter. - * @param[in] M decimation factor. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of input samples to process per call. - * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if - * blockSize is not a multiple of M. - */ - arm_status arm_fir_decimate_init_q15( - arm_fir_decimate_instance_q15 * S, - uint16_t numTaps, - uint8_t M, - const q15_t * pCoeffs, - q15_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q31 FIR decimator. - * @param[in] S points to an instance of the Q31 FIR decimator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_decimate_q31( - const arm_fir_decimate_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - /** - * @brief Processing function for the Q31 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4. - * @param[in] S points to an instance of the Q31 FIR decimator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_decimate_fast_q31( - const arm_fir_decimate_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q31 FIR decimator. 
- * @param[in,out] S points to an instance of the Q31 FIR decimator structure. - * @param[in] numTaps number of coefficients in the filter. - * @param[in] M decimation factor. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of input samples to process per call. - * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if - * blockSize is not a multiple of M. - */ - arm_status arm_fir_decimate_init_q31( - arm_fir_decimate_instance_q31 * S, - uint16_t numTaps, - uint8_t M, - const q31_t * pCoeffs, - q31_t * pState, - uint32_t blockSize); - - - /** - * @brief Instance structure for the Q15 FIR interpolator. - */ - typedef struct - { - uint8_t L; /**< upsample factor. */ - uint16_t phaseLength; /**< length of each polyphase filter component. */ - const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ - q15_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */ - } arm_fir_interpolate_instance_q15; - - /** - * @brief Instance structure for the Q31 FIR interpolator. - */ - typedef struct - { - uint8_t L; /**< upsample factor. */ - uint16_t phaseLength; /**< length of each polyphase filter component. */ - const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ - q31_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */ - } arm_fir_interpolate_instance_q31; - - /** - * @brief Instance structure for the floating-point FIR interpolator. - */ - typedef struct - { - uint8_t L; /**< upsample factor. */ - uint16_t phaseLength; /**< length of each polyphase filter component. */ - const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ - float32_t *pState; /**< points to the state variable array. 
The array is of length phaseLength+numTaps-1. */ - } arm_fir_interpolate_instance_f32; - - - /** - * @brief Processing function for the Q15 FIR interpolator. - * @param[in] S points to an instance of the Q15 FIR interpolator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_interpolate_q15( - const arm_fir_interpolate_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q15 FIR interpolator. - * @param[in,out] S points to an instance of the Q15 FIR interpolator structure. - * @param[in] L upsample factor. - * @param[in] numTaps number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficient buffer. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of input samples to process per call. - * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if - * the filter length numTaps is not a multiple of the interpolation factor L. - */ - arm_status arm_fir_interpolate_init_q15( - arm_fir_interpolate_instance_q15 * S, - uint8_t L, - uint16_t numTaps, - const q15_t * pCoeffs, - q15_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q31 FIR interpolator. - * @param[in] S points to an instance of the Q15 FIR interpolator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_interpolate_q31( - const arm_fir_interpolate_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q31 FIR interpolator. 
- * @param[in,out] S points to an instance of the Q31 FIR interpolator structure. - * @param[in] L upsample factor. - * @param[in] numTaps number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficient buffer. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of input samples to process per call. - * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if - * the filter length numTaps is not a multiple of the interpolation factor L. - */ - arm_status arm_fir_interpolate_init_q31( - arm_fir_interpolate_instance_q31 * S, - uint8_t L, - uint16_t numTaps, - const q31_t * pCoeffs, - q31_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the floating-point FIR interpolator. - * @param[in] S points to an instance of the floating-point FIR interpolator structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_interpolate_f32( - const arm_fir_interpolate_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the floating-point FIR interpolator. - * @param[in,out] S points to an instance of the floating-point FIR interpolator structure. - * @param[in] L upsample factor. - * @param[in] numTaps number of filter coefficients in the filter. - * @param[in] pCoeffs points to the filter coefficient buffer. - * @param[in] pState points to the state buffer. - * @param[in] blockSize number of input samples to process per call. - * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if - * the filter length numTaps is not a multiple of the interpolation factor L. 
- */ - arm_status arm_fir_interpolate_init_f32( - arm_fir_interpolate_instance_f32 * S, - uint8_t L, - uint16_t numTaps, - const float32_t * pCoeffs, - float32_t * pState, - uint32_t blockSize); - - - /** - * @brief Instance structure for the high precision Q31 Biquad cascade filter. - */ - typedef struct - { - uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - q63_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */ - const q31_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ - uint8_t postShift; /**< additional shift, in bits, applied to each output sample. */ - } arm_biquad_cas_df1_32x64_ins_q31; - - - /** - * @param[in] S points to an instance of the high precision Q31 Biquad cascade filter structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cas_df1_32x64_q31( - const arm_biquad_cas_df1_32x64_ins_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @param[in,out] S points to an instance of the high precision Q31 Biquad cascade filter structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] postShift shift to be applied to the output. Varies according to the coefficients format - */ - void arm_biquad_cas_df1_32x64_init_q31( - arm_biquad_cas_df1_32x64_ins_q31 * S, - uint8_t numStages, - const q31_t * pCoeffs, - q63_t * pState, - uint8_t postShift); - - - /** - * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. - */ - typedef struct - { - uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. 
*/ - float32_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */ - const float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ - } arm_biquad_cascade_df2T_instance_f32; - - /** - * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. - */ - typedef struct - { - uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - float32_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */ - const float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ - } arm_biquad_cascade_stereo_df2T_instance_f32; - - /** - * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. - */ - typedef struct - { - uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ - float64_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */ - const float64_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ - } arm_biquad_cascade_df2T_instance_f64; - - - /** - * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. - * @param[in] S points to an instance of the filter data structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df2T_f32( - const arm_biquad_cascade_df2T_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. 2 channels - * @param[in] S points to an instance of the filter data structure. 
- * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_stereo_df2T_f32( - const arm_biquad_cascade_stereo_df2T_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. - * @param[in] S points to an instance of the filter data structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_biquad_cascade_df2T_f64( - const arm_biquad_cascade_df2T_instance_f64 * S, - const float64_t * pSrc, - float64_t * pDst, - uint32_t blockSize); - - -#if defined(ARM_MATH_NEON) -void arm_biquad_cascade_df2T_compute_coefs_f32( - arm_biquad_cascade_df2T_instance_f32 * S, - uint8_t numStages, - float32_t * pCoeffs); -#endif - /** - * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. - * @param[in,out] S points to an instance of the filter data structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - */ - void arm_biquad_cascade_df2T_init_f32( - arm_biquad_cascade_df2T_instance_f32 * S, - uint8_t numStages, - const float32_t * pCoeffs, - float32_t * pState); - - - /** - * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. - * @param[in,out] S points to an instance of the filter data structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. 
- */ - void arm_biquad_cascade_stereo_df2T_init_f32( - arm_biquad_cascade_stereo_df2T_instance_f32 * S, - uint8_t numStages, - const float32_t * pCoeffs, - float32_t * pState); - - - /** - * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. - * @param[in,out] S points to an instance of the filter data structure. - * @param[in] numStages number of 2nd order stages in the filter. - * @param[in] pCoeffs points to the filter coefficients. - * @param[in] pState points to the state buffer. - */ - void arm_biquad_cascade_df2T_init_f64( - arm_biquad_cascade_df2T_instance_f64 * S, - uint8_t numStages, - const float64_t * pCoeffs, - float64_t * pState); - - - /** - * @brief Instance structure for the Q15 FIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of filter stages. */ - q15_t *pState; /**< points to the state variable array. The array is of length numStages. */ - const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ - } arm_fir_lattice_instance_q15; - - /** - * @brief Instance structure for the Q31 FIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of filter stages. */ - q31_t *pState; /**< points to the state variable array. The array is of length numStages. */ - const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ - } arm_fir_lattice_instance_q31; - - /** - * @brief Instance structure for the floating-point FIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of filter stages. */ - float32_t *pState; /**< points to the state variable array. The array is of length numStages. */ - const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ - } arm_fir_lattice_instance_f32; - - - /** - * @brief Initialization function for the Q15 FIR lattice filter. 
- * @param[in] S points to an instance of the Q15 FIR lattice structure. - * @param[in] numStages number of filter stages. - * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. - * @param[in] pState points to the state buffer. The array is of length numStages. - */ - void arm_fir_lattice_init_q15( - arm_fir_lattice_instance_q15 * S, - uint16_t numStages, - const q15_t * pCoeffs, - q15_t * pState); - - - /** - * @brief Processing function for the Q15 FIR lattice filter. - * @param[in] S points to an instance of the Q15 FIR lattice structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_fir_lattice_q15( - const arm_fir_lattice_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q31 FIR lattice filter. - * @param[in] S points to an instance of the Q31 FIR lattice structure. - * @param[in] numStages number of filter stages. - * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. - * @param[in] pState points to the state buffer. The array is of length numStages. - */ - void arm_fir_lattice_init_q31( - arm_fir_lattice_instance_q31 * S, - uint16_t numStages, - const q31_t * pCoeffs, - q31_t * pState); - - - /** - * @brief Processing function for the Q31 FIR lattice filter. - * @param[in] S points to an instance of the Q31 FIR lattice structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_fir_lattice_q31( - const arm_fir_lattice_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - -/** - * @brief Initialization function for the floating-point FIR lattice filter. 
- * @param[in] S points to an instance of the floating-point FIR lattice structure. - * @param[in] numStages number of filter stages. - * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. - * @param[in] pState points to the state buffer. The array is of length numStages. - */ - void arm_fir_lattice_init_f32( - arm_fir_lattice_instance_f32 * S, - uint16_t numStages, - const float32_t * pCoeffs, - float32_t * pState); - - - /** - * @brief Processing function for the floating-point FIR lattice filter. - * @param[in] S points to an instance of the floating-point FIR lattice structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_fir_lattice_f32( - const arm_fir_lattice_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Instance structure for the Q15 IIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of stages in the filter. */ - q15_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ - q15_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ - q15_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */ - } arm_iir_lattice_instance_q15; - - /** - * @brief Instance structure for the Q31 IIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of stages in the filter. */ - q31_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ - q31_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ - q31_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. 
*/ - } arm_iir_lattice_instance_q31; - - /** - * @brief Instance structure for the floating-point IIR lattice filter. - */ - typedef struct - { - uint16_t numStages; /**< number of stages in the filter. */ - float32_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ - float32_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ - float32_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */ - } arm_iir_lattice_instance_f32; - - - /** - * @brief Processing function for the floating-point IIR lattice filter. - * @param[in] S points to an instance of the floating-point IIR lattice structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_iir_lattice_f32( - const arm_iir_lattice_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the floating-point IIR lattice filter. - * @param[in] S points to an instance of the floating-point IIR lattice structure. - * @param[in] numStages number of stages in the filter. - * @param[in] pkCoeffs points to the reflection coefficient buffer. The array is of length numStages. - * @param[in] pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1. - * @param[in] pState points to the state buffer. The array is of length numStages+blockSize-1. - * @param[in] blockSize number of samples to process. - */ - void arm_iir_lattice_init_f32( - arm_iir_lattice_instance_f32 * S, - uint16_t numStages, - float32_t * pkCoeffs, - float32_t * pvCoeffs, - float32_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q31 IIR lattice filter. - * @param[in] S points to an instance of the Q31 IIR lattice structure. 
- * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_iir_lattice_q31( - const arm_iir_lattice_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q31 IIR lattice filter. - * @param[in] S points to an instance of the Q31 IIR lattice structure. - * @param[in] numStages number of stages in the filter. - * @param[in] pkCoeffs points to the reflection coefficient buffer. The array is of length numStages. - * @param[in] pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1. - * @param[in] pState points to the state buffer. The array is of length numStages+blockSize. - * @param[in] blockSize number of samples to process. - */ - void arm_iir_lattice_init_q31( - arm_iir_lattice_instance_q31 * S, - uint16_t numStages, - q31_t * pkCoeffs, - q31_t * pvCoeffs, - q31_t * pState, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q15 IIR lattice filter. - * @param[in] S points to an instance of the Q15 IIR lattice structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_iir_lattice_q15( - const arm_iir_lattice_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - -/** - * @brief Initialization function for the Q15 IIR lattice filter. - * @param[in] S points to an instance of the fixed-point Q15 IIR lattice structure. - * @param[in] numStages number of stages in the filter. - * @param[in] pkCoeffs points to reflection coefficient buffer. The array is of length numStages. - * @param[in] pvCoeffs points to ladder coefficient buffer. The array is of length numStages+1. - * @param[in] pState points to state buffer. The array is of length numStages+blockSize. 
- * @param[in] blockSize number of samples to process per call. - */ - void arm_iir_lattice_init_q15( - arm_iir_lattice_instance_q15 * S, - uint16_t numStages, - q15_t * pkCoeffs, - q15_t * pvCoeffs, - q15_t * pState, - uint32_t blockSize); - - - /** - * @brief Instance structure for the floating-point LMS filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - float32_t mu; /**< step size that controls filter coefficient updates. */ - } arm_lms_instance_f32; - - - /** - * @brief Processing function for floating-point LMS filter. - * @param[in] S points to an instance of the floating-point LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_f32( - const arm_lms_instance_f32 * S, - const float32_t * pSrc, - float32_t * pRef, - float32_t * pOut, - float32_t * pErr, - uint32_t blockSize); - - - /** - * @brief Initialization function for floating-point LMS filter. - * @param[in] S points to an instance of the floating-point LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to the coefficient buffer. - * @param[in] pState points to state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_init_f32( - arm_lms_instance_f32 * S, - uint16_t numTaps, - float32_t * pCoeffs, - float32_t * pState, - float32_t mu, - uint32_t blockSize); - - - /** - * @brief Instance structure for the Q15 LMS filter. 
- */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - q15_t mu; /**< step size that controls filter coefficient updates. */ - uint32_t postShift; /**< bit shift applied to coefficients. */ - } arm_lms_instance_q15; - - - /** - * @brief Initialization function for the Q15 LMS filter. - * @param[in] S points to an instance of the Q15 LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to the coefficient buffer. - * @param[in] pState points to the state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - * @param[in] postShift bit shift applied to coefficients. - */ - void arm_lms_init_q15( - arm_lms_instance_q15 * S, - uint16_t numTaps, - q15_t * pCoeffs, - q15_t * pState, - q15_t mu, - uint32_t blockSize, - uint32_t postShift); - - - /** - * @brief Processing function for Q15 LMS filter. - * @param[in] S points to an instance of the Q15 LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_q15( - const arm_lms_instance_q15 * S, - const q15_t * pSrc, - q15_t * pRef, - q15_t * pOut, - q15_t * pErr, - uint32_t blockSize); - - - /** - * @brief Instance structure for the Q31 LMS filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - q31_t *pCoeffs; /**< points to the coefficient array. 
The array is of length numTaps. */ - q31_t mu; /**< step size that controls filter coefficient updates. */ - uint32_t postShift; /**< bit shift applied to coefficients. */ - } arm_lms_instance_q31; - - - /** - * @brief Processing function for Q31 LMS filter. - * @param[in] S points to an instance of the Q15 LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_q31( - const arm_lms_instance_q31 * S, - const q31_t * pSrc, - q31_t * pRef, - q31_t * pOut, - q31_t * pErr, - uint32_t blockSize); - - - /** - * @brief Initialization function for Q31 LMS filter. - * @param[in] S points to an instance of the Q31 LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to coefficient buffer. - * @param[in] pState points to state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - * @param[in] postShift bit shift applied to coefficients. - */ - void arm_lms_init_q31( - arm_lms_instance_q31 * S, - uint16_t numTaps, - q31_t * pCoeffs, - q31_t * pState, - q31_t mu, - uint32_t blockSize, - uint32_t postShift); - - - /** - * @brief Instance structure for the floating-point normalized LMS filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - float32_t mu; /**< step size that control filter coefficient updates. */ - float32_t energy; /**< saves previous frame energy. */ - float32_t x0; /**< saves previous input sample. 
*/ - } arm_lms_norm_instance_f32; - - - /** - * @brief Processing function for floating-point normalized LMS filter. - * @param[in] S points to an instance of the floating-point normalized LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_norm_f32( - arm_lms_norm_instance_f32 * S, - const float32_t * pSrc, - float32_t * pRef, - float32_t * pOut, - float32_t * pErr, - uint32_t blockSize); - - - /** - * @brief Initialization function for floating-point normalized LMS filter. - * @param[in] S points to an instance of the floating-point LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to coefficient buffer. - * @param[in] pState points to state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_norm_init_f32( - arm_lms_norm_instance_f32 * S, - uint16_t numTaps, - float32_t * pCoeffs, - float32_t * pState, - float32_t mu, - uint32_t blockSize); - - - /** - * @brief Instance structure for the Q31 normalized LMS filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - q31_t mu; /**< step size that controls filter coefficient updates. */ - uint8_t postShift; /**< bit shift applied to coefficients. */ - const q31_t *recipTable; /**< points to the reciprocal initial value table. */ - q31_t energy; /**< saves previous frame energy. */ - q31_t x0; /**< saves previous input sample. 
*/ - } arm_lms_norm_instance_q31; - - - /** - * @brief Processing function for Q31 normalized LMS filter. - * @param[in] S points to an instance of the Q31 normalized LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_norm_q31( - arm_lms_norm_instance_q31 * S, - const q31_t * pSrc, - q31_t * pRef, - q31_t * pOut, - q31_t * pErr, - uint32_t blockSize); - - - /** - * @brief Initialization function for Q31 normalized LMS filter. - * @param[in] S points to an instance of the Q31 normalized LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to coefficient buffer. - * @param[in] pState points to state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - * @param[in] postShift bit shift applied to coefficients. - */ - void arm_lms_norm_init_q31( - arm_lms_norm_instance_q31 * S, - uint16_t numTaps, - q31_t * pCoeffs, - q31_t * pState, - q31_t mu, - uint32_t blockSize, - uint8_t postShift); - - - /** - * @brief Instance structure for the Q15 normalized LMS filter. - */ - typedef struct - { - uint16_t numTaps; /**< Number of coefficients in the filter. */ - q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ - q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ - q15_t mu; /**< step size that controls filter coefficient updates. */ - uint8_t postShift; /**< bit shift applied to coefficients. */ - const q15_t *recipTable; /**< Points to the reciprocal initial value table. */ - q15_t energy; /**< saves previous frame energy. */ - q15_t x0; /**< saves previous input sample. 
*/ - } arm_lms_norm_instance_q15; - - - /** - * @brief Processing function for Q15 normalized LMS filter. - * @param[in] S points to an instance of the Q15 normalized LMS filter structure. - * @param[in] pSrc points to the block of input data. - * @param[in] pRef points to the block of reference data. - * @param[out] pOut points to the block of output data. - * @param[out] pErr points to the block of error data. - * @param[in] blockSize number of samples to process. - */ - void arm_lms_norm_q15( - arm_lms_norm_instance_q15 * S, - const q15_t * pSrc, - q15_t * pRef, - q15_t * pOut, - q15_t * pErr, - uint32_t blockSize); - - - /** - * @brief Initialization function for Q15 normalized LMS filter. - * @param[in] S points to an instance of the Q15 normalized LMS filter structure. - * @param[in] numTaps number of filter coefficients. - * @param[in] pCoeffs points to coefficient buffer. - * @param[in] pState points to state buffer. - * @param[in] mu step size that controls filter coefficient updates. - * @param[in] blockSize number of samples to process. - * @param[in] postShift bit shift applied to coefficients. - */ - void arm_lms_norm_init_q15( - arm_lms_norm_instance_q15 * S, - uint16_t numTaps, - q15_t * pCoeffs, - q15_t * pState, - q15_t mu, - uint32_t blockSize, - uint8_t postShift); - - - /** - * @brief Correlation of floating-point sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. 
- */ - void arm_correlate_f32( - const float32_t * pSrcA, - uint32_t srcALen, - const float32_t * pSrcB, - uint32_t srcBLen, - float32_t * pDst); - - -/** - @brief Correlation of Q15 sequences - @param[in] pSrcA points to the first input sequence - @param[in] srcALen length of the first input sequence - @param[in] pSrcB points to the second input sequence - @param[in] srcBLen length of the second input sequence - @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - @param[in] pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. -*/ -void arm_correlate_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - q15_t * pScratch); - - -/** - @brief Correlation of Q15 sequences. - @param[in] pSrcA points to the first input sequence - @param[in] srcALen length of the first input sequence - @param[in] pSrcB points to the second input sequence - @param[in] srcBLen length of the second input sequence - @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - */ - void arm_correlate_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst); - - -/** - @brief Correlation of Q15 sequences (fast version). - @param[in] pSrcA points to the first input sequence - @param[in] srcALen length of the first input sequence - @param[in] pSrcB points to the second input sequence - @param[in] srcBLen length of the second input sequence - @param[out] pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1. - @return none - */ -void arm_correlate_fast_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst); - - -/** - @brief Correlation of Q15 sequences (fast version). - @param[in] pSrcA points to the first input sequence. - @param[in] srcALen length of the first input sequence. 
- @param[in] pSrcB points to the second input sequence. - @param[in] srcBLen length of the second input sequence. - @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - @param[in] pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - */ -void arm_correlate_fast_opt_q15( - const q15_t * pSrcA, - uint32_t srcALen, - const q15_t * pSrcB, - uint32_t srcBLen, - q15_t * pDst, - q15_t * pScratch); - - - /** - * @brief Correlation of Q31 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - */ - void arm_correlate_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst); - - -/** - @brief Correlation of Q31 sequences (fast version). - @param[in] pSrcA points to the first input sequence - @param[in] srcALen length of the first input sequence - @param[in] pSrcB points to the second input sequence - @param[in] srcBLen length of the second input sequence - @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - */ -void arm_correlate_fast_q31( - const q31_t * pSrcA, - uint32_t srcALen, - const q31_t * pSrcB, - uint32_t srcBLen, - q31_t * pDst); - - - /** - * @brief Correlation of Q7 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. 
- * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. - * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). - */ - void arm_correlate_opt_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst, - q15_t * pScratch1, - q15_t * pScratch2); - - - /** - * @brief Correlation of Q7 sequences. - * @param[in] pSrcA points to the first input sequence. - * @param[in] srcALen length of the first input sequence. - * @param[in] pSrcB points to the second input sequence. - * @param[in] srcBLen length of the second input sequence. - * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. - */ - void arm_correlate_q7( - const q7_t * pSrcA, - uint32_t srcALen, - const q7_t * pSrcB, - uint32_t srcBLen, - q7_t * pDst); - - - /** - * @brief Instance structure for the floating-point sparse FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ - float32_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ - const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ - int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ - } arm_fir_sparse_instance_f32; - - /** - * @brief Instance structure for the Q31 sparse FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ - q31_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. 
*/ - const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ - int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ - } arm_fir_sparse_instance_q31; - - /** - * @brief Instance structure for the Q15 sparse FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ - q15_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ - const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ - int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ - } arm_fir_sparse_instance_q15; - - /** - * @brief Instance structure for the Q7 sparse FIR filter. - */ - typedef struct - { - uint16_t numTaps; /**< number of coefficients in the filter. */ - uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ - q7_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ - const q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ - uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ - int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ - } arm_fir_sparse_instance_q7; - - - /** - * @brief Processing function for the floating-point sparse FIR filter. - * @param[in] S points to an instance of the floating-point sparse FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] pScratchIn points to a temporary buffer of size blockSize. 
- * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_sparse_f32( - arm_fir_sparse_instance_f32 * S, - const float32_t * pSrc, - float32_t * pDst, - float32_t * pScratchIn, - uint32_t blockSize); - - - /** - * @brief Initialization function for the floating-point sparse FIR filter. - * @param[in,out] S points to an instance of the floating-point sparse FIR structure. - * @param[in] numTaps number of nonzero coefficients in the filter. - * @param[in] pCoeffs points to the array of filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] pTapDelay points to the array of offset times. - * @param[in] maxDelay maximum offset time supported. - * @param[in] blockSize number of samples that will be processed per block. - */ - void arm_fir_sparse_init_f32( - arm_fir_sparse_instance_f32 * S, - uint16_t numTaps, - const float32_t * pCoeffs, - float32_t * pState, - int32_t * pTapDelay, - uint16_t maxDelay, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q31 sparse FIR filter. - * @param[in] S points to an instance of the Q31 sparse FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] pScratchIn points to a temporary buffer of size blockSize. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_sparse_q31( - arm_fir_sparse_instance_q31 * S, - const q31_t * pSrc, - q31_t * pDst, - q31_t * pScratchIn, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q31 sparse FIR filter. - * @param[in,out] S points to an instance of the Q31 sparse FIR structure. - * @param[in] numTaps number of nonzero coefficients in the filter. - * @param[in] pCoeffs points to the array of filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] pTapDelay points to the array of offset times. - * @param[in] maxDelay maximum offset time supported. 
- * @param[in] blockSize number of samples that will be processed per block. - */ - void arm_fir_sparse_init_q31( - arm_fir_sparse_instance_q31 * S, - uint16_t numTaps, - const q31_t * pCoeffs, - q31_t * pState, - int32_t * pTapDelay, - uint16_t maxDelay, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q15 sparse FIR filter. - * @param[in] S points to an instance of the Q15 sparse FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] pScratchIn points to a temporary buffer of size blockSize. - * @param[in] pScratchOut points to a temporary buffer of size blockSize. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_sparse_q15( - arm_fir_sparse_instance_q15 * S, - const q15_t * pSrc, - q15_t * pDst, - q15_t * pScratchIn, - q31_t * pScratchOut, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q15 sparse FIR filter. - * @param[in,out] S points to an instance of the Q15 sparse FIR structure. - * @param[in] numTaps number of nonzero coefficients in the filter. - * @param[in] pCoeffs points to the array of filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] pTapDelay points to the array of offset times. - * @param[in] maxDelay maximum offset time supported. - * @param[in] blockSize number of samples that will be processed per block. - */ - void arm_fir_sparse_init_q15( - arm_fir_sparse_instance_q15 * S, - uint16_t numTaps, - const q15_t * pCoeffs, - q15_t * pState, - int32_t * pTapDelay, - uint16_t maxDelay, - uint32_t blockSize); - - - /** - * @brief Processing function for the Q7 sparse FIR filter. - * @param[in] S points to an instance of the Q7 sparse FIR structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] pScratchIn points to a temporary buffer of size blockSize. 
- * @param[in] pScratchOut points to a temporary buffer of size blockSize. - * @param[in] blockSize number of input samples to process per call. - */ - void arm_fir_sparse_q7( - arm_fir_sparse_instance_q7 * S, - const q7_t * pSrc, - q7_t * pDst, - q7_t * pScratchIn, - q31_t * pScratchOut, - uint32_t blockSize); - - - /** - * @brief Initialization function for the Q7 sparse FIR filter. - * @param[in,out] S points to an instance of the Q7 sparse FIR structure. - * @param[in] numTaps number of nonzero coefficients in the filter. - * @param[in] pCoeffs points to the array of filter coefficients. - * @param[in] pState points to the state buffer. - * @param[in] pTapDelay points to the array of offset times. - * @param[in] maxDelay maximum offset time supported. - * @param[in] blockSize number of samples that will be processed per block. - */ - void arm_fir_sparse_init_q7( - arm_fir_sparse_instance_q7 * S, - uint16_t numTaps, - const q7_t * pCoeffs, - q7_t * pState, - int32_t * pTapDelay, - uint16_t maxDelay, - uint32_t blockSize); - - - - - - - /** - * @brief floating-point Circular write function. - */ - __STATIC_FORCEINLINE void arm_circularWrite_f32( - int32_t * circBuffer, - int32_t L, - uint16_t * writeOffset, - int32_t bufferInc, - const int32_t * src, - int32_t srcInc, - uint32_t blockSize) - { - uint32_t i = 0U; - int32_t wOffset; - - /* Copy the value of Index pointer that points - * to the current location where the input samples to be copied */ - wOffset = *writeOffset; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the input sample to the circular buffer */ - circBuffer[wOffset] = *src; - - /* Update the input pointer */ - src += srcInc; - - /* Circularly update wOffset. 
Watch out for positive and negative value */ - wOffset += bufferInc; - if (wOffset >= L) - wOffset -= L; - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *writeOffset = (uint16_t)wOffset; - } - - - - /** - * @brief floating-point Circular Read function. - */ - __STATIC_FORCEINLINE void arm_circularRead_f32( - int32_t * circBuffer, - int32_t L, - int32_t * readOffset, - int32_t bufferInc, - int32_t * dst, - int32_t * dst_base, - int32_t dst_length, - int32_t dstInc, - uint32_t blockSize) - { - uint32_t i = 0U; - int32_t rOffset; - int32_t* dst_end; - - /* Copy the value of Index pointer that points - * to the current location from where the input samples to be read */ - rOffset = *readOffset; - dst_end = dst_base + dst_length; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the sample from the circular buffer to the destination buffer */ - *dst = circBuffer[rOffset]; - - /* Update the input pointer */ - dst += dstInc; - - if (dst == dst_end) - { - dst = dst_base; - } - - /* Circularly update rOffset. Watch out for positive and negative value */ - rOffset += bufferInc; - - if (rOffset >= L) - { - rOffset -= L; - } - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *readOffset = rOffset; - } - - - /** - * @brief Q15 Circular write function. - */ - __STATIC_FORCEINLINE void arm_circularWrite_q15( - q15_t * circBuffer, - int32_t L, - uint16_t * writeOffset, - int32_t bufferInc, - const q15_t * src, - int32_t srcInc, - uint32_t blockSize) - { - uint32_t i = 0U; - int32_t wOffset; - - /* Copy the value of Index pointer that points - * to the current location where the input samples to be copied */ - wOffset = *writeOffset; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the input sample to the circular buffer */ - circBuffer[wOffset] = *src; - - /* Update the input pointer */ - src += srcInc; - - /* Circularly update wOffset. 
Watch out for positive and negative value */ - wOffset += bufferInc; - if (wOffset >= L) - wOffset -= L; - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *writeOffset = (uint16_t)wOffset; - } - - - /** - * @brief Q15 Circular Read function. - */ - __STATIC_FORCEINLINE void arm_circularRead_q15( - q15_t * circBuffer, - int32_t L, - int32_t * readOffset, - int32_t bufferInc, - q15_t * dst, - q15_t * dst_base, - int32_t dst_length, - int32_t dstInc, - uint32_t blockSize) - { - uint32_t i = 0; - int32_t rOffset; - q15_t* dst_end; - - /* Copy the value of Index pointer that points - * to the current location from where the input samples to be read */ - rOffset = *readOffset; - - dst_end = dst_base + dst_length; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the sample from the circular buffer to the destination buffer */ - *dst = circBuffer[rOffset]; - - /* Update the input pointer */ - dst += dstInc; - - if (dst == dst_end) - { - dst = dst_base; - } - - /* Circularly update wOffset. Watch out for positive and negative value */ - rOffset += bufferInc; - - if (rOffset >= L) - { - rOffset -= L; - } - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *readOffset = rOffset; - } - - - /** - * @brief Q7 Circular write function. - */ - __STATIC_FORCEINLINE void arm_circularWrite_q7( - q7_t * circBuffer, - int32_t L, - uint16_t * writeOffset, - int32_t bufferInc, - const q7_t * src, - int32_t srcInc, - uint32_t blockSize) - { - uint32_t i = 0U; - int32_t wOffset; - - /* Copy the value of Index pointer that points - * to the current location where the input samples to be copied */ - wOffset = *writeOffset; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the input sample to the circular buffer */ - circBuffer[wOffset] = *src; - - /* Update the input pointer */ - src += srcInc; - - /* Circularly update wOffset. 
Watch out for positive and negative value */ - wOffset += bufferInc; - if (wOffset >= L) - wOffset -= L; - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *writeOffset = (uint16_t)wOffset; - } - - - /** - * @brief Q7 Circular Read function. - */ - __STATIC_FORCEINLINE void arm_circularRead_q7( - q7_t * circBuffer, - int32_t L, - int32_t * readOffset, - int32_t bufferInc, - q7_t * dst, - q7_t * dst_base, - int32_t dst_length, - int32_t dstInc, - uint32_t blockSize) - { - uint32_t i = 0; - int32_t rOffset; - q7_t* dst_end; - - /* Copy the value of Index pointer that points - * to the current location from where the input samples to be read */ - rOffset = *readOffset; - - dst_end = dst_base + dst_length; - - /* Loop over the blockSize */ - i = blockSize; - - while (i > 0U) - { - /* copy the sample from the circular buffer to the destination buffer */ - *dst = circBuffer[rOffset]; - - /* Update the input pointer */ - dst += dstInc; - - if (dst == dst_end) - { - dst = dst_base; - } - - /* Circularly update rOffset. Watch out for positive and negative value */ - rOffset += bufferInc; - - if (rOffset >= L) - { - rOffset -= L; - } - - /* Decrement the loop counter */ - i--; - } - - /* Update the index pointer */ - *readOffset = rOffset; - } - - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _FILTERING_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/interpolation_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/interpolation_functions.h deleted file mode 100755 index 81034cd..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/interpolation_functions.h +++ /dev/null @@ -1,318 +0,0 @@ -/****************************************************************************** - * @file interpolation_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. 
July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _INTERPOLATION_FUNCTIONS_H_ -#define _INTERPOLATION_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - -/** - * @defgroup groupInterpolation Interpolation Functions - * These functions perform 1- and 2-dimensional interpolation of data. - * Linear interpolation is used for 1-dimensional data and - * bilinear interpolation is used for 2-dimensional data. - */ - - - /** - * @brief Instance structure for the floating-point Linear Interpolate function. - */ - typedef struct - { - uint32_t nValues; /**< nValues */ - float32_t x1; /**< x1 */ - float32_t xSpacing; /**< xSpacing */ - float32_t *pYData; /**< pointer to the table of Y values */ - } arm_linear_interp_instance_f32; - - /** - * @brief Instance structure for the floating-point bilinear interpolation function. - */ - typedef struct - { - uint16_t numRows; /**< number of rows in the data table. */ - uint16_t numCols; /**< number of columns in the data table. */ - float32_t *pData; /**< points to the data table. 
*/ - } arm_bilinear_interp_instance_f32; - - /** - * @brief Instance structure for the Q31 bilinear interpolation function. - */ - typedef struct - { - uint16_t numRows; /**< number of rows in the data table. */ - uint16_t numCols; /**< number of columns in the data table. */ - q31_t *pData; /**< points to the data table. */ - } arm_bilinear_interp_instance_q31; - - /** - * @brief Instance structure for the Q15 bilinear interpolation function. - */ - typedef struct - { - uint16_t numRows; /**< number of rows in the data table. */ - uint16_t numCols; /**< number of columns in the data table. */ - q15_t *pData; /**< points to the data table. */ - } arm_bilinear_interp_instance_q15; - - /** - * @brief Instance structure for the Q15 bilinear interpolation function. - */ - typedef struct - { - uint16_t numRows; /**< number of rows in the data table. */ - uint16_t numCols; /**< number of columns in the data table. */ - q7_t *pData; /**< points to the data table. */ - } arm_bilinear_interp_instance_q7; - - - /** - * @brief Struct for specifying cubic spline type - */ - typedef enum - { - ARM_SPLINE_NATURAL = 0, /**< Natural spline */ - ARM_SPLINE_PARABOLIC_RUNOUT = 1 /**< Parabolic runout spline */ - } arm_spline_type; - - /** - * @brief Instance structure for the floating-point cubic spline interpolation. - */ - typedef struct - { - arm_spline_type type; /**< Type (boundary conditions) */ - const float32_t * x; /**< x values */ - const float32_t * y; /**< y values */ - uint32_t n_x; /**< Number of known data points */ - float32_t * coeffs; /**< Coefficients buffer (b,c, and d) */ - } arm_spline_instance_f32; - - - - - /** - * @ingroup groupInterpolation - */ - - /** - * @addtogroup SplineInterpolate - * @{ - */ - - - /** - * @brief Processing function for the floating-point cubic spline interpolation. - * @param[in] S points to an instance of the floating-point spline structure. - * @param[in] xq points to the x values ot the interpolated data points. 
- * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples of output data. - */ - void arm_spline_f32( - arm_spline_instance_f32 * S, - const float32_t * xq, - float32_t * pDst, - uint32_t blockSize); - - /** - * @brief Initialization function for the floating-point cubic spline interpolation. - * @param[in,out] S points to an instance of the floating-point spline structure. - * @param[in] type type of cubic spline interpolation (boundary conditions) - * @param[in] x points to the x values of the known data points. - * @param[in] y points to the y values of the known data points. - * @param[in] n number of known data points. - * @param[in] coeffs coefficients array for b, c, and d - * @param[in] tempBuffer buffer array for internal computations - */ - void arm_spline_init_f32( - arm_spline_instance_f32 * S, - arm_spline_type type, - const float32_t * x, - const float32_t * y, - uint32_t n, - float32_t * coeffs, - float32_t * tempBuffer); - - - /** - * @} end of SplineInterpolate group - */ - - - - /** - * @addtogroup LinearInterpolate - * @{ - */ - - /** - * @brief Process function for the floating-point Linear Interpolation Function. - * @param[in,out] S is an instance of the floating-point Linear Interpolation structure - * @param[in] x input sample to process - * @return y processed output sample. - * - */ - float32_t arm_linear_interp_f32( - arm_linear_interp_instance_f32 * S, - float32_t x); - - /** - * - * @brief Process function for the Q31 Linear Interpolation Function. - * @param[in] pYData pointer to Q31 Linear Interpolation table - * @param[in] x input sample to process - * @param[in] nValues number of table values - * @return y processed output sample. - * - * \par - * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part. - * This function can support maximum of table size 2^12. 
- * - */ - q31_t arm_linear_interp_q31( - q31_t * pYData, - q31_t x, - uint32_t nValues); - - /** - * - * @brief Process function for the Q15 Linear Interpolation Function. - * @param[in] pYData pointer to Q15 Linear Interpolation table - * @param[in] x input sample to process - * @param[in] nValues number of table values - * @return y processed output sample. - * - * \par - * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part. - * This function can support maximum of table size 2^12. - * - */ - q15_t arm_linear_interp_q15( - q15_t * pYData, - q31_t x, - uint32_t nValues); - - /** - * - * @brief Process function for the Q7 Linear Interpolation Function. - * @param[in] pYData pointer to Q7 Linear Interpolation table - * @param[in] x input sample to process - * @param[in] nValues number of table values - * @return y processed output sample. - * - * \par - * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part. - * This function can support maximum of table size 2^12. - */ -q7_t arm_linear_interp_q7( - q7_t * pYData, - q31_t x, - uint32_t nValues); - - /** - * @} end of LinearInterpolate group - */ - - - - - /** - * @ingroup groupInterpolation - */ - - - /** - * @addtogroup BilinearInterpolate - * @{ - */ - - /** - * @brief Floating-point bilinear interpolation. - * @param[in,out] S points to an instance of the interpolation structure. - * @param[in] X interpolation coordinate. - * @param[in] Y interpolation coordinate. - * @return out interpolated value. - */ - float32_t arm_bilinear_interp_f32( - const arm_bilinear_interp_instance_f32 * S, - float32_t X, - float32_t Y); - - /** - * @brief Q31 bilinear interpolation. - * @param[in,out] S points to an instance of the interpolation structure. - * @param[in] X interpolation coordinate in 12.20 format. - * @param[in] Y interpolation coordinate in 12.20 format. - * @return out interpolated value. 
- */ - q31_t arm_bilinear_interp_q31( - arm_bilinear_interp_instance_q31 * S, - q31_t X, - q31_t Y); - - - /** - * @brief Q15 bilinear interpolation. - * @param[in,out] S points to an instance of the interpolation structure. - * @param[in] X interpolation coordinate in 12.20 format. - * @param[in] Y interpolation coordinate in 12.20 format. - * @return out interpolated value. - */ - q15_t arm_bilinear_interp_q15( - arm_bilinear_interp_instance_q15 * S, - q31_t X, - q31_t Y); - - /** - * @brief Q7 bilinear interpolation. - * @param[in,out] S points to an instance of the interpolation structure. - * @param[in] X interpolation coordinate in 12.20 format. - * @param[in] Y interpolation coordinate in 12.20 format. - * @return out interpolated value. - */ - q7_t arm_bilinear_interp_q7( - arm_bilinear_interp_instance_q7 * S, - q31_t X, - q31_t Y); - /** - * @} end of BilinearInterpolate group - */ - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _INTERPOLATION_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/matrix_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/matrix_functions.h deleted file mode 100755 index e2330c6..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/matrix_functions.h +++ /dev/null @@ -1,597 +0,0 @@ -/****************************************************************************** - * @file matrix_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _MATRIX_FUNCTIONS_H_ -#define _MATRIX_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup groupMatrix Matrix Functions - * - * This set of functions provides basic matrix math operations. - * The functions operate on matrix data structures. For example, - * the type - * definition for the floating-point matrix structure is shown - * below: - *
- *     typedef struct
- *     {
- *       uint16_t numRows;     // number of rows of the matrix.
- *       uint16_t numCols;     // number of columns of the matrix.
- *       float32_t *pData;     // points to the data of the matrix.
- *     } arm_matrix_instance_f32;
- * 
- * There are similar definitions for Q15 and Q31 data types. - * - * The structure specifies the size of the matrix and then points to - * an array of data. The array is of size numRows X numCols - * and the values are arranged in row order. That is, the - * matrix element (i, j) is stored at: - *
- *     pData[i*numCols + j]
- * 
- * - * \par Init Functions - * There is an associated initialization function for each type of matrix - * data structure. - * The initialization function sets the values of the internal structure fields. - * Refer to \ref arm_mat_init_f32(), \ref arm_mat_init_q31() and \ref arm_mat_init_q15() - * for floating-point, Q31 and Q15 types, respectively. - * - * \par - * Use of the initialization function is optional. However, if initialization function is used - * then the instance structure cannot be placed into a const data section. - * To place the instance structure in a const data - * section, manually initialize the data structure. For example: - *
- * arm_matrix_instance_f32 S = {nRows, nColumns, pData};
- * arm_matrix_instance_q31 S = {nRows, nColumns, pData};
- * arm_matrix_instance_q15 S = {nRows, nColumns, pData};
- * 
- * where nRows specifies the number of rows, nColumns - * specifies the number of columns, and pData points to the - * data array. - * - * \par Size Checking - * By default all of the matrix functions perform size checking on the input and - * output matrices. For example, the matrix addition function verifies that the - * two input matrices and the output matrix all have the same number of rows and - * columns. If the size check fails the functions return: - *
- *     ARM_MATH_SIZE_MISMATCH
- * 
- * Otherwise the functions return - *
- *     ARM_MATH_SUCCESS
- * 
- * There is some overhead associated with this matrix size checking. - * The matrix size checking is enabled via the \#define - *
- *     ARM_MATH_MATRIX_CHECK
- * 
- * within the library project settings. By default this macro is defined - * and size checking is enabled. By changing the project settings and - * undefining this macro size checking is eliminated and the functions - * run a bit faster. With size checking disabled the functions always - * return ARM_MATH_SUCCESS. - */ - - /** - * @brief Instance structure for the floating-point matrix structure. - */ - typedef struct - { - uint16_t numRows; /**< number of rows of the matrix. */ - uint16_t numCols; /**< number of columns of the matrix. */ - float32_t *pData; /**< points to the data of the matrix. */ - } arm_matrix_instance_f32; - - /** - * @brief Instance structure for the floating-point matrix structure. - */ - typedef struct - { - uint16_t numRows; /**< number of rows of the matrix. */ - uint16_t numCols; /**< number of columns of the matrix. */ - float64_t *pData; /**< points to the data of the matrix. */ - } arm_matrix_instance_f64; - - /** - * @brief Instance structure for the Q7 matrix structure. - */ - typedef struct - { - uint16_t numRows; /**< number of rows of the matrix. */ - uint16_t numCols; /**< number of columns of the matrix. */ - q7_t *pData; /**< points to the data of the matrix. */ - } arm_matrix_instance_q7; - - /** - * @brief Instance structure for the Q15 matrix structure. - */ - typedef struct - { - uint16_t numRows; /**< number of rows of the matrix. */ - uint16_t numCols; /**< number of columns of the matrix. */ - q15_t *pData; /**< points to the data of the matrix. */ - } arm_matrix_instance_q15; - - /** - * @brief Instance structure for the Q31 matrix structure. - */ - typedef struct - { - uint16_t numRows; /**< number of rows of the matrix. */ - uint16_t numCols; /**< number of columns of the matrix. */ - q31_t *pData; /**< points to the data of the matrix. */ - } arm_matrix_instance_q31; - - /** - * @brief Floating-point matrix addition. 
- * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_add_f32( - const arm_matrix_instance_f32 * pSrcA, - const arm_matrix_instance_f32 * pSrcB, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Q15 matrix addition. - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_add_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst); - - /** - * @brief Q31 matrix addition. - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_add_q31( - const arm_matrix_instance_q31 * pSrcA, - const arm_matrix_instance_q31 * pSrcB, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Floating-point, complex, matrix multiplication. - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_cmplx_mult_f32( - const arm_matrix_instance_f32 * pSrcA, - const arm_matrix_instance_f32 * pSrcB, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Q15, complex, matrix multiplication. - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_cmplx_mult_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst, - q15_t * pScratch); - - /** - * @brief Q31, complex, matrix multiplication. - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_cmplx_mult_q31( - const arm_matrix_instance_q31 * pSrcA, - const arm_matrix_instance_q31 * pSrcB, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Floating-point matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_trans_f32( - const arm_matrix_instance_f32 * pSrc, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Floating-point complex matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_cmplx_trans_f32( - const arm_matrix_instance_f32 * pSrc, - arm_matrix_instance_f32 * pDst); - - - /** - * @brief Q15 matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_trans_q15( - const arm_matrix_instance_q15 * pSrc, - arm_matrix_instance_q15 * pDst); - - /** - * @brief Q15 complex matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_cmplx_trans_q15( - const arm_matrix_instance_q15 * pSrc, - arm_matrix_instance_q15 * pDst); - - /** - * @brief Q7 matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_trans_q7( - const arm_matrix_instance_q7 * pSrc, - arm_matrix_instance_q7 * pDst); - - /** - * @brief Q31 matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_trans_q31( - const arm_matrix_instance_q31 * pSrc, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Q31 complex matrix transpose. - * @param[in] pSrc points to the input matrix - * @param[out] pDst points to the output matrix - * @return The function returns either ARM_MATH_SIZE_MISMATCH - * or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_cmplx_trans_q31( - const arm_matrix_instance_q31 * pSrc, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Floating-point matrix multiplication - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_mult_f32( - const arm_matrix_instance_f32 * pSrcA, - const arm_matrix_instance_f32 * pSrcB, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Floating-point matrix and vector multiplication - * @param[in] pSrcMat points to the input matrix structure - * @param[in] pVec points to vector - * @param[out] pDst points to output vector - */ -void arm_mat_vec_mult_f32( - const arm_matrix_instance_f32 *pSrcMat, - const float32_t *pVec, - float32_t *pDst); - - /** - * @brief Q7 matrix multiplication - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @param[in] pState points to the array for storing intermediate results - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_mult_q7( - const arm_matrix_instance_q7 * pSrcA, - const arm_matrix_instance_q7 * pSrcB, - arm_matrix_instance_q7 * pDst, - q7_t * pState); - - /** - * @brief Q7 matrix and vector multiplication - * @param[in] pSrcMat points to the input matrix structure - * @param[in] pVec points to vector - * @param[out] pDst points to output vector - */ -void arm_mat_vec_mult_q7( - const arm_matrix_instance_q7 *pSrcMat, - const q7_t *pVec, - q7_t *pDst); - - /** - * @brief Q15 matrix multiplication - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @param[in] pState points to the array for storing intermediate results - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_mult_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst, - q15_t * pState); - - /** - * @brief Q15 matrix and vector multiplication - * @param[in] pSrcMat points to the input matrix structure - * @param[in] pVec points to vector - * @param[out] pDst points to output vector - */ -void arm_mat_vec_mult_q15( - const arm_matrix_instance_q15 *pSrcMat, - const q15_t *pVec, - q15_t *pDst); - - /** - * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @param[in] pState points to the array for storing intermediate results - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_mult_fast_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst, - q15_t * pState); - - /** - * @brief Q31 matrix multiplication - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_mult_q31( - const arm_matrix_instance_q31 * pSrcA, - const arm_matrix_instance_q31 * pSrcB, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Q31 matrix and vector multiplication - * @param[in] pSrcMat points to the input matrix structure - * @param[in] pVec points to vector - * @param[out] pDst points to output vector - */ -void arm_mat_vec_mult_q31( - const arm_matrix_instance_q31 *pSrcMat, - const q31_t *pVec, - q31_t *pDst); - - /** - * @brief Q31 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4 - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_mult_fast_q31( - const arm_matrix_instance_q31 * pSrcA, - const arm_matrix_instance_q31 * pSrcB, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Floating-point matrix subtraction - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_sub_f32( - const arm_matrix_instance_f32 * pSrcA, - const arm_matrix_instance_f32 * pSrcB, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Q15 matrix subtraction - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_sub_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst); - - /** - * @brief Q31 matrix subtraction - * @param[in] pSrcA points to the first input matrix structure - * @param[in] pSrcB points to the second input matrix structure - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_sub_q31( - const arm_matrix_instance_q31 * pSrcA, - const arm_matrix_instance_q31 * pSrcB, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Floating-point matrix scaling. - * @param[in] pSrc points to the input matrix - * @param[in] scale scale factor - * @param[out] pDst points to the output matrix - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_scale_f32( - const arm_matrix_instance_f32 * pSrc, - float32_t scale, - arm_matrix_instance_f32 * pDst); - - /** - * @brief Q15 matrix scaling. - * @param[in] pSrc points to input matrix - * @param[in] scaleFract fractional portion of the scale factor - * @param[in] shift number of bits to shift the result by - * @param[out] pDst points to output matrix - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ -arm_status arm_mat_scale_q15( - const arm_matrix_instance_q15 * pSrc, - q15_t scaleFract, - int32_t shift, - arm_matrix_instance_q15 * pDst); - - /** - * @brief Q31 matrix scaling. - * @param[in] pSrc points to input matrix - * @param[in] scaleFract fractional portion of the scale factor - * @param[in] shift number of bits to shift the result by - * @param[out] pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - */ -arm_status arm_mat_scale_q31( - const arm_matrix_instance_q31 * pSrc, - q31_t scaleFract, - int32_t shift, - arm_matrix_instance_q31 * pDst); - - /** - * @brief Q31 matrix initialization. - * @param[in,out] S points to an instance of the floating-point matrix structure. - * @param[in] nRows number of rows in the matrix. - * @param[in] nColumns number of columns in the matrix. - * @param[in] pData points to the matrix data array. - */ -void arm_mat_init_q31( - arm_matrix_instance_q31 * S, - uint16_t nRows, - uint16_t nColumns, - q31_t * pData); - - /** - * @brief Q15 matrix initialization. - * @param[in,out] S points to an instance of the floating-point matrix structure. - * @param[in] nRows number of rows in the matrix. - * @param[in] nColumns number of columns in the matrix. - * @param[in] pData points to the matrix data array. - */ -void arm_mat_init_q15( - arm_matrix_instance_q15 * S, - uint16_t nRows, - uint16_t nColumns, - q15_t * pData); - - /** - * @brief Floating-point matrix initialization. - * @param[in,out] S points to an instance of the floating-point matrix structure. - * @param[in] nRows number of rows in the matrix. - * @param[in] nColumns number of columns in the matrix. - * @param[in] pData points to the matrix data array. - */ -void arm_mat_init_f32( - arm_matrix_instance_f32 * S, - uint16_t nRows, - uint16_t nColumns, - float32_t * pData); - - - - /** - * @brief Floating-point matrix inverse. 
- * @param[in] src points to the instance of the input floating-point matrix structure. - * @param[out] dst points to the instance of the output floating-point matrix structure. - * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match. - * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR. - */ - arm_status arm_mat_inverse_f32( - const arm_matrix_instance_f32 * src, - arm_matrix_instance_f32 * dst); - - - /** - * @brief Floating-point matrix inverse. - * @param[in] src points to the instance of the input floating-point matrix structure. - * @param[out] dst points to the instance of the output floating-point matrix structure. - * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match. - * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR. - */ - arm_status arm_mat_inverse_f64( - const arm_matrix_instance_f64 * src, - arm_matrix_instance_f64 * dst); - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _MATRIX_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/none.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/none.h deleted file mode 100755 index 62f2d14..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/none.h +++ /dev/null @@ -1,576 +0,0 @@ -/****************************************************************************** - * @file none.h - * @brief Intrinsincs when no DSP extension available - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Definitions in this file are allowing to reuse some versions of the -CMSIS-DSP to build on a core (M0 for instance) or a host where -DSP extension are not available. - -Ideally a pure C version should have been used instead. -But those are not always available or use a restricted set -of intrinsics. - -*/ - -#ifndef _NONE_H_ -#define _NONE_H_ - -#include "arm_math_types.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - - -/* - -Normally those kind of definitions are in a compiler file -in Core or Core_A. - -But for MSVC compiler it is a bit special. The goal is very specific -to CMSIS-DSP and only to allow the use of this library from other -systems like Python or Matlab. - -MSVC is not going to be used to cross-compile to ARM. So, having a MSVC -compiler file in Core or Core_A would not make sense. 
- -*/ -#if defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) - __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t data) - { - if (data == 0U) { return 32U; } - - uint32_t count = 0U; - uint32_t mask = 0x80000000U; - - while ((data & mask) == 0U) - { - count += 1U; - mask = mask >> 1U; - } - return count; - } - - __STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) - { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; - } - - __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) - { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; - } - - /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -#endif - -/** - * @brief Clips Q63 to Q31 values. - */ - __STATIC_FORCEINLINE q31_t clip_q63_to_q31( - q63_t x) - { - return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ? - ((0x7FFFFFFF ^ ((q31_t) (x >> 63)))) : (q31_t) x; - } - - /** - * @brief Clips Q63 to Q15 values. - */ - __STATIC_FORCEINLINE q15_t clip_q63_to_q15( - q63_t x) - { - return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ? - ((0x7FFF ^ ((q15_t) (x >> 63)))) : (q15_t) (x >> 15); - } - - /** - * @brief Clips Q31 to Q7 values. - */ - __STATIC_FORCEINLINE q7_t clip_q31_to_q7( - q31_t x) - { - return ((q31_t) (x >> 24) != ((q31_t) x >> 23)) ? 
- ((0x7F ^ ((q7_t) (x >> 31)))) : (q7_t) x; - } - - /** - * @brief Clips Q31 to Q15 values. - */ - __STATIC_FORCEINLINE q15_t clip_q31_to_q15( - q31_t x) - { - return ((q31_t) (x >> 16) != ((q31_t) x >> 15)) ? - ((0x7FFF ^ ((q15_t) (x >> 31)))) : (q15_t) x; - } - - /** - * @brief Multiplies 32 X 64 and returns 32 bit result in 2.30 format. - */ - __STATIC_FORCEINLINE q63_t mult32x64( - q63_t x, - q31_t y) - { - return ((((q63_t) (x & 0x00000000FFFFFFFF) * y) >> 32) + - (((q63_t) (x >> 32) * y) ) ); - } - -/* SMMLAR */ -#define multAcc_32x32_keep32_R(a, x, y) \ - a = (q31_t) (((((q63_t) a) << 32) + ((q63_t) x * y) + 0x80000000LL ) >> 32) - -/* SMMLSR */ -#define multSub_32x32_keep32_R(a, x, y) \ - a = (q31_t) (((((q63_t) a) << 32) - ((q63_t) x * y) + 0x80000000LL ) >> 32) - -/* SMMULR */ -#define mult_32x32_keep32_R(a, x, y) \ - a = (q31_t) (((q63_t) x * y + 0x80000000LL ) >> 32) - -/* SMMLA */ -#define multAcc_32x32_keep32(a, x, y) \ - a += (q31_t) (((q63_t) x * y) >> 32) - -/* SMMLS */ -#define multSub_32x32_keep32(a, x, y) \ - a -= (q31_t) (((q63_t) x * y) >> 32) - -/* SMMUL */ -#define mult_32x32_keep32(a, x, y) \ - a = (q31_t) (((q63_t) x * y ) >> 32) - -#ifndef ARM_MATH_DSP - /** - * @brief definition to pack two 16 bit values. - */ - #define __PKHBT(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0x0000FFFF) | \ - (((int32_t)(ARG2) << ARG3) & (int32_t)0xFFFF0000) ) - #define __PKHTB(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0xFFFF0000) | \ - (((int32_t)(ARG2) >> ARG3) & (int32_t)0x0000FFFF) ) -#endif - - /** - * @brief definition to pack four 8 bit values. 
- */ -#ifndef ARM_MATH_BIG_ENDIAN - #define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v0) << 0) & (int32_t)0x000000FF) | \ - (((int32_t)(v1) << 8) & (int32_t)0x0000FF00) | \ - (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) | \ - (((int32_t)(v3) << 24) & (int32_t)0xFF000000) ) -#else - #define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v3) << 0) & (int32_t)0x000000FF) | \ - (((int32_t)(v2) << 8) & (int32_t)0x0000FF00) | \ - (((int32_t)(v1) << 16) & (int32_t)0x00FF0000) | \ - (((int32_t)(v0) << 24) & (int32_t)0xFF000000) ) -#endif - - - - -/* - * @brief C custom defined intrinsic functions - */ -#if !defined (ARM_MATH_DSP) - - - /* - * @brief C custom defined QADD8 - */ - __STATIC_FORCEINLINE uint32_t __QADD8( - uint32_t x, - uint32_t y) - { - q31_t r, s, t, u; - - r = __SSAT(((((q31_t)x << 24) >> 24) + (((q31_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF; - s = __SSAT(((((q31_t)x << 16) >> 24) + (((q31_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF; - t = __SSAT(((((q31_t)x << 8) >> 24) + (((q31_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF; - u = __SSAT(((((q31_t)x ) >> 24) + (((q31_t)y ) >> 24)), 8) & (int32_t)0x000000FF; - - return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r ))); - } - - - /* - * @brief C custom defined QSUB8 - */ - __STATIC_FORCEINLINE uint32_t __QSUB8( - uint32_t x, - uint32_t y) - { - q31_t r, s, t, u; - - r = __SSAT(((((q31_t)x << 24) >> 24) - (((q31_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF; - s = __SSAT(((((q31_t)x << 16) >> 24) - (((q31_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF; - t = __SSAT(((((q31_t)x << 8) >> 24) - (((q31_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF; - u = __SSAT(((((q31_t)x ) >> 24) - (((q31_t)y ) >> 24)), 8) & (int32_t)0x000000FF; - - return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r ))); - } - - - /* - * @brief C custom defined QADD16 - */ - __STATIC_FORCEINLINE uint32_t __QADD16( - uint32_t x, - uint32_t y) - { -/* q31_t r, s; without initialisation 'arm_offset_q15 test' fails but 'intrinsic' tests pass! 
for armCC */ - q31_t r = 0, s = 0; - - r = __SSAT(((((q31_t)x << 16) >> 16) + (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; - s = __SSAT(((((q31_t)x ) >> 16) + (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined SHADD16 - */ - __STATIC_FORCEINLINE uint32_t __SHADD16( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = (((((q31_t)x << 16) >> 16) + (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; - s = (((((q31_t)x ) >> 16) + (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined QSUB16 - */ - __STATIC_FORCEINLINE uint32_t __QSUB16( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = __SSAT(((((q31_t)x << 16) >> 16) - (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; - s = __SSAT(((((q31_t)x ) >> 16) - (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined SHSUB16 - */ - __STATIC_FORCEINLINE uint32_t __SHSUB16( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = (((((q31_t)x << 16) >> 16) - (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; - s = (((((q31_t)x ) >> 16) - (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined QASX - */ - __STATIC_FORCEINLINE uint32_t __QASX( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = __SSAT(((((q31_t)x << 16) >> 16) - (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; - s = __SSAT(((((q31_t)x ) >> 16) + (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined SHASX - */ - __STATIC_FORCEINLINE uint32_t __SHASX( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = (((((q31_t)x << 16) >> 16) - (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; - s = (((((q31_t)x ) >> 16) + (((q31_t)y << 16) >> 16)) >> 1) & 
(int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined QSAX - */ - __STATIC_FORCEINLINE uint32_t __QSAX( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = __SSAT(((((q31_t)x << 16) >> 16) + (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; - s = __SSAT(((((q31_t)x ) >> 16) - (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined SHSAX - */ - __STATIC_FORCEINLINE uint32_t __SHSAX( - uint32_t x, - uint32_t y) - { - q31_t r, s; - - r = (((((q31_t)x << 16) >> 16) + (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; - s = (((((q31_t)x ) >> 16) - (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; - - return ((uint32_t)((s << 16) | (r ))); - } - - - /* - * @brief C custom defined SMUSDX - */ - __STATIC_FORCEINLINE uint32_t __SMUSDX( - uint32_t x, - uint32_t y) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) - - ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) )); - } - - /* - * @brief C custom defined SMUADX - */ - __STATIC_FORCEINLINE uint32_t __SMUADX( - uint32_t x, - uint32_t y) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) )); - } - - - /* - * @brief C custom defined QADD - */ - __STATIC_FORCEINLINE int32_t __QADD( - int32_t x, - int32_t y) - { - return ((int32_t)(clip_q63_to_q31((q63_t)x + (q31_t)y))); - } - - - /* - * @brief C custom defined QSUB - */ - __STATIC_FORCEINLINE int32_t __QSUB( - int32_t x, - int32_t y) - { - return ((int32_t)(clip_q63_to_q31((q63_t)x - (q31_t)y))); - } - - - /* - * @brief C custom defined SMLAD - */ - __STATIC_FORCEINLINE uint32_t __SMLAD( - uint32_t x, - uint32_t y, - uint32_t sum) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) + - ( ((q31_t)sum ) ) )); - } - - - /* - * @brief C custom defined SMLADX - */ - 
__STATIC_FORCEINLINE uint32_t __SMLADX( - uint32_t x, - uint32_t y, - uint32_t sum) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + - ( ((q31_t)sum ) ) )); - } - - - /* - * @brief C custom defined SMLSDX - */ - __STATIC_FORCEINLINE uint32_t __SMLSDX( - uint32_t x, - uint32_t y, - uint32_t sum) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) - - ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + - ( ((q31_t)sum ) ) )); - } - - - /* - * @brief C custom defined SMLALD - */ - __STATIC_FORCEINLINE uint64_t __SMLALD( - uint32_t x, - uint32_t y, - uint64_t sum) - { -/* return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) + ((q15_t) x * (q15_t) y)); */ - return ((uint64_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) + - ( ((q63_t)sum ) ) )); - } - - - /* - * @brief C custom defined SMLALDX - */ - __STATIC_FORCEINLINE uint64_t __SMLALDX( - uint32_t x, - uint32_t y, - uint64_t sum) - { -/* return (sum + ((q15_t) (x >> 16) * (q15_t) y)) + ((q15_t) x * (q15_t) (y >> 16)); */ - return ((uint64_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + - ( ((q63_t)sum ) ) )); - } - - - /* - * @brief C custom defined SMUAD - */ - __STATIC_FORCEINLINE uint32_t __SMUAD( - uint32_t x, - uint32_t y) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + - ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) )); - } - - - /* - * @brief C custom defined SMUSD - */ - __STATIC_FORCEINLINE uint32_t __SMUSD( - uint32_t x, - uint32_t y) - { - return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) - - ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) )); - } - - - /* - * @brief C custom defined SXTB16 - */ - __STATIC_FORCEINLINE uint32_t __SXTB16( - uint32_t x) - { - return ((uint32_t)(((((q31_t)x << 24) >> 24) & (q31_t)0x0000FFFF) | - ((((q31_t)x << 8) 
>> 8) & (q31_t)0xFFFF0000) )); - } - - /* - * @brief C custom defined SMMLA - */ - __STATIC_FORCEINLINE int32_t __SMMLA( - int32_t x, - int32_t y, - int32_t sum) - { - return (sum + (int32_t) (((int64_t) x * y) >> 32)); - } - -#endif /* !defined (ARM_MATH_DSP) */ - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _TRANSFORM_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/statistics_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/statistics_functions.h deleted file mode 100755 index 68e145d..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/statistics_functions.h +++ /dev/null @@ -1,483 +0,0 @@ -/****************************************************************************** - * @file statistics_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef _STATISTICS_FUNCTIONS_H_ -#define _STATISTICS_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/basic_math_functions.h" -#include "dsp/fast_math_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - -/** - * @defgroup groupStats Statistics Functions - */ - -/** - * @brief Computation of the LogSumExp - * - * In probabilistic computations, the dynamic of the probability values can be very - * wide because they come from gaussian functions. - * To avoid underflow and overflow issues, the values are represented by their log. - * In this representation, multiplying the original exp values is easy : their logs are added. - * But adding the original exp values is requiring some special handling and it is the - * goal of the LogSumExp function. - * - * If the values are x1...xn, the function is computing: - * - * ln(exp(x1) + ... + exp(xn)) and the computation is done in such a way that - * rounding issues are minimised. - * - * The max xm of the values is extracted and the function is computing: - * xm + ln(exp(x1 - xm) + ... + exp(xn - xm)) - * - * @param[in] *in Pointer to an array of input values. - * @param[in] blockSize Number of samples in the input array. - * @return LogSumExp - * - */ - - -float32_t arm_logsumexp_f32(const float32_t *in, uint32_t blockSize); - -/** - * @brief Dot product with log arithmetic - * - * Vectors are containing the log of the samples - * - * @param[in] pSrcA points to the first input vector - * @param[in] pSrcB points to the second input vector - * @param[in] blockSize number of samples in each vector - * @param[in] pTmpBuffer temporary buffer of length blockSize - * @return The log of the dot product . 
- * - */ - - -float32_t arm_logsumexp_dot_prod_f32(const float32_t * pSrcA, - const float32_t * pSrcB, - uint32_t blockSize, - float32_t *pTmpBuffer); - -/** - * @brief Entropy - * - * @param[in] pSrcA Array of input values. - * @param[in] blockSize Number of samples in the input array. - * @return Entropy -Sum(p ln p) - * - */ - - -float32_t arm_entropy_f32(const float32_t * pSrcA,uint32_t blockSize); - - -/** - * @brief Entropy - * - * @param[in] pSrcA Array of input values. - * @param[in] blockSize Number of samples in the input array. - * @return Entropy -Sum(p ln p) - * - */ - - -float64_t arm_entropy_f64(const float64_t * pSrcA, uint32_t blockSize); - - -/** - * @brief Kullback-Leibler - * - * @param[in] pSrcA Pointer to an array of input values for probability distribution A. - * @param[in] pSrcB Pointer to an array of input values for probability distribution B. - * @param[in] blockSize Number of samples in the input array. - * @return Kullback-Leibler Divergence D(A || B) - * - */ -float32_t arm_kullback_leibler_f32(const float32_t * pSrcA - ,const float32_t * pSrcB - ,uint32_t blockSize); - - -/** - * @brief Kullback-Leibler - * - * @param[in] pSrcA Pointer to an array of input values for probability distribution A. - * @param[in] pSrcB Pointer to an array of input values for probability distribution B. - * @param[in] blockSize Number of samples in the input array. - * @return Kullback-Leibler Divergence D(A || B) - * - */ -float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, - const float64_t * pSrcB, - uint32_t blockSize); - - - /** - * @brief Sum of the squares of the elements of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_power_q31( - const q31_t * pSrc, - uint32_t blockSize, - q63_t * pResult); - - - /** - * @brief Sum of the squares of the elements of a floating-point vector. 
- * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_power_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult); - - - /** - * @brief Sum of the squares of the elements of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_power_q15( - const q15_t * pSrc, - uint32_t blockSize, - q63_t * pResult); - - - /** - * @brief Sum of the squares of the elements of a Q7 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_power_q7( - const q7_t * pSrc, - uint32_t blockSize, - q31_t * pResult); - - - /** - * @brief Mean value of a Q7 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_mean_q7( - const q7_t * pSrc, - uint32_t blockSize, - q7_t * pResult); - - - /** - * @brief Mean value of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_mean_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult); - - - /** - * @brief Mean value of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_mean_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult); - - - /** - * @brief Mean value of a floating-point vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. 
- */ - void arm_mean_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult); - - - /** - * @brief Variance of the elements of a floating-point vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_var_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult); - - - /** - * @brief Variance of the elements of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_var_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult); - - - /** - * @brief Variance of the elements of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_var_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult); - - - /** - * @brief Root Mean Square of the elements of a floating-point vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_rms_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult); - - - /** - * @brief Root Mean Square of the elements of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_rms_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult); - - - /** - * @brief Root Mean Square of the elements of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_rms_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult); - - - /** - * @brief Standard deviation of the elements of a floating-point vector. 
- * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_std_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult); - - - /** - * @brief Standard deviation of the elements of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_std_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult); - - - /** - * @brief Standard deviation of the elements of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output value. - */ - void arm_std_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult); - - - - /** - * @brief Minimum value of a Q7 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] result is output pointer - * @param[in] index is the array index of the minimum value in the input buffer. - */ - void arm_min_q7( - const q7_t * pSrc, - uint32_t blockSize, - q7_t * result, - uint32_t * index); - - - /** - * @brief Minimum value of a Q15 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output pointer - * @param[in] pIndex is the array index of the minimum value in the input buffer. - */ - void arm_min_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult, - uint32_t * pIndex); - - - /** - * @brief Minimum value of a Q31 vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output pointer - * @param[out] pIndex is the array index of the minimum value in the input buffer. 
- */ - void arm_min_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult, - uint32_t * pIndex); - - - /** - * @brief Minimum value of a floating-point vector. - * @param[in] pSrc is input pointer - * @param[in] blockSize is the number of samples to process - * @param[out] pResult is output pointer - * @param[out] pIndex is the array index of the minimum value in the input buffer. - */ - void arm_min_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult, - uint32_t * pIndex); - - -/** - * @brief Maximum value of a Q7 vector. - * @param[in] pSrc points to the input buffer - * @param[in] blockSize length of the input vector - * @param[out] pResult maximum value returned here - * @param[out] pIndex index of maximum value returned here - */ - void arm_max_q7( - const q7_t * pSrc, - uint32_t blockSize, - q7_t * pResult, - uint32_t * pIndex); - - -/** - * @brief Maximum value of a Q15 vector. - * @param[in] pSrc points to the input buffer - * @param[in] blockSize length of the input vector - * @param[out] pResult maximum value returned here - * @param[out] pIndex index of maximum value returned here - */ - void arm_max_q15( - const q15_t * pSrc, - uint32_t blockSize, - q15_t * pResult, - uint32_t * pIndex); - - -/** - * @brief Maximum value of a Q31 vector. - * @param[in] pSrc points to the input buffer - * @param[in] blockSize length of the input vector - * @param[out] pResult maximum value returned here - * @param[out] pIndex index of maximum value returned here - */ - void arm_max_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult, - uint32_t * pIndex); - - -/** - * @brief Maximum value of a floating-point vector. 
- * @param[in] pSrc points to the input buffer - * @param[in] blockSize length of the input vector - * @param[out] pResult maximum value returned here - * @param[out] pIndex index of maximum value returned here - */ - void arm_max_f32( - const float32_t * pSrc, - uint32_t blockSize, - float32_t * pResult, - uint32_t * pIndex); - - /** - @brief Maximum value of a floating-point vector. - @param[in] pSrc points to the input vector - @param[in] blockSize number of samples in input vector - @param[out] pResult maximum value returned here - @return none - */ - void arm_max_no_idx_f32( - const float32_t *pSrc, - uint32_t blockSize, - float32_t *pResult); - - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _STATISTICS_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/support_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/support_functions.h deleted file mode 100755 index f4f3b88..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/support_functions.h +++ /dev/null @@ -1,426 +0,0 @@ -/****************************************************************************** - * @file support_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _SUPPORT_FUNCTIONS_H_ -#define _SUPPORT_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup groupSupport Support Functions - */ - - -/** - * @brief Converts the elements of the floating-point vector to Q31 vector. - * @param[in] pSrc points to the floating-point input vector - * @param[out] pDst points to the Q31 output vector - * @param[in] blockSize length of the input vector - */ - void arm_float_to_q31( - const float32_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the floating-point vector to Q15 vector. - * @param[in] pSrc points to the floating-point input vector - * @param[out] pDst points to the Q15 output vector - * @param[in] blockSize length of the input vector - */ - void arm_float_to_q15( - const float32_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the floating-point vector to Q7 vector. - * @param[in] pSrc points to the floating-point input vector - * @param[out] pDst points to the Q7 output vector - * @param[in] blockSize length of the input vector - */ - void arm_float_to_q7( - const float32_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q31 vector to floating-point vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q31_to_float( - const q31_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q31 vector to Q15 vector. 
- * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q31_to_q15( - const q31_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q31 vector to Q7 vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q31_to_q7( - const q31_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q15 vector to floating-point vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q15_to_float( - const q15_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q15 vector to Q31 vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q15_to_q31( - const q15_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q15 vector to Q7 vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q15_to_q7( - const q15_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q7 vector to floating-point vector. - * @param[in] pSrc is input pointer - * @param[out] pDst is output pointer - * @param[in] blockSize is the number of samples to process - */ - void arm_q7_to_float( - const q7_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q7 vector to Q31 vector. 
- * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_q7_to_q31( - const q7_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Converts the elements of the Q7 vector to Q15 vector. - * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_q7_to_q15( - const q7_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - - - - /** - * @brief Struct for specifying sorting algorithm - */ - typedef enum - { - ARM_SORT_BITONIC = 0, - /**< Bitonic sort */ - ARM_SORT_BUBBLE = 1, - /**< Bubble sort */ - ARM_SORT_HEAP = 2, - /**< Heap sort */ - ARM_SORT_INSERTION = 3, - /**< Insertion sort */ - ARM_SORT_QUICK = 4, - /**< Quick sort */ - ARM_SORT_SELECTION = 5 - /**< Selection sort */ - } arm_sort_alg; - - /** - * @brief Struct for specifying sorting algorithm - */ - typedef enum - { - ARM_SORT_DESCENDING = 0, - /**< Descending order (9 to 0) */ - ARM_SORT_ASCENDING = 1 - /**< Ascending order (0 to 9) */ - } arm_sort_dir; - - /** - * @brief Instance structure for the sorting algorithms. - */ - typedef struct - { - arm_sort_alg alg; /**< Sorting algorithm selected */ - arm_sort_dir dir; /**< Sorting order (direction) */ - } arm_sort_instance_f32; - - /** - * @param[in] S points to an instance of the sorting structure. - * @param[in] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data. - * @param[in] blockSize number of samples to process. - */ - void arm_sort_f32( - const arm_sort_instance_f32 * S, - float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - /** - * @param[in,out] S points to an instance of the sorting structure. - * @param[in] alg Selected algorithm. - * @param[in] dir Sorting order. 
- */ - void arm_sort_init_f32( - arm_sort_instance_f32 * S, - arm_sort_alg alg, - arm_sort_dir dir); - - /** - * @brief Instance structure for the sorting algorithms. - */ - typedef struct - { - arm_sort_dir dir; /**< Sorting order (direction) */ - float32_t * buffer; /**< Working buffer */ - } arm_merge_sort_instance_f32; - - /** - * @param[in] S points to an instance of the sorting structure. - * @param[in,out] pSrc points to the block of input data. - * @param[out] pDst points to the block of output data - * @param[in] blockSize number of samples to process. - */ - void arm_merge_sort_f32( - const arm_merge_sort_instance_f32 * S, - float32_t *pSrc, - float32_t *pDst, - uint32_t blockSize); - - /** - * @param[in,out] S points to an instance of the sorting structure. - * @param[in] dir Sorting order. - * @param[in] buffer Working buffer. - */ - void arm_merge_sort_init_f32( - arm_merge_sort_instance_f32 * S, - arm_sort_dir dir, - float32_t * buffer); - - - - /** - * @brief Copies the elements of a floating-point vector. - * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_copy_f32( - const float32_t * pSrc, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Copies the elements of a Q7 vector. - * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_copy_q7( - const q7_t * pSrc, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Copies the elements of a Q15 vector. - * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_copy_q15( - const q15_t * pSrc, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Copies the elements of a Q31 vector. 
- * @param[in] pSrc input pointer - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_copy_q31( - const q31_t * pSrc, - q31_t * pDst, - uint32_t blockSize); - - - /** - * @brief Fills a constant value into a floating-point vector. - * @param[in] value input value to be filled - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_fill_f32( - float32_t value, - float32_t * pDst, - uint32_t blockSize); - - - /** - * @brief Fills a constant value into a Q7 vector. - * @param[in] value input value to be filled - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_fill_q7( - q7_t value, - q7_t * pDst, - uint32_t blockSize); - - - /** - * @brief Fills a constant value into a Q15 vector. - * @param[in] value input value to be filled - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_fill_q15( - q15_t value, - q15_t * pDst, - uint32_t blockSize); - - - /** - * @brief Fills a constant value into a Q31 vector. - * @param[in] value input value to be filled - * @param[out] pDst output pointer - * @param[in] blockSize number of samples to process - */ - void arm_fill_q31( - q31_t value, - q31_t * pDst, - uint32_t blockSize); - - - - - - - -/** - * @brief Weighted sum - * - * - * @param[in] *in Array of input values. - * @param[in] *weigths Weights - * @param[in] blockSize Number of samples in the input array. 
- * @return Weighted sum - * - */ -float32_t arm_weighted_sum_f32(const float32_t *in - , const float32_t *weigths - , uint32_t blockSize); - - -/** - * @brief Barycenter - * - * - * @param[in] in List of vectors - * @param[in] weights Weights of the vectors - * @param[out] out Barycenter - * @param[in] nbVectors Number of vectors - * @param[in] vecDim Dimension of space (vector dimension) - * @return None - * - */ -void arm_barycenter_f32(const float32_t *in - , const float32_t *weights - , float32_t *out - , uint32_t nbVectors - , uint32_t vecDim); - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _SUPPORT_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_defines.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_defines.h deleted file mode 100755 index 71ad2f7..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_defines.h +++ /dev/null @@ -1,42 +0,0 @@ -/****************************************************************************** - * @file svm_defines.h - * @brief Public header file for CMSIS DSP Library - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef _SVM_DEFINES_H_ -#define _SVM_DEFINES_H_ - -/** - * @brief Struct for specifying SVM Kernel - */ -typedef enum -{ - ARM_ML_KERNEL_LINEAR = 0, - /**< Linear kernel */ - ARM_ML_KERNEL_POLYNOMIAL = 1, - /**< Polynomial kernel */ - ARM_ML_KERNEL_RBF = 2, - /**< Radial Basis Function kernel */ - ARM_ML_KERNEL_SIGMOID = 3 - /**< Sigmoid kernel */ -} arm_ml_kernel_type; - -#endif diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_functions.h deleted file mode 100755 index c50deb5..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/svm_functions.h +++ /dev/null @@ -1,298 +0,0 @@ -/****************************************************************************** - * @file svm_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _SVM_FUNCTIONS_H_ -#define _SVM_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" -#include "dsp/svm_defines.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -#define STEP(x) (x) <= 0 ? 
0 : 1 - -/** - * @defgroup groupSVM SVM Functions - * This set of functions is implementing SVM classification on 2 classes. - * The training must be done from scikit-learn. The parameters can be easily - * generated from the scikit-learn object. Some examples are given in - * DSP/Testing/PatternGeneration/SVM.py - * - * If more than 2 classes are needed, the functions in this folder - * will have to be used, as building blocks, to do multi-class classification. - * - * No multi-class classification is provided in this SVM folder. - * - */ - -/** - * @brief Integer exponentiation - * @param[in] x value - * @param[in] nb integer exponent >= 1 - * @return x^nb - * - */ -__STATIC_INLINE float32_t arm_exponent_f32(float32_t x, int32_t nb) -{ - float32_t r = x; - nb --; - while(nb > 0) - { - r = r * x; - nb--; - } - return(r); -} - - - - - -/** - * @brief Instance structure for linear SVM prediction function. - */ -typedef struct -{ - uint32_t nbOfSupportVectors; /**< Number of support vectors */ - uint32_t vectorDimension; /**< Dimension of vector space */ - float32_t intercept; /**< Intercept */ - const float32_t *dualCoefficients; /**< Dual coefficients */ - const float32_t *supportVectors; /**< Support vectors */ - const int32_t *classes; /**< The two SVM classes */ -} arm_svm_linear_instance_f32; - - -/** - * @brief Instance structure for polynomial SVM prediction function. 
- */ -typedef struct -{ - uint32_t nbOfSupportVectors; /**< Number of support vectors */ - uint32_t vectorDimension; /**< Dimension of vector space */ - float32_t intercept; /**< Intercept */ - const float32_t *dualCoefficients; /**< Dual coefficients */ - const float32_t *supportVectors; /**< Support vectors */ - const int32_t *classes; /**< The two SVM classes */ - int32_t degree; /**< Polynomial degree */ - float32_t coef0; /**< Polynomial constant */ - float32_t gamma; /**< Gamma factor */ -} arm_svm_polynomial_instance_f32; - -/** - * @brief Instance structure for rbf SVM prediction function. - */ -typedef struct -{ - uint32_t nbOfSupportVectors; /**< Number of support vectors */ - uint32_t vectorDimension; /**< Dimension of vector space */ - float32_t intercept; /**< Intercept */ - const float32_t *dualCoefficients; /**< Dual coefficients */ - const float32_t *supportVectors; /**< Support vectors */ - const int32_t *classes; /**< The two SVM classes */ - float32_t gamma; /**< Gamma factor */ -} arm_svm_rbf_instance_f32; - -/** - * @brief Instance structure for sigmoid SVM prediction function. 
- */ -typedef struct -{ - uint32_t nbOfSupportVectors; /**< Number of support vectors */ - uint32_t vectorDimension; /**< Dimension of vector space */ - float32_t intercept; /**< Intercept */ - const float32_t *dualCoefficients; /**< Dual coefficients */ - const float32_t *supportVectors; /**< Support vectors */ - const int32_t *classes; /**< The two SVM classes */ - float32_t coef0; /**< Independant constant */ - float32_t gamma; /**< Gamma factor */ -} arm_svm_sigmoid_instance_f32; - -/** - * @brief SVM linear instance init function - * @param[in] S Parameters for SVM functions - * @param[in] nbOfSupportVectors Number of support vectors - * @param[in] vectorDimension Dimension of vector space - * @param[in] intercept Intercept - * @param[in] dualCoefficients Array of dual coefficients - * @param[in] supportVectors Array of support vectors - * @param[in] classes Array of 2 classes ID - * @return none. - * - */ - - -void arm_svm_linear_init_f32(arm_svm_linear_instance_f32 *S, - uint32_t nbOfSupportVectors, - uint32_t vectorDimension, - float32_t intercept, - const float32_t *dualCoefficients, - const float32_t *supportVectors, - const int32_t *classes); - -/** - * @brief SVM linear prediction - * @param[in] S Pointer to an instance of the linear SVM structure. - * @param[in] in Pointer to input vector - * @param[out] pResult Decision value - * @return none. - * - */ - -void arm_svm_linear_predict_f32(const arm_svm_linear_instance_f32 *S, - const float32_t * in, - int32_t * pResult); - - -/** - * @brief SVM polynomial instance init function - * @param[in] S points to an instance of the polynomial SVM structure. 
- * @param[in] nbOfSupportVectors Number of support vectors - * @param[in] vectorDimension Dimension of vector space - * @param[in] intercept Intercept - * @param[in] dualCoefficients Array of dual coefficients - * @param[in] supportVectors Array of support vectors - * @param[in] classes Array of 2 classes ID - * @param[in] degree Polynomial degree - * @param[in] coef0 coeff0 (scikit-learn terminology) - * @param[in] gamma gamma (scikit-learn terminology) - * @return none. - * - */ - - -void arm_svm_polynomial_init_f32(arm_svm_polynomial_instance_f32 *S, - uint32_t nbOfSupportVectors, - uint32_t vectorDimension, - float32_t intercept, - const float32_t *dualCoefficients, - const float32_t *supportVectors, - const int32_t *classes, - int32_t degree, - float32_t coef0, - float32_t gamma - ); - -/** - * @brief SVM polynomial prediction - * @param[in] S Pointer to an instance of the polynomial SVM structure. - * @param[in] in Pointer to input vector - * @param[out] pResult Decision value - * @return none. - * - */ -void arm_svm_polynomial_predict_f32(const arm_svm_polynomial_instance_f32 *S, - const float32_t * in, - int32_t * pResult); - - -/** - * @brief SVM radial basis function instance init function - * @param[in] S points to an instance of the polynomial SVM structure. - * @param[in] nbOfSupportVectors Number of support vectors - * @param[in] vectorDimension Dimension of vector space - * @param[in] intercept Intercept - * @param[in] dualCoefficients Array of dual coefficients - * @param[in] supportVectors Array of support vectors - * @param[in] classes Array of 2 classes ID - * @param[in] gamma gamma (scikit-learn terminology) - * @return none. 
- * - */ - -void arm_svm_rbf_init_f32(arm_svm_rbf_instance_f32 *S, - uint32_t nbOfSupportVectors, - uint32_t vectorDimension, - float32_t intercept, - const float32_t *dualCoefficients, - const float32_t *supportVectors, - const int32_t *classes, - float32_t gamma - ); - -/** - * @brief SVM rbf prediction - * @param[in] S Pointer to an instance of the rbf SVM structure. - * @param[in] in Pointer to input vector - * @param[out] pResult decision value - * @return none. - * - */ -void arm_svm_rbf_predict_f32(const arm_svm_rbf_instance_f32 *S, - const float32_t * in, - int32_t * pResult); - -/** - * @brief SVM sigmoid instance init function - * @param[in] S points to an instance of the rbf SVM structure. - * @param[in] nbOfSupportVectors Number of support vectors - * @param[in] vectorDimension Dimension of vector space - * @param[in] intercept Intercept - * @param[in] dualCoefficients Array of dual coefficients - * @param[in] supportVectors Array of support vectors - * @param[in] classes Array of 2 classes ID - * @param[in] coef0 coeff0 (scikit-learn terminology) - * @param[in] gamma gamma (scikit-learn terminology) - * @return none. - * - */ - -void arm_svm_sigmoid_init_f32(arm_svm_sigmoid_instance_f32 *S, - uint32_t nbOfSupportVectors, - uint32_t vectorDimension, - float32_t intercept, - const float32_t *dualCoefficients, - const float32_t *supportVectors, - const int32_t *classes, - float32_t coef0, - float32_t gamma - ); - -/** - * @brief SVM sigmoid prediction - * @param[in] S Pointer to an instance of the rbf SVM structure. - * @param[in] in Pointer to input vector - * @param[out] pResult Decision value - * @return none. 
- * - */ -void arm_svm_sigmoid_predict_f32(const arm_svm_sigmoid_instance_f32 *S, - const float32_t * in, - int32_t * pResult); - - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _SVM_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/transform_functions.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/transform_functions.h deleted file mode 100755 index b6e7284..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/transform_functions.h +++ /dev/null @@ -1,591 +0,0 @@ -/****************************************************************************** - * @file transform_functions.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -#ifndef _TRANSFORM_FUNCTIONS_H_ -#define _TRANSFORM_FUNCTIONS_H_ - -#include "arm_math_types.h" -#include "arm_math_memory.h" - -#include "dsp/none.h" -#include "dsp/utils.h" - -#include "dsp/basic_math_functions.h" -#include "dsp/complex_math_functions.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - -/** - * @defgroup groupTransforms Transform Functions - */ - - - /** - * @brief Instance structure for the Q15 CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. 
*/ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const q15_t *pTwiddle; /**< points to the Sin twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ - } arm_cfft_radix2_instance_q15; - -/* Deprecated */ - arm_status arm_cfft_radix2_init_q15( - arm_cfft_radix2_instance_q15 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - -/* Deprecated */ - void arm_cfft_radix2_q15( - const arm_cfft_radix2_instance_q15 * S, - q15_t * pSrc); - - - /** - * @brief Instance structure for the Q15 CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const q15_t *pTwiddle; /**< points to the twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. 
*/ - } arm_cfft_radix4_instance_q15; - -/* Deprecated */ - arm_status arm_cfft_radix4_init_q15( - arm_cfft_radix4_instance_q15 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - -/* Deprecated */ - void arm_cfft_radix4_q15( - const arm_cfft_radix4_instance_q15 * S, - q15_t * pSrc); - - /** - * @brief Instance structure for the Radix-2 Q31 CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const q31_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ - } arm_cfft_radix2_instance_q31; - -/* Deprecated */ - arm_status arm_cfft_radix2_init_q31( - arm_cfft_radix2_instance_q31 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - -/* Deprecated */ - void arm_cfft_radix2_q31( - const arm_cfft_radix2_instance_q31 * S, - q31_t * pSrc); - - /** - * @brief Instance structure for the Q31 CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const q31_t *pTwiddle; /**< points to the twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. 
*/ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ - } arm_cfft_radix4_instance_q31; - -/* Deprecated */ - void arm_cfft_radix4_q31( - const arm_cfft_radix4_instance_q31 * S, - q31_t * pSrc); - -/* Deprecated */ - arm_status arm_cfft_radix4_init_q31( - arm_cfft_radix4_instance_q31 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - - /** - * @brief Instance structure for the floating-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ - float32_t onebyfftLen; /**< value of 1/fftLen. */ - } arm_cfft_radix2_instance_f32; - - -/* Deprecated */ - arm_status arm_cfft_radix2_init_f32( - arm_cfft_radix2_instance_f32 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - -/* Deprecated */ - void arm_cfft_radix2_f32( - const arm_cfft_radix2_instance_f32 * S, - float32_t * pSrc); - - /** - * @brief Instance structure for the floating-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. 
*/ - uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ - const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ - float32_t onebyfftLen; /**< value of 1/fftLen. */ - } arm_cfft_radix4_instance_f32; - - - -/* Deprecated */ - arm_status arm_cfft_radix4_init_f32( - arm_cfft_radix4_instance_f32 * S, - uint16_t fftLen, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - -/* Deprecated */ - void arm_cfft_radix4_f32( - const arm_cfft_radix4_instance_f32 * S, - float32_t * pSrc); - - /** - * @brief Instance structure for the fixed-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - const q15_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t bitRevLength; /**< bit reversal table length. 
*/ -#if defined(ARM_MATH_MVEI) - const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ - const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ - const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ - const q15_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ - const q15_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ - const q15_t *rearranged_twiddle_stride3; -#endif - } arm_cfft_instance_q15; - -arm_status arm_cfft_init_q15( - arm_cfft_instance_q15 * S, - uint16_t fftLen); - -void arm_cfft_q15( - const arm_cfft_instance_q15 * S, - q15_t * p1, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - - /** - * @brief Instance structure for the fixed-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - const q31_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t bitRevLength; /**< bit reversal table length. 
*/ -#if defined(ARM_MATH_MVEI) - const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ - const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ - const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ - const q31_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ - const q31_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ - const q31_t *rearranged_twiddle_stride3; -#endif - } arm_cfft_instance_q31; - -arm_status arm_cfft_init_q31( - arm_cfft_instance_q31 * S, - uint16_t fftLen); - -void arm_cfft_q31( - const arm_cfft_instance_q31 * S, - q31_t * p1, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - - /** - * @brief Instance structure for the floating-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t bitRevLength; /**< bit reversal table length. 
*/ -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) - const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ - const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ - const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ - const float32_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ - const float32_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ - const float32_t *rearranged_twiddle_stride3; -#endif - } arm_cfft_instance_f32; - - - - arm_status arm_cfft_init_f32( - arm_cfft_instance_f32 * S, - uint16_t fftLen); - - void arm_cfft_f32( - const arm_cfft_instance_f32 * S, - float32_t * p1, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - - - /** - * @brief Instance structure for the Double Precision Floating-point CFFT/CIFFT function. - */ - typedef struct - { - uint16_t fftLen; /**< length of the FFT. */ - const float64_t *pTwiddle; /**< points to the Twiddle factor table. */ - const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ - uint16_t bitRevLength; /**< bit reversal table length. */ - } arm_cfft_instance_f64; - - arm_status arm_cfft_init_f64( - arm_cfft_instance_f64 * S, - uint16_t fftLen); - - void arm_cfft_f64( - const arm_cfft_instance_f64 * S, - float64_t * p1, - uint8_t ifftFlag, - uint8_t bitReverseFlag); - - /** - * @brief Instance structure for the Q15 RFFT/RIFFT function. - */ - typedef struct - { - uint32_t fftLenReal; /**< length of the real FFT. */ - uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ - uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. 
*/ - uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - const q15_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ - const q15_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ -#if defined(ARM_MATH_MVEI) - arm_cfft_instance_q15 cfftInst; -#else - const arm_cfft_instance_q15 *pCfft; /**< points to the complex FFT instance. */ -#endif - } arm_rfft_instance_q15; - - arm_status arm_rfft_init_q15( - arm_rfft_instance_q15 * S, - uint32_t fftLenReal, - uint32_t ifftFlagR, - uint32_t bitReverseFlag); - - void arm_rfft_q15( - const arm_rfft_instance_q15 * S, - q15_t * pSrc, - q15_t * pDst); - - /** - * @brief Instance structure for the Q31 RFFT/RIFFT function. - */ - typedef struct - { - uint32_t fftLenReal; /**< length of the real FFT. */ - uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ - uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */ - uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - const q31_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ - const q31_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ -#if defined(ARM_MATH_MVEI) - arm_cfft_instance_q31 cfftInst; -#else - const arm_cfft_instance_q31 *pCfft; /**< points to the complex FFT instance. */ -#endif - } arm_rfft_instance_q31; - - arm_status arm_rfft_init_q31( - arm_rfft_instance_q31 * S, - uint32_t fftLenReal, - uint32_t ifftFlagR, - uint32_t bitReverseFlag); - - void arm_rfft_q31( - const arm_rfft_instance_q31 * S, - q31_t * pSrc, - q31_t * pDst); - - /** - * @brief Instance structure for the floating-point RFFT/RIFFT function. - */ - typedef struct - { - uint32_t fftLenReal; /**< length of the real FFT. 
*/ - uint16_t fftLenBy2; /**< length of the complex FFT. */ - uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ - uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */ - uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ - const float32_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ - const float32_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ - arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */ - } arm_rfft_instance_f32; - - arm_status arm_rfft_init_f32( - arm_rfft_instance_f32 * S, - arm_cfft_radix4_instance_f32 * S_CFFT, - uint32_t fftLenReal, - uint32_t ifftFlagR, - uint32_t bitReverseFlag); - - void arm_rfft_f32( - const arm_rfft_instance_f32 * S, - float32_t * pSrc, - float32_t * pDst); - - /** - * @brief Instance structure for the Double Precision Floating-point RFFT/RIFFT function. - */ -typedef struct - { - arm_cfft_instance_f64 Sint; /**< Internal CFFT structure. */ - uint16_t fftLenRFFT; /**< length of the real sequence */ - const float64_t * pTwiddleRFFT; /**< Twiddle factors real stage */ - } arm_rfft_fast_instance_f64 ; - -arm_status arm_rfft_fast_init_f64 ( - arm_rfft_fast_instance_f64 * S, - uint16_t fftLen); - - -void arm_rfft_fast_f64( - arm_rfft_fast_instance_f64 * S, - float64_t * p, float64_t * pOut, - uint8_t ifftFlag); - - - /** - * @brief Instance structure for the floating-point RFFT/RIFFT function. - */ -typedef struct - { - arm_cfft_instance_f32 Sint; /**< Internal CFFT structure. 
*/ - uint16_t fftLenRFFT; /**< length of the real sequence */ - const float32_t * pTwiddleRFFT; /**< Twiddle factors real stage */ - } arm_rfft_fast_instance_f32 ; - -arm_status arm_rfft_fast_init_f32 ( - arm_rfft_fast_instance_f32 * S, - uint16_t fftLen); - - - void arm_rfft_fast_f32( - const arm_rfft_fast_instance_f32 * S, - float32_t * p, float32_t * pOut, - uint8_t ifftFlag); - - /** - * @brief Instance structure for the floating-point DCT4/IDCT4 function. - */ - typedef struct - { - uint16_t N; /**< length of the DCT4. */ - uint16_t Nby2; /**< half of the length of the DCT4. */ - float32_t normalize; /**< normalizing factor. */ - const float32_t *pTwiddle; /**< points to the twiddle factor table. */ - const float32_t *pCosFactor; /**< points to the cosFactor table. */ - arm_rfft_instance_f32 *pRfft; /**< points to the real FFT instance. */ - arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */ - } arm_dct4_instance_f32; - - - /** - * @brief Initialization function for the floating-point DCT4/IDCT4. - * @param[in,out] S points to an instance of floating-point DCT4/IDCT4 structure. - * @param[in] S_RFFT points to an instance of floating-point RFFT/RIFFT structure. - * @param[in] S_CFFT points to an instance of floating-point CFFT/CIFFT structure. - * @param[in] N length of the DCT4. - * @param[in] Nby2 half of the length of the DCT4. - * @param[in] normalize normalizing factor. - * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if fftLenReal is not a supported transform length. - */ - arm_status arm_dct4_init_f32( - arm_dct4_instance_f32 * S, - arm_rfft_instance_f32 * S_RFFT, - arm_cfft_radix4_instance_f32 * S_CFFT, - uint16_t N, - uint16_t Nby2, - float32_t normalize); - - - /** - * @brief Processing function for the floating-point DCT4/IDCT4. - * @param[in] S points to an instance of the floating-point DCT4/IDCT4 structure. - * @param[in] pState points to state buffer. 
- * @param[in,out] pInlineBuffer points to the in-place input and output buffer. - */ - void arm_dct4_f32( - const arm_dct4_instance_f32 * S, - float32_t * pState, - float32_t * pInlineBuffer); - - - /** - * @brief Instance structure for the Q31 DCT4/IDCT4 function. - */ - typedef struct - { - uint16_t N; /**< length of the DCT4. */ - uint16_t Nby2; /**< half of the length of the DCT4. */ - q31_t normalize; /**< normalizing factor. */ - const q31_t *pTwiddle; /**< points to the twiddle factor table. */ - const q31_t *pCosFactor; /**< points to the cosFactor table. */ - arm_rfft_instance_q31 *pRfft; /**< points to the real FFT instance. */ - arm_cfft_radix4_instance_q31 *pCfft; /**< points to the complex FFT instance. */ - } arm_dct4_instance_q31; - - - /** - * @brief Initialization function for the Q31 DCT4/IDCT4. - * @param[in,out] S points to an instance of Q31 DCT4/IDCT4 structure. - * @param[in] S_RFFT points to an instance of Q31 RFFT/RIFFT structure - * @param[in] S_CFFT points to an instance of Q31 CFFT/CIFFT structure - * @param[in] N length of the DCT4. - * @param[in] Nby2 half of the length of the DCT4. - * @param[in] normalize normalizing factor. - * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length. - */ - arm_status arm_dct4_init_q31( - arm_dct4_instance_q31 * S, - arm_rfft_instance_q31 * S_RFFT, - arm_cfft_radix4_instance_q31 * S_CFFT, - uint16_t N, - uint16_t Nby2, - q31_t normalize); - - - /** - * @brief Processing function for the Q31 DCT4/IDCT4. - * @param[in] S points to an instance of the Q31 DCT4 structure. - * @param[in] pState points to state buffer. - * @param[in,out] pInlineBuffer points to the in-place input and output buffer. - */ - void arm_dct4_q31( - const arm_dct4_instance_q31 * S, - q31_t * pState, - q31_t * pInlineBuffer); - - - /** - * @brief Instance structure for the Q15 DCT4/IDCT4 function. 
- */ - typedef struct - { - uint16_t N; /**< length of the DCT4. */ - uint16_t Nby2; /**< half of the length of the DCT4. */ - q15_t normalize; /**< normalizing factor. */ - const q15_t *pTwiddle; /**< points to the twiddle factor table. */ - const q15_t *pCosFactor; /**< points to the cosFactor table. */ - arm_rfft_instance_q15 *pRfft; /**< points to the real FFT instance. */ - arm_cfft_radix4_instance_q15 *pCfft; /**< points to the complex FFT instance. */ - } arm_dct4_instance_q15; - - - /** - * @brief Initialization function for the Q15 DCT4/IDCT4. - * @param[in,out] S points to an instance of Q15 DCT4/IDCT4 structure. - * @param[in] S_RFFT points to an instance of Q15 RFFT/RIFFT structure. - * @param[in] S_CFFT points to an instance of Q15 CFFT/CIFFT structure. - * @param[in] N length of the DCT4. - * @param[in] Nby2 half of the length of the DCT4. - * @param[in] normalize normalizing factor. - * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length. - */ - arm_status arm_dct4_init_q15( - arm_dct4_instance_q15 * S, - arm_rfft_instance_q15 * S_RFFT, - arm_cfft_radix4_instance_q15 * S_CFFT, - uint16_t N, - uint16_t Nby2, - q15_t normalize); - - - /** - * @brief Processing function for the Q15 DCT4/IDCT4. - * @param[in] S points to an instance of the Q15 DCT4 structure. - * @param[in] pState points to state buffer. - * @param[in,out] pInlineBuffer points to the in-place input and output buffer. 
- */ - void arm_dct4_q15( - const arm_dct4_instance_q15 * S, - q15_t * pState, - q15_t * pInlineBuffer); - - - -#ifdef __cplusplus -} -#endif - -#endif /* ifndef _TRANSFORM_FUNCTIONS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/utils.h b/src/third_party/cmsis/CMSIS/DSP/Include/dsp/utils.h deleted file mode 100755 index 794023c..0000000 --- a/src/third_party/cmsis/CMSIS/DSP/Include/dsp/utils.h +++ /dev/null @@ -1,239 +0,0 @@ -/****************************************************************************** - * @file arm_math_utils.h - * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 - ******************************************************************************/ -/* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _ARM_MATH_UTILS_H_ - -#define _ARM_MATH_UTILS_H_ - -#include "arm_math_types.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - - /** - * @brief Macros required for reciprocal calculation in Normalized LMS - */ - -#define INDEX_MASK 0x0000003F - - -#define SQ(x) ((x) * (x)) - - - - /** - * @brief Function to Calculates 1/in (reciprocal) value of Q31 Data type. 
- */ - __STATIC_FORCEINLINE uint32_t arm_recip_q31( - q31_t in, - q31_t * dst, - const q31_t * pRecipTable) - { - q31_t out; - uint32_t tempVal; - uint32_t index, i; - uint32_t signBits; - - if (in > 0) - { - signBits = ((uint32_t) (__CLZ( in) - 1)); - } - else - { - signBits = ((uint32_t) (__CLZ(-in) - 1)); - } - - /* Convert input sample to 1.31 format */ - in = (in << signBits); - - /* calculation of index for initial approximated Val */ - index = (uint32_t)(in >> 24); - index = (index & INDEX_MASK); - - /* 1.31 with exp 1 */ - out = pRecipTable[index]; - - /* calculation of reciprocal value */ - /* running approximation for two iterations */ - for (i = 0U; i < 2U; i++) - { - tempVal = (uint32_t) (((q63_t) in * out) >> 31); - tempVal = 0x7FFFFFFFu - tempVal; - /* 1.31 with exp 1 */ - /* out = (q31_t) (((q63_t) out * tempVal) >> 30); */ - out = clip_q63_to_q31(((q63_t) out * tempVal) >> 30); - } - - /* write output */ - *dst = out; - - /* return num of signbits of out = 1/in value */ - return (signBits + 1U); - } - - - /** - * @brief Function to Calculates 1/in (reciprocal) value of Q15 Data type. 
- */ - __STATIC_FORCEINLINE uint32_t arm_recip_q15( - q15_t in, - q15_t * dst, - const q15_t * pRecipTable) - { - q15_t out = 0; - uint32_t tempVal = 0; - uint32_t index = 0, i = 0; - uint32_t signBits = 0; - - if (in > 0) - { - signBits = ((uint32_t)(__CLZ( in) - 17)); - } - else - { - signBits = ((uint32_t)(__CLZ(-in) - 17)); - } - - /* Convert input sample to 1.15 format */ - in = (in << signBits); - - /* calculation of index for initial approximated Val */ - index = (uint32_t)(in >> 8); - index = (index & INDEX_MASK); - - /* 1.15 with exp 1 */ - out = pRecipTable[index]; - - /* calculation of reciprocal value */ - /* running approximation for two iterations */ - for (i = 0U; i < 2U; i++) - { - tempVal = (uint32_t) (((q31_t) in * out) >> 15); - tempVal = 0x7FFFu - tempVal; - /* 1.15 with exp 1 */ - out = (q15_t) (((q31_t) out * tempVal) >> 14); - /* out = clip_q31_to_q15(((q31_t) out * tempVal) >> 14); */ - } - - /* write output */ - *dst = out; - - /* return num of signbits of out = 1/in value */ - return (signBits + 1); - } - - -/** - * @brief 64-bit to 32-bit unsigned normalization - * @param[in] in is input unsigned long long value - * @param[out] normalized is the 32-bit normalized value - * @param[out] norm is norm scale - */ -__STATIC_INLINE void arm_norm_64_to_32u(uint64_t in, int32_t * normalized, int32_t *norm) -{ - int32_t n1; - int32_t hi = (int32_t) (in >> 32); - int32_t lo = (int32_t) ((in << 32) >> 32); - - n1 = __CLZ(hi) - 32; - if (!n1) - { - /* - * input fits in 32-bit - */ - n1 = __CLZ(lo); - if (!n1) - { - /* - * MSB set, need to scale down by 1 - */ - *norm = -1; - *normalized = (((uint32_t) lo) >> 1); - } else - { - if (n1 == 32) - { - /* - * input is zero - */ - *norm = 0; - *normalized = 0; - } else - { - /* - * 32-bit normalization - */ - *norm = n1 - 1; - *normalized = lo << *norm; - } - } - } else - { - /* - * input fits in 64-bit - */ - n1 = 1 - n1; - *norm = -n1; - /* - * 64 bit normalization - */ - *normalized = (((uint32_t) lo) >> 
n1) | (hi << (32 - n1)); - } -} - -__STATIC_INLINE q31_t arm_div_q63_to_q31(q63_t num, q31_t den) -{ - q31_t result; - uint64_t absNum; - int32_t normalized; - int32_t norm; - - /* - * if sum fits in 32bits - * avoid costly 64-bit division - */ - absNum = num > 0 ? num : -num; - arm_norm_64_to_32u(absNum, &normalized, &norm); - if (norm > 0) - /* - * 32-bit division - */ - result = (q31_t) num / den; - else - /* - * 64-bit division - */ - result = (q31_t) (num / den); - - return result; -} - - -#ifdef __cplusplus -} -#endif - -#endif /*ifndef _ARM_MATH_UTILS_H_ */ diff --git a/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_tables.h b/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_tables.h deleted file mode 100644 index 36be5a8..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_tables.h +++ /dev/null @@ -1,56 +0,0 @@ -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_tables.h - * Description: Extern declaration for NN tables - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef _ARM_NN_TABLES_H -#define _ARM_NN_TABLES_H - -#include "arm_math.h" - -/** -* @brief tables for various activation functions -* -*/ - -extern const q15_t sigmoidTable_q15[256]; -extern const q7_t sigmoidTable_q7[256]; - -extern const q7_t tanhTable_q7[256]; -extern const q15_t tanhTable_q15[256]; - - /** - * @brief 2-way tables for various activation functions - * - * 2-way table, H table for value larger than 1/4 - * L table for value smaller than 1/4, H table for remaining - * We have this only for the q15_t version. It does not make - * sense to have it for q7_t type - */ -extern const q15_t sigmoidHTable_q15[192]; -extern const q15_t sigmoidLTable_q15[128]; - -#endif /* ARM_NN_TABLES_H */ diff --git a/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_types.h b/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_types.h deleted file mode 100644 index 4e825c5..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Include/arm_nn_types.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_types.h - * Description: Public header file to contain the CMSIS-NN structs for the - * TensorFlowLite micro compliant functions - * - * $Date: April 23, 2020 - * $Revision: V.0.5.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - - -#ifndef _ARM_NN_TYPES_H -#define _ARM_NN_TYPES_H - -/** CMSIS-NN object to contain the width and height of a tile */ -typedef struct -{ - int32_t w; /**< Width */ - int32_t h; /**< Height */ -} cmsis_nn_tile; - -/** CMSIS-NN object used for the function context. */ -typedef struct -{ - void *buf; /**< Pointer to a buffer needed for the optimization */ - int32_t size; /**< Buffer size */ -} cmsis_nn_context; - -/** CMSIS-NN object to contain the dimensions of the tensors */ -typedef struct -{ - int32_t n; /**< Generic dimension to contain either the batch size or output channels. 
Please refer to the function documentation for more information */ - int32_t h; /**< Height */ - int32_t w; /**< Width */ - int32_t c; /**< Input channels */ -} cmsis_nn_dims; - -/** CMSIS-NN object for the per-channel quantization parameters */ -typedef struct -{ - int32_t *multiplier; /**< Multiplier values */ - int32_t *shift; /**< Shift values */ -} cmsis_nn_per_channel_quant_params; - -/** CMSIS-NN object for the per-tensor quantization parameters */ -typedef struct -{ - int32_t multiplier; /**< Multiplier value */ - int32_t shift; /**< Shift value */ -} cmsis_nn_per_tensor_quant_params; - -/** CMSIS-NN object for the quantized Relu activation */ -typedef struct -{ - int32_t min; /**< Min value used to clamp the result */ - int32_t max; /**< Max value used to clamp the result */ -} cmsis_nn_activation; - -/** CMSIS-NN object for the convolution layer parameters */ -typedef struct -{ - int32_t input_offset; /**< Zero value for the input tensor */ - int32_t output_offset; /**< Zero value for the output tensor */ - cmsis_nn_tile stride; - cmsis_nn_tile padding; - cmsis_nn_tile dilation; - cmsis_nn_activation activation; -} cmsis_nn_conv_params; - -/** CMSIS-NN object for Depthwise convolution layer parameters */ -typedef struct -{ - int32_t input_offset; /**< Zero value for the input tensor */ - int32_t output_offset; /**< Zero value for the output tensor */ - int32_t ch_mult; /**< Channel Multiplier. 
ch_mult * in_ch = out_ch */ - cmsis_nn_tile stride; - cmsis_nn_tile padding; - cmsis_nn_tile dilation; - cmsis_nn_activation activation; -} cmsis_nn_dw_conv_params; -/** CMSIS-NN object for pooling layer parameters */ -typedef struct -{ - cmsis_nn_tile stride; - cmsis_nn_tile padding; - cmsis_nn_activation activation; -} cmsis_nn_pool_params; - -/** CMSIS-NN object for Fully Connected layer parameters */ -typedef struct -{ - int32_t input_offset; /**< Zero value for the input tensor */ - int32_t filter_offset; /**< Zero value for the filter tensor */ - int32_t output_offset; /**< Zero value for the output tensor */ - cmsis_nn_activation activation; -} cmsis_nn_fc_params; - -/** CMSIS-NN object for SVDF layer parameters */ -typedef struct -{ - int32_t rank; - int32_t input_offset; /**< Zero value for the input tensor */ - int32_t output_offset; /**< Zero value for the output tensor */ - cmsis_nn_activation input_activation; - cmsis_nn_activation output_activation; -} cmsis_nn_svdf_params; - -#endif // _ARM_NN_TYPES_H - - diff --git a/src/third_party/cmsis/CMSIS/NN/Include/arm_nnfunctions.h b/src/third_party/cmsis/CMSIS/NN/Include/arm_nnfunctions.h deleted file mode 100644 index 45ee891..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Include/arm_nnfunctions.h +++ /dev/null @@ -1,2124 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nnfunctions.h - * Description: Public header file for CMSIS NN Library - * - * $Date: August 21, 2020 - * $Revision: V.6.5.2 - * - * Target Processor: Cortex-M CPUs - * -------------------------------------------------------------------- */ - -/** - \mainpage CMSIS NN Software Library - * - * Introduction - * ------------ - * - * This user manual describes the CMSIS NN software library, - * a collection of efficient neural network kernels developed to maximize the - * performance and minimize the memory footprint of neural networks on Cortex-M processor cores. - * - * The library is divided into a number of functions each covering a specific category: - * - Convolution Functions - * - Activation Functions - * - Fully-connected Layer Functions - * - SVDF Layer Functions - * - Pooling Functions - * - Softmax Functions - * - Basic math Functions - * - * The library has separate functions for operating on different weight and activation data - * types including 8-bit integers (q7_t) and 16-bit integers (q15_t). The descrition of the - * kernels are included in the function description. The implementation details are also - * described in this paper [1]. - * - * Function Classification - * -------- - * The functions can be classified into two segments - * - Legacy functions supporting ARM's internal symmetric quantization(8 bits). - * - Functions that support TensorFlow Lite framework with symmetric quantization(8 bits). - * - * The legacy functions can be identified with their suffix of _q7 or _q15 and are no new development is done there. The article in [2] describes in detail - * how to run a network using the legacy functions. - * - * The functions supporting TensorFlow Lite framework is identified by the _s8 suffix and can be invoked from TFL micro. The functions are bit exact to - * TensorFlow Lite. 
Refer to the TensorFlow's documentation in [3] on how to run a TensorFlow Lite model using optimized CMSIS-NN kernels. - * - * Block Diagram - * -------- - * \image html CMSIS-NN-OVERVIEW.PNG - * - * Examples - * -------- - * - * The library ships with a number of examples which demonstrate how to use the library functions. - * - * Pre-processor Macros - * ------------ - * - * Each library project have different pre-processor macros. - * - * - ARM_MATH_DSP: - * - * Define macro ARM_MATH_DSP, If the silicon supports DSP instructions(DSP extension). - * - * - ARM_MATH_MVEI: - * - * Define macro ARM_MATH_MVEI, If the silicon supports M-Profile Vector Extension. - - * - ARM_MATH_AUTOVECTORIZE - * Used in conjucture with ARM_MATH_MVEI to let the compiler auto vectorize for the functions that uses inline assembly. - * It does not affect functions that use C or intrinsics. - * - ARM_MATH_BIG_ENDIAN: - * - * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. This is supported only for the legacy functions i.e, functions targetted at - * TensorFlow Lite do not support big endianness. By default library builds for little endian targets. - * - * - ARM_NN_TRUNCATE: - * - * Define macro ARM_NN_TRUNCATE to use floor instead of round-to-the-nearest-int for the computation. - * - * Upcoming Interface Change - * -------- - * Starting from the 1.4.0 next release, CMSIS-NN will gradually switch to a new API interface to: - * - * -# have a stable API - * -# avoid passing many variables by value - * -# improve security - * -# improve validation - * -# improve code readability - * - * The upcoming API interface change will be based on "struct" and only affect the TensorFlowLite micro compliant APIs [4] (functions with _s8 suffix) - * - * Below you can find a snapshot of how the new API interface will look like (names can change) - * - * i.e. 
arm_convolve_1x1_s8_fast - * - * Current API interface | New API interface proposal - * ------------- | ------------- - * const q7_t *input | const cmsis_nn_context &ctx - * const uint16_t input_x | const cmsis_nn_conv_params ¶ms - * const uint16_t input_y | const cmsis_nn_dims &input_dims - * const uint16_t input_ch | const q7_t *input_data - * const uint16_t input_batches | const cmsis_nn_dims &filter_dims - * const q7_t *kernel | const q7_t *filter_data - * const uint16_t output_ch | const cmsis_nn_dims &bias_dims - * const uint16_t pad_x | const q31_t *bias_data - * const uint16_t pad_y | const cmsis_nn_dims &output_dims - * const uint16_t stride_x | q7_t *output_data - * const uint16_t stride_y |
- * const int32_t *bias |
- * q7_t *output |
- * const int32_t *output_shift |
- * const int32_t *output_mult |
- * const int32_t out_offset |
- * const int32_t input_offset |
- * const int32_t out_activation_min |
- * const int32_t out_activation_max |
- * const uint16_t output_x |
- * const uint16_t output_y |
- * q15_t *buffer_a |
- * - * Copyright Notice - * ------------ - * - * Copyright (C) 2010-2019 Arm Limited. All rights reserved. - * - * [1] CMSIS-NN: Efficient Neural Network Kernels for Arm Cortex-M CPUs https://arxiv.org/abs/1801.06601 - * - * [2] Converting a Neural Network for Arm Cortex-M with CMSIS-NN - * https://developer.arm.com/solutions/machine-learning-on-arm/developer-material/how-to-guides/converting-a-neural-network-for-arm-cortex-m-with-cmsis-nn/single-page - * [3] https://www.tensorflow.org/lite/microcontrollers/library - * - * [4] https://github.com/ARM-software/CMSIS_5/tree/develop/CMSIS/NN#legacy-vs-tfl-micro-compliant-apis - */ - -/** - * @defgroup groupNN Neural Network Functions - * A collection of functions to perform basic operations for neural network layers. Functions with a _s8 suffix support - * TensorFlow Lite framework. - */ - -#ifndef _ARM_NNFUNCTIONS_H -#define _ARM_NNFUNCTIONS_H - -#include "arm_nnsupportfunctions.h" -#include "arm_nn_tables.h" -#include "arm_nn_types.h" - -#define USE_INTRINSIC - -//#define ARM_NN_TRUNCATE /* This config the rounding model to floor or round to the nearest int */ - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup NNConv Convolution Functions - * - * Collection of convolution, depthwise convolution functions and their variants. - * - * The convolution is implemented in 2 steps: im2col and GEMM - * - * im2col is a process of converting each patch of image data into - * a column. After im2col, the convolution is computed as matrix-matrix - * multiplication. - * - * To reduce the memory footprint, the im2col is performed partially. - * Each iteration, only a few column (i.e., patches) are generated and - * computed with GEMM kernels similar to CMSIS-DSP arm_mat_mult functions. - * - */ - - /** - * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in cmsis-nn to perform the convolution. 
- * - * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation. - arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - */ - arm_status arm_convolve_wrapper_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for arm_convolve_wrapper_s8 - * - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). 
- * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params* conv_params, - const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims, - const cmsis_nn_dims* output_dims); - - /** - * @brief Basic s8 convolution function - * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation. - arm_convolve_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * 1. 
Supported framework: TensorFlow Lite micro - * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details. - * - */ - arm_status arm_convolve_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for s8 convolution function - * - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims); - - /** - * @brief Basic Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS 
- * - */ - arm_status arm_convolve_HWC_q7_basic(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Basic Q7 convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - */ - arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t 
out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Basic Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - */ - arm_status arm_convolve_HWC_q15_basic(const q15_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * 
@param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - arm_status arm_convolve_HWC_q7_fast(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space 
for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - - arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function 
returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - * This function implement convolution with 1x1 kernel size (i.e., dim_kernel_x=1 - * and dim_kernel_y=1). It can be used for - * second half of MobileNets after depthwise separable convolution. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast s8 version for 1x1 convolution (non-square shape) - * - * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation. - arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] - * @param[in] filter_data Filter data pointer. 
Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# input_dims->c is a multiple of 4 - * -# conv_params->padding.w = conv_params->padding.h = 0 - * -# conv_params->stride.w = conv_params->stride.h = 1 - * - */ - arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for arm_convolve_1x1_s8_fast - * - * @param[in] input_dims Input (activation) dimensions - * @return The function returns the required buffer size in bytes - * - */ - int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims* input_dims); - - /** - * @brief 1xn convolution - * - * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation. - arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. 
- * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal spatial filter dimension - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# input_dims->n equals 1 - * -# ouput_dims->w is a multiple of 4 - * -# Explicit constraints(since it is for 1xN convolution) - * -## input_dims->h equals 1 - * -## output_dims->h equals 1 - * -## filter_dims->h equals 1 - *@todo Remove constraint on output_dims->w to make the function generic. - * - */ - arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data); - - /** - * @brief Get the required additional buffer size for 1xn convolution - * - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal spatial filter dimension - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims); - - /** - * @brief Q7 version of convolution for RGB image - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This kernel is written exclusively for convolution with ch_im_in - * equals 3. This applies on the first layer of CNNs which has input - * image with RGB format. 
- */ - - arm_status arm_convolve_HWC_q7_RGB(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - - arm_status arm_convolve_HWC_q15_fast(const q15_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Fast Q15 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * - */ - - arm_status - arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Q7 depthwise separable convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - - arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Q7 depthwise separable convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding sizes x - * @param[in] padding_y padding sizes y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB); - - /** - * @brief Wrapper function to pick the right optimized s8 depthwise convolution function - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if required. - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each - * output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Batch argument N is not used and assumed to be 1. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. 
Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful completion. - * - * @details - * - Supported framework: TensorFlow Lite - * - Picks one of the the following functions - * -# arm_depthwise_conv_s8() - * -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only - * -# arm_depthwise_conv_s8_opt() - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the boundary. - */ - arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() - * - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->input_offset : [-128, 127] - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Batch argument N is not used and assumed to be 1. - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @return Size of additional memory required for optimizations in bytes. 
- * - */ - int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims); - - /** - * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * exists if additional memory is. - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->input_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each - * output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] - * Batch argument N is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - Supported framework: TensorFlow Lite - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. 
- */ - arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on - * the input arguments(documented below). Refer arm_depthwise_conv_s8() for function - * argument details. - * - * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors - * ARM_MATH_ARGUMENT_ERROR - Unsupported pad size along the x axis - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# Number of input channel equals number of output channels - * -# Filter height and width equals 3 - * -# Padding along x is either 0 or 1. - * - */ - arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel. - * Refer arm_depthwise_conv_s8() for function argument details. 
- * - * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - input channel != output channel or - * ch_mult != 1 - * ARM_MATH_SUCCESS - Successful operation - * - * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out - * for the following if MVE optimizations(Arm Helium Technology) are used. - * - Output shift - * - Output multiplier - * - Output bias - * - kernel - * @details - * - Supported framework: TensorFlow Lite - * - The following constrains on the arguments apply - * -# Number of input channel equals number of output channels or ch_mult equals 1 - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - Reccomended when number of channels is 4 or greater. - * - */ - arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for optimized s8 depthwise convolution - * function with constraint that in_channel equals out_channel. - * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] - * Batch argument N is not used. - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims); - - /** - * @defgroup FC Fully-connected Layer Functions - * - * Collection of fully-connected and matrix multiplication functions. 
- * - * Fully-connected layer is basically a matrix-vector multiplication - * with bias. The matrix is the weights and the input/output vectors - * are the activation values. Supported {weight, activation} precisions - * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}. - * - * Here we have two types of kernel functions. The basic function - * implements the function using regular GEMV approach. The opt functions - * operates with weights in interleaved formats. - * - */ - - /** - * @brief Q7 basic fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q7(const q7_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut, - q15_t * vec_buffer); - - /** - * @brief Basic s8 Fully Connected function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] fc_params Fully Connected layer parameters (e.g. strides, dilations, pads,...) - * Range of fc_params->input_offset : [-127, 128] - * Range of fc_params->filter_offset : [-127, 128] - * Range of fc_params->output_offset : [-128, 127] - * @param[in] quant_params Per-tensor quantization info. 
- * It contains the multiplier and shift values to be applied to the output tensor. - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * Input dimension is taken as Nx(H * W * C_IN) - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] - * N : accumulation depth and equals (H * W * C_IN) from input_dims - * C : output depth and equals C_OUT in output_dims - * H & W : Not used - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * N, H, W : Not used - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] - * N : Batches - * C_OUT : Output depth - * H & W : Not used. - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - Supported framework: TensorFlow Lite - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. 
- */ - arm_status - arm_fully_connected_s8(const cmsis_nn_context *ctx, - const cmsis_nn_fc_params *fc_params, - const cmsis_nn_per_tensor_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for S8 basic fully-connected and - * matrix multiplication layer function for TF Lite - * @param[in] filter_dims dimension of filter - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims); - - /** - * @brief Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q7_opt(const q7_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut, - q15_t * vec_buffer); - - /** - * @brief Q15 basic fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to 
output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q15(const q15_t * pV, - const q15_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t * bias, - q15_t * pOut, - q15_t * vec_buffer); - - /** - * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q15_opt(const q15_t * pV, - const q15_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t * bias, - q15_t * pOut, - q15_t * vec_buffer); - - /** - * @brief Mixed Q15-Q7 fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t 
* bias, - q15_t * pOut, - q15_t * vec_buffer); - - /** - * @brief Mixed Q15-Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q15_t * pOut, - q15_t * vec_buffer); - -/** - * @brief Matrix-Multiplication Kernels for Convolution - * - * These functions are used within convolution layer functions for - * matrix multiplication. - * - * The implementation is similar to CMSIS-DSP arm_mat_mult functions - * with one Q7 and one Q15 operands. The Q15 operand is the im2col - * output which is always with 2 columns. 
- * - */ - - /** - * @brief Matrix-multiplication function for convolution - * @param[in] pA pointer to operand A - * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors - * @param[in] ch_im_out numRow of A - * @param[in] numCol_A numCol of A - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias the bias - * @param[in,out] pOut pointer to output - * @return The function returns the incremented output pointer - */ - - q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t * pA, - const q15_t * pInBuffer, - const uint16_t ch_im_out, - const uint16_t numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut); - /** - * @brief Matrix-multiplication function for convolution with per-channel requantization. - * @param[in] input_a pointer to operand A - * @param[in] input_b pointer to operand B, always consists of 2 vectors. - * @param[in] output_ch number of rows of A - * @param[in] out_shift pointer to per output channel requantization shift parameter. - * @param[in] out_mult pointer to per output channel requantization multiplier parameter. - * @param[in] out_offset output tensor offset. - * @param[in] activation_min minimum value to clamp the output to. Range : int8 - * @param[in] activation_max maximum value to clamp the output to. Range : int8 - * @param[in] num_col_a number of columns of A - * @param[in] output_bias per output channel bias. Range : int32 - * @param[in,out] out_0 pointer to output - * @return The function returns one of the two - * 1. The incremented output pointer for a successful operation or - * 2. NULL if implementation is not available. - * - * @details This function does the matrix multiplication of weight matrix for all output channels - * with 2 columns from im2col and produces two elements/output_channel. The outputs are - * clamped in the range provided by activation min and max. 
- * Supported framework: TensorFlow Lite micro. - */ - q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0); - - /** - * @brief Matrix-multiplication of re-ordered input B with A. - * - * @details For arguments, refer arm_nn_mat_mult_kernel_s8_s16. The re-ordering is a consequence - * of sign extension done by the SXTB16 command on input_b. The outputs are clamped in the range - * provided by activation min and max. - * * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# num_col_a is a multiple of 4 - * -# output_ch is a multiple of 2 - * - */ - q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0); - - /** - * @brief Matrix-multiplication function for convolution with reordered columns - * @param[in] pA pointer to operand A - * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors - * @param[in] ch_im_out numRow of A - * @param[in] numCol_A numCol of A - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias the bias - * @param[in,out] pOut pointer to output - * @return The function returns the incremented output pointer - * - * @details This function assumes that data in pInBuffer are reordered - */ - q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA, - const q15_t * pInBuffer, - const uint16_t ch_im_out, - const uint16_t 
numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut); - -#ifdef __cplusplus -} -#endif - -/* - * Other functions - * These layers are typically not timing critical - * Basic implementation is supported here - */ - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * @defgroup BasicMath Basic math functions - * - * Element wise add and multiplication functions. - * - */ - -/** - * @brief s8 element wise add of two vectors - * @param[in] input_1_vect pointer to input vector 1 - * @param[in] input_2_vect pointer to input vector 2 - * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 - * @param[in] input_1_mult multiplier for input 1 - * @param[in] input_1_shift shift for input 1 - * @param[in] input_2_offset offset for input 2. Range: Range: -127 to 128 - * @param[in] input_2_mult multiplier for input 2 - * @param[in] input_2_shift shift for input 2 - * @param[in] left_shift input left shift - * @param[in,out] output pointer to output vector - * @param[in] out_offset output offset - * @param[in] out_mult output multiplier - * @param[in] out_shift output shift - * @param[in] out_activation_min minimum value to clamp output to - * @param[in] out_activation_max maximum value to clamp output to - * @param[in] block_size number of samples - * @return The function returns ARM_MATH_SUCCESS - */ - arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_1_mult, - const int32_t input_1_shift, - const int32_t input_2_offset, - const int32_t input_2_mult, - const int32_t input_2_shift, - const int32_t left_shift, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size); - -/** - * @brief s8 element wise multiplication - * @param[in] input_1_vect pointer to input vector 1 - * 
@param[in] input_2_vect pointer to input vector 2 - * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 - * @param[in] input_2_offset offset for input 2. Range: Range: -127 to 128 - * @param[in,out] output pointer to output vector - * @param[in] out_offset output offset - * @param[in] out_mult output multiplier - * @param[in] out_shift output shift - * @param[in] out_activation_min minimum value to clamp output to - * @param[in] out_activation_max maximum value to clamp output to - * @param[in] block_size number of samples - * @return The function returns ARM_MATH_SUCCESS - * - * @details Supported framework: TensorFlow Lite micro - */ - arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_2_offset, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size); -/** - * @defgroup Acti Activation Functions - * - * Perform activation layers, including ReLU (Rectified Linear Unit), - * sigmoid and tanh - * - */ - - /** - * @brief Q7 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @return none. - */ - - void arm_relu_q7(q7_t *data, uint16_t size); - - /** - * @brief s8 ReLU6 function - * @param[in,out] data pointer to input - * @param[in] size number of elements - */ - - void arm_relu6_s8(q7_t *data, uint16_t size); - - /** - * @brief Q15 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @return none. 
- */ - - void arm_relu_q15(q15_t *data, uint16_t size); - - /** - * @brief Q7 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * @return none. - */ - - void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width, - arm_nn_activation_type type); - - /** - * @brief Q15 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * @return none. - * - * @details - * - * This is the direct table look-up approach. - * - * Assume here the integer part of the fixed-point is <= 3. - * More than 3 just not making much sense, makes no difference with - * saturation followed by any of these activation functions. - */ - - void arm_nn_activations_direct_q15(q15_t * data, uint16_t size, uint16_t int_width, - arm_nn_activation_type type); - -/** - * @defgroup Pooling Pooling Functions - * - * Perform pooling functions, including max pooling and average pooling - * - */ - - /** - * @brief Q7 max pooling function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] Im_out pointer to output tensor - * @return none. 
- * - */ - - void arm_maxpool_q7_HWC(q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const uint16_t dim_im_out, - q7_t * bufferA, - q7_t * Im_out); - - /** - * @brief Q7 average pooling function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] Im_out pointer to output tensor - * @return none. - * - */ - - void arm_avepool_q7_HWC(q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const uint16_t dim_im_out, - q7_t * bufferA, - q7_t * Im_out); - - /** - * @brief s8 average pooling function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] pool_params Pooling parameters - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Argument 'N' is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] - * Argument N and C are not used. - * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] - * Argument N is not used. - * C_OUT equals C_IN. - * @param[in, out] output_data Output data pointer. 
Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported Framework: TensorFlow Lite - * - */ - arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for S8 average pooling function - * @param[in] dim_dst_width output tensor dimension - * @param[in] ch_src number of input tensor channels - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, - const int ch_src); - - /** - * @brief s8 max pooling function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] pool_params Pooling parameters - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Argument 'N' is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] - * Argument N and C are not used. - * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] - * Argument N is not used. - * C_OUT equals C_IN. - * @param[in, out] output_data Output data pointer. 
Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported Framework: TensorFlow Lite - * - */ - arm_status arm_max_pool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *output_data); -/** - * @defgroup Softmax Softmax Functions - * - * EXP(2) based softmax functions. - * - */ - - /** - * @brief Q7 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - -void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out); - - /** - * @brief Q7 softmax function with batch parameter - * @param[in] vec_in pointer to input vector - * @param[in] nb_batches number of batches - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * @return none. - * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - -void arm_softmax_with_batch_q7(const q7_t * vec_in, const uint16_t nb_batches,const uint16_t dim_vec, q7_t * p_out ); - /** - * @brief Q15 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * @return none. 
- * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - -void arm_softmax_q15(const q15_t * vec_in, const uint16_t dim_vec, q15_t * p_out); - - /** - * @brief S8 softmax function - * @param[in] input Pointer to the input tensor - * @param[in] num_rows Number of rows in the input tensor - * @param[in] row_size Number of elements in each input row - * @param[in] mult Input quantization multiplier - * @param[in] shift Input quantization shift within the range [0, 31] - * @param[in] diff_min Minimum difference with max in row. Used to check if - * the quantized exponential operation can be performed - * @param[out] output Pointer to the output tensor - * - * @note Supported framework: TensorFlow Lite micro (bit-accurate) - * - */ - -void arm_softmax_s8(const int8_t *input, - const int32_t num_rows, - const int32_t row_size, - const int32_t mult, - const int32_t shift, - const int32_t diff_min, - int8_t *output); - - /** - * @brief U8 softmax function - * @param[in] input Pointer to the input tensor - * @param[in] num_rows Number of rows in the input tensor - * @param[in] row_size Number of elements in each input row - * @param[in] mult Input quantization multiplier - * @param[in] shift Input quantization shift within the range [0, 31] - * @param[in] diff_min Minimum difference with max in row. Used to check if - * the quantized exponential operation can be performed - * @param[out] output Pointer to the output tensor - * - * @note Supported framework: TensorFlow Lite micro (bit-accurate) - * - */ - -void arm_softmax_u8(const uint8_t *input, - const int32_t num_rows, - const int32_t row_size, - const int32_t mult, - const int32_t shift, - const int32_t diff_min, - uint8_t *output); - - /** - * @brief uint8 depthwise convolution function with asymmetric quantization - * Unless specified otherwise, arguments are mandatory. 
- * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_ch Channels in input tensor - * @param[in] kernel Pointer to kernel weights - * @param[in] kernel_x Width of kernel - * @param[in] kernel_y Height of kernel - * @param[in] ch_mult Number of channel multiplier - * @param[in] pad_x Padding sizes x - * @param[in] pad_y Padding sizes y - * @param[in] stride_x stride along the width - * @param[in] stride_y stride along the height - * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. - * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. - * @param[in] bias Pointer to optional bias values. If no bias is - * availble, NULL is expected - * @param[in] input_offset Input tensor zero offset - * @param[in] filter_offset Kernel tensor zero offset - * @param[in] output_offset Output tensor zero offset - * @param[in,out] output Pointer to output tensor - * @param[in] output_x Width of output tensor - * @param[in] output_y Height of output tensor - * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} - * @param[in] output_activation_max Minimum value to clamp the output to. 
Range : {0, 255} - * @param[in] out_shift Amount of right-shift for output - * @param[in] out_mult Output multiplier for requantization - * @return The function returns the following - * ARM_MATH_SUCCESS - Successful operation - * - */ - arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_ch, - const uint8_t *kernel, - const uint16_t kernel_x, - const uint16_t kernel_y, - const int16_t ch_mult, - const int16_t pad_x, - const int16_t pad_y, - const int16_t stride_x, - const int16_t stride_y, - const int16_t dilation_x, - const int16_t dilation_y, - const int32_t *bias, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_offset, - uint8_t *output, - const uint16_t output_x, - const uint16_t output_y, - const int32_t output_activation_min, - const int32_t output_activation_max, - const int32_t out_shift, - const int32_t out_mult); - -/** - * @defgroup Reshape Reshape Functions - * - */ - - /** - * @brief Reshape a s8 vector into another with different shape - * @param[in] input points to the s8 input vector - * @param[out] output points to the s8 output vector - * @param[in] total_size total size of the input and output vectors in bytes - * - * @note The output is expected to be in a memory area that does not overlap with the input's - * - */ - void arm_reshape_s8(const int8_t *input, - int8_t *output, - const uint32_t total_size); - -/** - * @defgroup Concatenation Concatenation Functions - * - */ - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis - * This function should be called for each input tensor to concatenate. The argument offset_x - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. 
offset_x = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x) - * offset_x += input_x[i] - * } - * - * This function assumes that the output tensor has: - * -# The same height of the input tensor - * -# The same number of channels of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_x Width of output tensor - * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_x is less than output_x - * - */ - void arm_concatenation_s8_x(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_x, - const uint32_t offset_x); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis - * This function should be called for each input tensor to concatenate. The argument offset_y - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. 
offset_y = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y) - * offset_y += input_y[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same number of channels of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_y Height of output tensor - * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_y is less than output_y - * - */ - void arm_concatenation_s8_y(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_y, - const uint32_t offset_y); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis - * This function should be called for each input tensor to concatenate. The argument offset_z - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. 
offset_z = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z) - * offset_z += input_z[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same height of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_z Channels in output tensor - * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_z is less than output_z - * - */ - void arm_concatenation_s8_z(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_z, - const uint32_t offset_z); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size) - * This function should be called for each input tensor to concatenate. The argument offset_w - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. 
offset_w = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w) - * offset_w += input_w[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same height of the input tensor - * -# The same number o channels of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] offset_w The offset on the W axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - */ - void arm_concatenation_s8_w(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint32_t offset_w); -/** - * @defgroup SVDF SVDF Layer Functions - * - */ - - /** - * @brief s8 SVDF function - * - * @param[in] input_ctx Temporary scratch buffer - * @param[in] output_ctx Temporary output scratch buffer - * @param[in] svdf_params SVDF Parameters - * Range of svdf_params->input_offset : [-128, 127] - * Range of svdf_params->output_offset : [-128, 127] - * @param[in] input_quant_params Input quantization parameters - * @param[in] output_quant_params Output quantization parameters - * @param[in] input_dims Input tensor dimensions - * @param[in] input_data Pointer to input tensor - * @param[in] state_dims State tensor dimensions - * @param[in] state_data Pointer to state tensor - * @param[in] weights_feature_dims Weights (feature) tensor dimensions - * @param[in] 
weights_feature_data Pointer to the weights (feature) tensor - * @param[in] weights_time_dims Weights (time) tensor dimensions - * @param[in] weights_time_data Pointer to the weights (time) tensor - * @param[in] bias_dims Bias tensor dimensions - * @param[in] bias_data Pointer to bias tensor - * @param[in] output_dims Output tensor dimensions - * @param[out] output_data Pointer to the output tensor - * - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * 1. Supported framework: TensorFlow Lite micro - * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - */ - arm_status - arm_svdf_s8(const cmsis_nn_context *input_ctx, - const cmsis_nn_context *output_ctx, - const cmsis_nn_svdf_params *svdf_params, - const cmsis_nn_per_tensor_quant_params *input_quant_params, - const cmsis_nn_per_tensor_quant_params *output_quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *state_dims, - q15_t *state_data, - const cmsis_nn_dims *weights_feature_dims, - const q7_t *weights_feature_data, - const cmsis_nn_dims *weights_time_dims, - const q15_t *weights_time_data, - const cmsis_nn_dims *bias_dims, - const q31_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/third_party/cmsis/CMSIS/NN/Include/arm_nnsupportfunctions.h b/src/third_party/cmsis/CMSIS/NN/Include/arm_nnsupportfunctions.h deleted file mode 100644 index 2f9364c..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Include/arm_nnsupportfunctions.h +++ /dev/null @@ -1,954 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nnsupportfunctions.h - * Description: Public header file of support functions for CMSIS NN Library - * - * $Date: July 31, 2020 - * $Revision: V.4.5.4 - * - * Target Processor: Cortex-M CPUs - * -------------------------------------------------------------------- */ - -#ifndef _ARM_NNSUPPORTFUNCTIONS_H_ -#define _ARM_NNSUPPORTFUNCTIONS_H_ - -#include "arm_math.h" -#include "arm_common_tables.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -#define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0) -#define RIGHT_SHIFT(_shift) (_shift > 0 ? 0 : -_shift) -#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0 -#define MASK_IF_NON_ZERO(x) (x) != 0 ? ~0 : 0 -#define SELECT_USING_MASK(mask, a, b) ((mask) & (a)) ^ (~(mask) & (b)) - -#define MAX(A,B) ((A) > (B) ? (A) : (B)) -#define MIN(A,B) ((A) < (B) ? 
(A) : (B)) -#define CLAMP(x, h, l) MAX(MIN((x), (h)), (l)) - -/** - * @brief Union for SIMD access of q31/q15/q7 types - */ -union arm_nnword -{ - q31_t word; - /**< q31 type */ - q15_t half_words[2]; - /**< q15 type */ - q7_t bytes[4]; - /**< q7 type */ -}; - -/** - * @brief Union for data type long long - */ -struct arm_nn_double -{ - uint32_t low; - int32_t high; -}; - -union arm_nn_long_long -{ - int64_t long_long; - struct arm_nn_double word; -}; - -/** - * @brief Struct for specifying activation function types - * - */ -typedef enum -{ - ARM_SIGMOID = 0, - /**< Sigmoid activation function */ - ARM_TANH = 1, - /**< Tanh activation function */ -} arm_nn_activation_type; - -/** - * @defgroup nndata_convert Neural Network Data Conversion Functions - * - * Perform data type conversion in-between neural network operations - * - */ - -/** - * @brief Converts the elements of the q7 vector to q15 vector without left-shift - * @param[in] *pSrc points to the q7 input vector - * @param[out] *pDst points to the q15 output vector - * @param[in] blockSize length of the input vector - * - */ -void arm_q7_to_q15_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize); - -/** - * @brief Non-saturating addition of elements of a q7 vector - * @param[in] *input Pointer to the q7 input vector - * @param[out] *output Pointer to the q31 output variable. - * @param[in] block_size length of the input vector - * \par Description: - * - * 2^24 samples can be added without saturating the result. - * - * The equation used for the conversion process is: - * - *
- *  sum = input[0] + input[1] + .. + input[block_size -1]
- * 
- * - * */ -void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size); - -/** - * @brief Converts the elements of the q7 vector to reordered q15 vector without left-shift - * @param[in] *pSrc points to the q7 input vector - * @param[out] *pDst points to the q15 output vector - * @param[in] blockSize length of the input vector - * @return none. - * - */ -void arm_q7_to_q15_reordered_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize); - -/** - * @brief Converts the elements from a q7 vector to a q15 vector with an added offset - * @param[in] src pointer to the q7 input vector - * @param[out] dst pointer to the q15 output vector - * @param[in] block_size length of the input vector - * @param[in] offset q7 offset to be added to each input vector element. - * - * \par Description: - * - * The equation used for the conversion process is: - * - *
- *  dst[n] = (q15_t) src[n] + offset;   0 <= n < block_size.
- * 
- * - */ -void arm_q7_to_q15_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset); - -/** - * @brief Converts the elements of the q7 vector to reordered q15 vector with an added offset - * @param[in] src pointer to the q7 input vector - * @param[out] dst pointer to the q15 output vector - * @param[in] block_size length of the input vector - * @param[in] offset offset to be added to each input vector element. - * @return none. - * - * @details This function does the q7 to q15 expansion with re-ordering of bytes. Re-ordering is a consequence of - * the sign extension intrinsic(DSP extension). The tail (i.e., last (N % 4) elements) retains its original - * order. - * - */ -void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset); - -/** - * @brief Converts the elements from a q7 vector and accumulate to a q15 vector - * @param[in] *src points to the q7 input vector - * @param[out] *dst points to the q15 output vector - * @param[in] block_size length of the input vector - * - * \par Description: - * - * The equation used for the conversion process is: - * - *
- *  dst[n] += (q15_t) src[n] ;   0 <= n < block_size.
- * 
- * - */ -void arm_nn_accumulate_q7_to_q15(q15_t *dst, const q7_t *src, uint32_t block_size); - -/** - * @brief Depthwise conv on an im2col buffer where the input channel equals output channel. - * @param[in] row pointer to row - * @param[in] col pointer to im2col buffer, always consists of 2 columns. - * @param[in] num_ch number of channels - * @param[in] out_shift pointer to per output channel requantization shift parameter. - * @param[in] out_mult pointer to per output channel requantization multiplier parameter. - * @param[in] out_offset output tensor offset. - * @param[in] activation_min minimum value to clamp the output to. Range : int8 - * @param[in] activation_max maximum value to clamp the output to. Range : int8 - * @param[in] kernel_size number of elements in one column. - * @param[in] output_bias per output channel bias. Range : int32 - * @param[out] out pointer to output - * @return The function returns one of the two - * 1. The incremented output pointer for a successful operation or - * 2. NULL if implementation is not available. - * - * @details Supported framework: TensorFlow Lite micro. - */ -q7_t *arm_nn_depthwise_conv_s8_core(const q7_t *row, - const q15_t *col, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t kernel_size, - const int32_t *const output_bias, - q7_t *out); - -/** - * @brief General Matrix-multiplication function with per-channel requantization. - * @param[in] input_row pointer to row operand - * @param[in] input_col pointer to col operand - * @param[in] output_ch number of rows of input_row - * @param[in] col_batches number of column batches. Range: 1 to 4 - * @param[in] output_shift pointer to per output channel requantization shift parameter. - * @param[in] output_mult pointer to per output channel requantization multiplier parameter. - * @param[in] out_offset output tensor offset. 
- * @param[in] col_offset input tensor(col) offset. - * @param[in] row_offset kernel offset(row). Not used. - * @param[in] out_activation_min minimum value to clamp the output to. Range : int8 - * @param[in] out_activation_max maximum value to clamp the output to. Range : int8 - * @param[in] row_len number of elements in each row - * @param[in] bias per output channel bias. Range : int32 - * @param[in,out] out pointer to output - * @return The function returns one of the two - * 1. The incremented output pointer for a successful operation or - * 2. NULL if implementation is not available. - * - * @details Supported framework: TensorFlow Lite -*/ -q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, - const q7_t *input_col, - const uint16_t output_ch, - const uint16_t col_batches, - const int32_t *output_shift, - const int32_t *output_mult, - const int32_t out_offset, - const int32_t col_offset, - const int32_t row_offset, - const int16_t out_activation_min, - const int16_t out_activation_max, - const uint16_t row_len, - const int32_t *const bias, - q7_t *out); - -/** - * @brief General Matrix-multiplication without requantization for one row & one column - * @param[in] row_elements number of row elements - * @param[in] row_base pointer to row operand - * @param[in] col_base pointer to col operand - * @param[out] sum_col pointer to store sum of column elements - * @param[out] output pointer to store result of multiply-accumulate - * @return The function returns the multiply-accumulated result of the row by column. 
- * - * @details Pseudo-code - * *output = 0 - * sum_col = 0 - * for (i = 0; i < row_elements; i++) - * *output += row_base[i] * col_base[i] - * sum_col += col_base[i] - * -*/ -arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output); - -/** - * @brief General Matrix-multiplication without requantization for four rows and one column - * @param[in] row_elements number of row elements - * @param[in] offset offset between rows. Can be the same as row_elements. - * For e.g, in a 1x1 conv scenario with stride as 1. - * @param[in] row_base pointer to row operand - * @param[in] col_base pointer to col operand - * @param[out] sum_col pointer to store sum of column elements - * @param[out] output pointer to store result(4 int32's) of multiply-accumulate - * @return The function returns the multiply-accumulated result of the row by column - * - * @details Pseudo-code - * output[0] = 0 - * .. - * output[3] = 0 - * sum_col = 0 - * for (i = 0; i < row_elements; i++) - * output[0] += row_base[i] * col_base[i] - * .. - * output[3] += row_base[i + (row_elements * 3)] * col_base[i] - * sum_col += col_base[i] -*/ -arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, - const int32_t offset, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output); - -/** -* @brief General Matrix-multiplication function with per-channel requantization. -* This function assumes: -* - LHS input matrix NOT transposed (nt) -* - RHS input matrix transposed (t) -* -* @note This operation also performs the broadcast bias addition before the requantization -* -* @param[in] lhs Pointer to the LHS input matrix -* @param[in] rhs Pointer to the RHS input matrix -* @param[in] bias Pointer to the bias vector. 
The length of this vector is equal to the number of output columns (or RHS input rows) -* @param[out] dst Pointer to the output matrix with "m" rows and "n" columns -* @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. The length of this vector is equal to -* the number of output columns (or RHS input rows) -* @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length of this vector is equal to -* the number of output columns (or RHS input rows) -* @param[in] lhs_rows Number of LHS input rows -* @param[in] rhs_rows Number of RHS input rows -* @param[in] rhs_cols Number of LHS/RHS input columns -* @param[in] lhs_offset Offset to be applied to the LHS input value -* @param[in] dst_offset Offset to be applied the output result -* @param[in] activation_min Minimum value to clamp down the output. Range : int8 -* @param[in] activation_max Maximum value to clamp up the output. Range : int8 -* -* @return The function returns ARM_MATH_SUCCESS -* -*/ -arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t *dst_multipliers, - const int32_t *dst_shifts, - const int32_t lhs_rows, - const int32_t rhs_rows, - const int32_t rhs_cols, - const int32_t lhs_offset, - const int32_t dst_offset, - const int32_t activation_min, - const int32_t activation_max); - -/** - * @brief s8 Vector by Matrix (transposed) multiplication - * - * @param[in] lhs Input left-hand side vector - * @param[in] rhs Input right-hand side matrix (transposed) - * @param[in] bias Input bias - * @param[out] dst Output vector - * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. Range: -127 to 128 - * @param[in] rhs_offset Offset to be added to the input values of the right-hand side matrix. Range: -127 to 128 - * @param[in] dst_offset Offset to be added to the output values. 
Range: -127 to 128 - * @param[in] dst_multiplier Output multiplier - * @param[in] dst_shift Output shift - * @param[in] rhs_cols Number of columns in the right-hand side input matrix - * @param[in] rhs_rows Number of rows in the right-hand side input matrix - * @param[in] activation_min Minimum value to clamp the output to. Range: int8 - * @param[in] activation_max Maximum value to clamp the output to. Range: int8 - * - * @return The function returns ARM_MATH_SUCCESS - * - */ -arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t lhs_offset, - const int32_t rhs_offset, - const int32_t dst_offset, - const int32_t dst_multiplier, - const int32_t dst_shift, - const int32_t rhs_cols, - const int32_t rhs_rows, - const int32_t activation_min, - const int32_t activation_max); - -/** - * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in padded cases where - * the padding is -lhs_offset(Range: int8). Dimensions are the same for lhs and rhs. - * - * @param[in] lhs Input left-hand side matrix - * @param[in] rhs Input right-hand side matrix (transposed) - * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 - * @param[in] num_ch Number of channels in LHS/RHS - * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels - * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels - * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 - * @param[in] activation_min Minimum value to clamp the output to. Range: int8 - * @param[in] activation_max Maximum value to clamp the output to. Range: int8 - * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix - * @param[in] output_bias Per channel output bias. 
Length of vector is equal to number of channels - * @param[in] out Output pointer - * - * @return The function returns one of the two - * - Updated output pointer if an implementaiton is available - * - NULL if no implementation is available. - * - * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out - * for the following. - * - Output shift - * - Output multiplier - * - Output bias - * - rhs - */ -q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t lhs_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out); - -/** - * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. - * Dimensions are the same for lhs and rhs. - * - * @param[in] lhs Input left-hand side matrix - * @param[in] rhs Input right-hand side matrix (transposed) - * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 - * @param[in] num_ch Number of channels in LHS/RHS - * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. - * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. - * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 - * @param[in] activation_min Minimum value to clamp the output to. Range: int8 - * @param[in] activation_max Maximum value to clamp the output to. Range: int8 - * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix - * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. 
- * @param[in] out Output pointer - * - * @return The function returns one of the two - * - Updated output pointer if an implementaiton is available - * - NULL if no implementation is available. - * - * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out - * for the following. - * - Output shift - * - Output multiplier - * - Output bias - * - rhs - */ -q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t lhs_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out); - -/** - @brief Read 2 q15 elements and post increment pointer. - @param[in] in_q15 Pointer to pointer that holds address of input. - @return q31 value - */ -__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2_ia(const q15_t **in_q15) -{ - q31_t val; - - memcpy(&val, *in_q15, 4); - *in_q15 += 2; - - return (val); -} - -/** - @brief Read 4 q7 from q7 pointer and post increment pointer. - @param[in] in_q7 Pointer to pointer that holds address of input. - @return q31 value - */ -__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4_ia(const q7_t **in_q7) -{ - q31_t val; - memcpy(&val, *in_q7, 4); - *in_q7 += 4; - - return (val); -} - -/** - @brief Read 2 q15 from q15 pointer. - @param[in] in_q15 pointer to address of input. - @return q31 value - */ -__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2(const q15_t *in_q15) -{ - q31_t val; - memcpy(&val, in_q15, 4); - - return (val); -} - -/** - @brief Read 4 q7 values. - @param[in] in_q7 pointer to address of input. 
- @return q31 value - */ -__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4(const q7_t *in_q7) -{ - q31_t val; - memcpy(&val, in_q7, 4); - - return (val); -} - -/** - * @brief memset optimized for MVE - * @param[in, out] dst Destination pointer - * @param[in] val Value to set - * @param[in] block_size Number of bytes to copy. - * - */ -__STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst, - const q7_t val, - uint32_t block_size) -{ -#if defined(ARM_MATH_MVEI) - __asm volatile ( - " vdup.8 q0, %[set_val] \n" - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vstrb.8 q0, [%[in]], 16 \n" - " letp lr, 2b \n" - "1: \n" - :[in] "+r"(dst) - :[cnt] "r"(block_size), [set_val] "r"(val) - :"q0", "memory", "r14"); -#else - memset(dst, val, block_size); -#endif -} - -#if defined (ARM_MATH_DSP) - -/** - * @brief read and expand one q7 word into two q15 words - */ - -__STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t * out1, q31_t * out2) -{ - q31_t inA = arm_nn_read_q7x4_ia(&source); - q31_t inAbuf1 = __SXTB16(__ROR((uint32_t)inA, 8)); - q31_t inAbuf2 = __SXTB16(inA); - -#ifndef ARM_MATH_BIG_ENDIAN - *out2 = (int32_t) (__PKHTB (inAbuf1, inAbuf2, 16)); - *out1 = (int32_t) (__PKHBT (inAbuf2, inAbuf1, 16)); -#else - *out1 = (int32_t) (__PKHTB(inAbuf1, inAbuf2, 16)); - *out2 = (int32_t) (__PKHBT(inAbuf2, inAbuf1, 16)); -#endif - - return source; -} - -/** - * @brief read and expand one q7 word into two q15 words with reordering - */ - -__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered(const q7_t *source, q31_t * out1, q31_t * out2) -{ - q31_t inA = arm_nn_read_q7x4_ia(&source); -#ifndef ARM_MATH_BIG_ENDIAN - *out2 = __SXTB16(__ROR((uint32_t)inA, 8)); - *out1 = __SXTB16(inA); -#else - *out1 = __SXTB16(__ROR((uint32_t)inA, 8)); - *out2 = __SXTB16(inA); -#endif - - return source; -} - -/** - * @brief read and expand one q7 word into two q15 words with reordering and add an offset - */ -__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered_with_offset(const q7_t *source, 
q31_t * out1, q31_t * out2, q31_t offset) -{ - q31_t inA = arm_nn_read_q7x4_ia(&source); - -#ifndef ARM_MATH_BIG_ENDIAN - *out2 = __SXTB16(__ROR((uint32_t)inA, 8)); - *out1 = __SXTB16(inA); -#else - *out1 = __SXTB16(__ROR((uint32_t)inA, 8)); - *out2 = __SXTB16(inA); -#endif - *out1 = __QADD16(*out1,offset); - *out2 = __QADD16(*out2,offset); - - return source; -} - -#endif - - - -/** - * @defgroup NNBasicMath Basic Math Functions for Neural Network Computation - * - * Basic Math Functions for Neural Network Computation - * - */ - -/** - * @brief q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector - * @return none. - * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable q15 range [0x8000 0x7FFF] will be saturated. - */ - -void arm_nn_mult_q15( - q15_t * pSrcA, - q15_t * pSrcB, - q15_t * pDst, - const uint16_t out_shift, - uint32_t blockSize); - -/** - * @brief q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector - * @return none. - * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable q7 range [0x80 0x7F] will be saturated. 
- */ - -void arm_nn_mult_q7( - q7_t * pSrcA, - q7_t * pSrcB, - q7_t * pDst, - const uint16_t out_shift, - uint32_t blockSize); - -/** - * @brief macro for adding rounding offset - */ -#ifndef ARM_NN_TRUNCATE - #define NN_ROUND(out_shift) ( (0x1u << out_shift) >> 1 ) -#else - #define NN_ROUND(out_shift) 0 -#endif - -// Macros for shortening quantization functions' names and avoid long lines -#define MUL_SAT(a, b) arm_nn_doubling_high_mult((a), (b)) -#define MUL_SAT_MVE(a, b) arm_doubling_high_mult_mve_32x4((a), (b)) -#define MUL_POW2(a, b) arm_nn_mult_by_power_of_two((a), (b)) - - -#define DIV_POW2(a, b) arm_nn_divide_by_power_of_two((a), (b)) -#define DIV_POW2_MVE(a, b) arm_divide_by_power_of_two_mve((a), (b)) - - -#define EXP_ON_NEG(x) arm_nn_exp_on_negative_values((x)) -#define ONE_OVER1(x) arm_nn_one_over_one_plus_x_for_x_in_0_1((x)) - -/** - * @brief Saturating doubling high multiply. Result matches - * NEON instruction VQRDMULH. - * @param[in] m1 Multiplicand. Range: {Q31_MIN, Q31_MAX} - * @param[in] m2 Multiplier. Range: {Q31_MIN, Q31_MAX} - * @return Result of multiplication. - * - */ -__STATIC_FORCEINLINE q31_t arm_nn_doubling_high_mult(const q31_t m1, const q31_t m2) -{ - q31_t result = 0; - // Rounding offset to add for a right shift of 31 - q63_t mult = 1 << 30; - - if ((m1 < 0) ^ (m2 < 0)) - { - mult = 1 - mult; - } - // Gets resolved as a SMLAL instruction - mult = mult + (q63_t)m1 * m2; - - // Utilize all of the upper 32 bits. This is the doubling step - // as well. - result = (int32_t) (mult / (1ll << 31)); - - if ((m1 == m2) && (m1 == (int32_t)Q31_MIN)) - { - result = Q31_MAX; - } - return result; -} - -/** - * @brief Doubling high multiply without saturation. This is intended - * for requantization where the scale is a positive integer - * - * @param[in] m1 Multiplicand. Range: {Q31_MIN, Q31_MAX} - * @param[in] m2 Multiplier Range: {Q31_MIN, Q31_MAX} - * @return Result of multiplication. 
- * @note The result of this matches that of neon instruction - * VQRDMULH for m1 in range {Q31_MIN, Q31_MAX} and m2 in - * range {Q31_MIN + 1, Q31_MAX}. Saturation occurs when - * m1 equals m2 equals Q31_MIN and that is not handled by - * this function. - * - */ -__STATIC_FORCEINLINE q31_t arm_nn_doubling_high_mult_no_sat(const q31_t m1, const q31_t m2) -{ - q31_t result = 0; - union arm_nn_long_long mult; - - // Rounding offset to add for a right shift of 31 - mult.word.low = 1 << 30; - mult.word.high = 0; - - // Gets resolved as a SMLAL instruction - mult.long_long = mult.long_long + (q63_t)m1 * m2; - - // Utilize all of the upper 32 bits. This is the doubling step - // as well. - result = (int32_t)(mult.long_long >> 31); - - return result; -} - -/** - * @brief Rounding divide by power of two. - * @param[in] dividend - Dividend - * @param[in] exponent - Divisor = power(2, exponent) - * Range: [0, 31] - * @return Rounded result of division. Midpoint is rounded away from zero. - * - */ -__STATIC_FORCEINLINE q31_t arm_nn_divide_by_power_of_two(const q31_t dividend, const q31_t exponent) -{ - q31_t result = 0; - const q31_t remainder_mask = (1 << exponent) - 1; - int32_t remainder = remainder_mask & dividend; - - // Basic division - result = dividend >> exponent; - - // Adjust 'result' for rounding (mid point away from zero) - q31_t threshold = remainder_mask >> 1; - if (result < 0) - { - threshold++; - } - if (remainder > threshold) - { - result++; - } - - return result; -} - -/** - * @brief Requantize a given value. - * @param[in] val Value to be requantized - * @param[in] multiplier multiplier. 
Range {Q31_MIN + 1, Q32_MAX} - * @param[in] shift left or right shift for 'val * multiplier' - * - * @return Returns (val * multiplier)/(2 ^ shift) - * - */ -__STATIC_FORCEINLINE q31_t arm_nn_requantize(const q31_t val, const q31_t multiplier, const q31_t shift) -{ - return arm_nn_divide_by_power_of_two( - arm_nn_doubling_high_mult_no_sat(val * (1 << LEFT_SHIFT(shift)), multiplier), - RIGHT_SHIFT(shift)); -} - -/** - * @brief memcpy optimized for MVE - * @param[in, out] dst Destination pointer - * @param[in] src Source pointer. - * @param[in] block_size Number of bytes to copy. - * - */ -__STATIC_FORCEINLINE void arm_memcpy_q7(q7_t *__RESTRICT dst, - const q7_t *__RESTRICT src, - uint32_t block_size) -{ -#if defined(ARM_MATH_MVEI) - __asm volatile ( - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vldrb.8 q0, [%[in]], 16 \n" - " vstrb.8 q0, [%[out]], 16 \n" - " letp lr, 2b \n" - "1: \n" - :[in] "+r"(src) - ,[out] "+r"(dst) - :[cnt] "r"(block_size) - :"q0", "memory", "r14"); -#else - memcpy(dst, src, block_size); -#endif -} - -#if defined(ARM_MATH_MVEI) -/** - * @brief Vector saturating doubling high multiply returning high half. - * @param[in] m1 Multiplicand - * @param[in] m2 Multiplier - * @return Result of multiplication. - * - */ -__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve(const int32x4_t m1, const q31_t m2) -{ - return vqrdmulhq_n_s32(m1, m2); -} - -/** - * @brief Vector rounding divide by power of two. - * @param[in] dividend - Dividend vector - * @param[in] exponent - Divisor = power(2, exponent) - * Range: [0, 31] - * @return Rounded result of division. Midpoint is rounded away from zero. 
- * - */ -__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve(const int32x4_t dividend, const q31_t exponent) -{ - const int32x4_t shift = vdupq_n_s32(-exponent); - const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31); - const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup); - return vrshlq_s32(fixed_up_dividend, shift); -} - -/** - * @brief Requantize a given vector. - * @param[in] val Vector to be requantized - * @param[in] multiplier multiplier - * @param[in] shift shift - * - * @return Returns (val * multiplier)/(2 ^ shift) - * - */ -__STATIC_FORCEINLINE int32x4_t arm_requantize_mve(const int32x4_t val, const q31_t multiplier, const q31_t shift) -{ - return arm_divide_by_power_of_two_mve( - arm_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier), - RIGHT_SHIFT(shift)); -} - -__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve_32x4(const int32x4_t m1, const int32x4_t m2) -{ - return vqrdmulhq_s32(m1, m2); -} - -__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve_32x4(const int32x4_t dividend, const int32x4_t exponent) -{ - const int32x4_t shift = -exponent; - const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31); - const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup); - return vrshlq_s32(fixed_up_dividend, shift); -} - -__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, const int32x4_t multiplier, const int32x4_t shift) -{ - const int32x4_t zz = vdupq_n_s32(0); - const mve_pred16_t p = vcmpgtq_n_s32(shift, 0); - - const int32x4_t left_shift = vpselq_s32(shift, zz, p); - const int32x4_t right_shift = -vpselq_s32(zz, shift, p); - - return arm_divide_by_power_of_two_mve_32x4(arm_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier), right_shift); -} -#endif - -// @note The following functions are used only for softmax layer, scaled bits = 5 assumed - -__STATIC_FORCEINLINE int32_t arm_nn_exp_on_negative_values(int32_t val) 
-{ - int32_t mask = 0; - int32_t shift = 24; - - const int32_t val_mod_minus_quarter = (val & ((1 << shift) - 1)) - (1 << shift); - const int32_t remainder = val_mod_minus_quarter - val; - const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28); - const int32_t x2 = MUL_SAT(x, x); - - int32_t result = 1895147668 + MUL_SAT(1895147668, x + - DIV_POW2(MUL_SAT(DIV_POW2(MUL_SAT(x2, x2), 2) + MUL_SAT(x2, x), 715827883) + x2, 1)); - -#define SELECT_IF_NON_ZERO(x) \ -{ \ - mask = MASK_IF_NON_ZERO(remainder & (1 << shift++)); \ - result = SELECT_USING_MASK(mask, MUL_SAT(result, x), result); \ -} - - SELECT_IF_NON_ZERO(1672461947) - SELECT_IF_NON_ZERO(1302514674) - SELECT_IF_NON_ZERO(790015084) - SELECT_IF_NON_ZERO(290630308) - SELECT_IF_NON_ZERO(39332535) - SELECT_IF_NON_ZERO(720401) - SELECT_IF_NON_ZERO(242) - -#undef SELECT_IF_NON_ZERO - - mask = MASK_IF_ZERO(val); - return SELECT_USING_MASK(mask, Q31_MAX, result); -} - -__STATIC_FORCEINLINE q31_t arm_nn_mult_by_power_of_two(const int32_t val, const int32_t exp) -{ - const int32_t thresh = ((1 << (31 - exp)) - 1); - int32_t result = val << exp; - result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), Q31_MAX, result); - result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), Q31_MIN, result); - return result; -} - -__STATIC_FORCEINLINE int32_t arm_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val) -{ - const int64_t sum = (int64_t)val + (int64_t)Q31_MAX; - const int32_t half_denominator = (int32_t)((sum + (sum >= 0 ? 
1 : -1)) / 2L); - int32_t x = 1515870810 + MUL_SAT(half_denominator, -1010580540); - - const int32_t shift = (1 << 29); - x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); - x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); - x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); - - return MUL_POW2(x, 1); -} - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c deleted file mode 100644 index 7f46074..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_activations_q15.c - * Description: Q15 neural network activation function using direct table look-up - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_common_tables.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Acti - * @{ - */ - -/** - * @brief neural network activation function using direct table look-up - * - * @note Refer header file for details. - * - */ - -void arm_nn_activations_direct_q15(q15_t * data, uint16_t size, uint16_t int_width, arm_nn_activation_type type) -{ - uint16_t i = size; - q15_t *pIn = data; - q15_t *pOut = data; - uint16_t shift_size = 8 + 3 - int_width; - uint32_t bit_mask = 0x7FF >> int_width; - uint32_t full_frac = bit_mask + 1; - const q15_t *lookup_table; - - switch (type) - { - case ARM_SIGMOID: - lookup_table = sigmoidTable_q15; - break; - case ARM_TANH: - default: - lookup_table = tanhTable_q15; - break; - } - - while (i) - { - q15_t out; - q15_t in = *pIn++; - q15_t frac = (uint32_t) in & bit_mask; - q15_t value = lookup_table[(uint8_t)(in >> shift_size)]; - if ((in >> shift_size) != 0x7f) - { - q15_t value2 = lookup_table[(uint8_t)(1 + ((uint8_t)(in >> shift_size)))]; - /* doing the interpolation here for better accuracy */ - out = ((q31_t) (full_frac - frac) * value + (q31_t) value2 * frac) >> shift_size; - } else - { - /* the largest positive value does not have a right side for linear interpolation */ - out = value; - } - - *pOut++ = out; - i--; - } - -} - -/** - * @} end of Acti group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c deleted file mode 100644 index 40ab678..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_activations_q7.c - * Description: Q7 neural network activation function using direct table look-up - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_common_tables.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Acti - * @{ - */ - - /** - * @brief Q7 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * - * @details - * - * This is the direct table look-up approach. - * - * Assume here the integer part of the fixed-point is <= 3. - * More than 3 just not making much sense, makes no difference with - * saturation followed by any of these activation functions. 
- */ - -void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width, arm_nn_activation_type type) -{ - uint16_t i = size; - q7_t *pIn = data; - q7_t *pOut = data; - q7_t in; - q7_t out; - uint16_t shift_size = 3 - int_width; - const q7_t *lookup_table; - switch (type) - { - case ARM_SIGMOID: - lookup_table = sigmoidTable_q7; - break; - case ARM_TANH: - default: - lookup_table = tanhTable_q7; - break; - } - while (i) - { - in = *pIn++; - out = lookup_table[(uint8_t) (in >> shift_size)]; - *pOut++ = out; - i--; - } -} - -/** - * @} end of Acti group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c deleted file mode 100644 index 6114aa8..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_relu6_s8.c - * Description: Basic s8 version of ReLU6 - * - * $Date: Spetember 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Acti - * @{ - */ - - /* - * Basic ReLU6 function - * - * Refer to header file for details. - * - */ - -void arm_relu6_s8(q7_t *data, uint16_t size) -{ - int32_t i; - - for (i = 0; i < size; i++) - { - int32_t ip = data[i]; - - ip = MAX(ip, 0); - data[i] = MIN(ip, 6); - } -} - -/** - * @} end of Acti group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c deleted file mode 100644 index 711518d..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_relu_q15.c - * Description: Q15 version of ReLU - * - * $Date: February 27, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Acti - * @{ - */ - -/** - * @brief Q15 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * - * @details - * - * Optimized relu with QSUB instructions. - * - */ - -void arm_relu_q15(q15_t *data, uint16_t size) -{ - -#if defined(ARM_MATH_DSP) - /* Run the following code for M cores with DSP extension */ - - uint16_t i = size >> 1; - q15_t *input = data; - q15_t *output = data; - q31_t in; - q31_t buf; - q31_t mask; - - while (i) - { - in = read_q15x2_ia(&input); - - /* extract the first bit */ - buf = __ROR(in & 0x80008000, 15); - - /* if MSB=1, mask will be 0xFF, 0x0 otherwise */ - mask = __QSUB16(0x00000000, buf); - - write_q15x2_ia(&output, in & (~mask)); - i--; - } - - if (size & 0x1) - { - if (*input < 0) - { - *input = 0; - } - input++; - } -#else - /* Run the following code as reference implementation for M cores without DSP extension */ - uint16_t i; - - for (i = 0; i < size; i++) - { - if (data[i] < 0) - data[i] = 0; - } - -#endif /* ARM_MATH_DSP */ -} - -/** - * @} end of Acti group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c deleted file mode 100644 index e007fd1..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_relu_q7.c - * Description: Q7 version of ReLU - * - * $Date: May 29, 2020 - * $Revision: V.1.0.2 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Acti - * @{ - */ - - /** - * @brief Q7 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * - * @details - * - * Optimized relu with QSUB instructions. 
- * - */ - -void arm_relu_q7(q7_t *data, uint16_t size) -{ - -#if defined(ARM_MATH_DSP) - /* Run the following code for M cores with DSP extension */ - - uint16_t i = size >> 2; - q7_t *input = data; - q7_t *output = data; - q31_t in; - q31_t buf; - q31_t mask; - - while (i) - { - in = read_q7x4_ia(&input); - - /* extract the first bit */ - buf = (int32_t)__ROR((uint32_t)in & 0x80808080, 7); - - /* if MSB=1, mask will be 0xFF, 0x0 otherwise */ - mask = __QSUB8(0x00000000, buf); - - write_q7x4_ia(&output, in & (~mask)); - - i--; - } - - i = size & 0x3; - while (i) - { - if (*input < 0) - { - *input = 0; - } - input++; - i--; - } - -#else - /* Run the following code as reference implementation for cores without DSP extension */ - - uint16_t i; - - for (i = 0; i < size; i++) - { - if (data[i] < 0) - data[i] = 0; - } - -#endif -} - -/** - * @} end of Acti group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c deleted file mode 100644 index 6248c15..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_elementwise_add_s8 - * Description: Element wise add - * - * $Date: July 31, 2020 - * $Revision: V.2.5.1 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" -#if defined(ARM_MATH_MVEI) -#include "arm_helium_utils.h" -#endif - -#if defined(ARM_MATH_MVEI) -#define SAT_INPUT_VECT(__INPUT_V, __MULT, __SHIFT) \ - __INPUT_V = arm_doubling_high_mult_mve(__INPUT_V, __MULT); \ - __INPUT_V = arm_divide_by_power_of_two_mve(__INPUT_V, -__SHIFT); -#endif - - -/** - * @note The *_no_sat API does not mean that the input not saturated, Since - * __MULT is a positive integer, it is saturated. The API definition - * has more info about it. - */ -#define SAT_INPUT(__INPUT, __MULT, __SHIFT) \ - __INPUT = arm_nn_doubling_high_mult_no_sat(__INPUT, __MULT); \ - __INPUT = arm_nn_divide_by_power_of_two(__INPUT, -__SHIFT); - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup BasicMath - * @{ - */ - -/* - * s8 element wise add - * - * Refer header file for details. 
- * - */ - -/* Note: __SHIFT is expected to be <=0 */ - - -arm_status -arm_elementwise_add_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_1_mult, - const int32_t input_1_shift, - const int32_t input_2_offset, - const int32_t input_2_mult, - const int32_t input_2_shift, - const int32_t left_shift, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size) -{ -#if defined(ARM_MATH_MVEI) - int32_t count = (int32_t)block_size; - - while (count > 0) - { - int32x4_t vect_1; - int32x4_t vect_2; - - mve_pred16_t p = vctp32q((uint32_t)count); - - vect_1 = vldrbq_z_s32(input_1_vect, p); - vect_2 = vldrbq_z_s32(input_2_vect, p); - - vect_1 = vaddq_s32(vect_1, vdupq_n_s32(input_1_offset)); - vect_2 = vaddq_s32(vect_2, vdupq_n_s32(input_2_offset)); - - vect_1 = vshlq_r_s32(vect_1, left_shift); - vect_2 = vshlq_r_s32(vect_2, left_shift); - - SAT_INPUT_VECT(vect_1, input_1_mult, input_1_shift); - SAT_INPUT_VECT(vect_2, input_2_mult, input_2_shift); - - vect_1 = vaddq_s32(vect_1, vect_2); - SAT_INPUT_VECT(vect_1, out_mult, out_shift); - - vect_1 = vaddq_n_s32(vect_1, out_offset); - - vect_1 = vmaxq_s32(vect_1, vdupq_n_s32(out_activation_min)); - vect_1 = vminq_s32(vect_1, vdupq_n_s32(out_activation_max)); - - input_1_vect += 4; - input_2_vect += 4; - vstrbq_p_s32(output, vect_1, p); - - output += 4; - count -= 4; - } -#else - uint32_t loop_count; - int32_t input_1; - int32_t input_2; - int32_t sum; - -#if defined(ARM_MATH_DSP) - int32_t a_1, b_1, a_2, b_2; - - int32_t offset_1_packed, offset_2_packed; - - int8_t r1, r2, r3, r4; - - offset_1_packed = (input_1_offset << 16U) | (input_1_offset & 0x0FFFFL); - offset_2_packed = (input_2_offset << 16U) | (input_2_offset & 0x0FFFFL); - - loop_count = block_size >> 2; - - while (loop_count > 0U) - { - /* 4 outputs are calculated in 
one loop. The order of calculation is follows the order of output sign extension - intrinsic */ - input_1_vect = read_and_pad_reordered(input_1_vect, &b_1, &a_1); - input_2_vect = read_and_pad_reordered(input_2_vect, &b_2, &a_2); - - a_1 = __SADD16(a_1, offset_1_packed); - b_1 = __SADD16(b_1, offset_1_packed); - - a_2 = __SADD16(a_2, offset_2_packed); - b_2 = __SADD16(b_2, offset_2_packed); - - /* Sum 1 */ - input_1 = (int16_t)(b_1 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); - - input_2 = (int16_t)(b_2 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); - - sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); - sum += out_offset; - sum = MAX(sum, out_activation_min); - sum = MIN(sum, out_activation_max); - r1 = (q7_t)sum; - - /* Sum 3 */ - input_1 = (int16_t)((b_1 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); - - input_2 = (int16_t)((b_2 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); - - sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); - sum += out_offset; - sum = MAX(sum, out_activation_min); - sum = MIN(sum, out_activation_max); - r3 = (q7_t)sum; - - /* Sum 2 */ - input_1 = (int16_t)(a_1 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); - - input_2 = (int16_t)(a_2 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); - - sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); - sum += out_offset; - sum = MAX(sum, out_activation_min); - sum = MIN(sum, out_activation_max); - r2 = (q7_t)sum; - - /* Sum 4 */ - input_1 = (int16_t)((a_1 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); - - input_2 = (int16_t)((a_2 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); - - sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); - sum += out_offset; - sum = MAX(sum, 
out_activation_min); - sum = MIN(sum, out_activation_max); - r4 = (q7_t)sum; - - write_q7x4_ia(&output, __PACKq7(r1, r2, r3, r4)); - - loop_count--; - } - - loop_count = block_size & 0x3; -#else - loop_count = block_size; -#endif - - while (loop_count > 0U) - { - /* C = A + B */ - - input_1 = (*input_1_vect++ + input_1_offset) << left_shift; - input_2 = (*input_2_vect++ + input_2_offset) << left_shift; - - input_1 = arm_nn_doubling_high_mult(input_1, input_1_mult); - input_1 = arm_nn_divide_by_power_of_two(input_1, -input_1_shift); - - input_2 = arm_nn_doubling_high_mult(input_2, input_2_mult); - input_2 = arm_nn_divide_by_power_of_two(input_2, -input_2_shift); - - sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); - sum += out_offset; - - sum = MAX(sum, out_activation_min); - sum = MIN(sum, out_activation_max); - - *output++ = (q7_t)sum; - - /* Decrement loop counter */ - loop_count--; - } - -#endif /* ARM_MATH_MVEI */ - - return (ARM_MATH_SUCCESS); -} - -/** - * @} end of BasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c deleted file mode 100644 index 4923e9c..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_elementwise_mul_s8 - * Description: Element wise multiplication - * - * $Date: May 29, 2020 - * $Revision: V.1.0.3 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup BasicMath - * @{ - */ - -/** - * @brief s8 element wise multiplication of two vectors - * - * @note Refer header file for details. - * - */ - -arm_status -arm_elementwise_mul_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_2_offset, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size) -{ - - int32_t loop_count; -#if defined(ARM_MATH_MVEI) - - loop_count = (block_size + 3) / 4; - uint32_t num_elements = block_size; - - for (int i = 0; i < loop_count; i++) - { - mve_pred16_t p = vctp32q(num_elements); - - int32x4_t input_1 = vldrbq_z_s32(input_1_vect, p); - input_1 = vaddq_n_s32(input_1, input_1_offset); - - int32x4_t input_2 = vldrbq_z_s32(input_2_vect, p); - input_2 = vaddq_n_s32(input_2, input_2_offset); - - int32x4_t res_0 = vmulq_s32(input_1, input_2); - - res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); - - res_0 += vdupq_n_s32(out_offset); - - res_0 = vmaxq_s32(res_0, vdupq_n_s32(out_activation_min)); - res_0 = vminq_s32(res_0, vdupq_n_s32(out_activation_max)); - - vstrbq_p_s32(output, res_0, p); - input_1_vect += 4; - input_2_vect += 4; - output += 4; - num_elements -= 4; - } - -#else - int32_t input_1; - 
int32_t input_2; - int32_t mul_res; - -#if defined(ARM_MATH_DSP) - int32_t a_1, b_1, a_2, b_2; - - int32_t offset_1_packed, offset_2_packed; - - int8_t r1, r2, r3, r4; - - offset_1_packed = (input_1_offset << 16U) | (input_1_offset & 0x0FFFFL); - offset_2_packed = (input_2_offset << 16U) | (input_2_offset & 0x0FFFFL); - - loop_count = block_size >> 2; - - while (loop_count > 0U) - { - /* 4 outputs are calculated in one loop. The order of calculation is follows the order of output sign extension - intrinsic */ - input_1_vect = read_and_pad_reordered(input_1_vect, &b_1, &a_1); - input_2_vect = read_and_pad_reordered(input_2_vect, &b_2, &a_2); - - a_1 = __SADD16(a_1, offset_1_packed); - b_1 = __SADD16(b_1, offset_1_packed); - - a_2 = __SADD16(a_2, offset_2_packed); - b_2 = __SADD16(b_2, offset_2_packed); - - /* Mul 1 */ - input_1 = (int16_t)(b_1 & 0x0FFFFL); - input_2 = (int16_t)(b_2 & 0x0FFFFL); - - mul_res = input_1 * input_2; - mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; - - mul_res = MAX(mul_res, out_activation_min); - mul_res = MIN(mul_res, out_activation_max); - r1 = (q7_t)mul_res; - - /* Mul 3 */ - input_1 = (int16_t)((b_1 >> 16U) & 0x0FFFFL); - input_2 = (int16_t)((b_2 >> 16U) & 0x0FFFFL); - - mul_res = input_1 * input_2; - mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; - mul_res = MAX(mul_res, out_activation_min); - mul_res = MIN(mul_res, out_activation_max); - r3 = (q7_t)mul_res; - - /* Mul 2 */ - input_1 = (int16_t)(a_1 & 0x0FFFFL); - input_2 = (int16_t)(a_2 & 0x0FFFFL); - - mul_res = input_1 * input_2; - mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; - mul_res = MAX(mul_res, out_activation_min); - mul_res = MIN(mul_res, out_activation_max); - r2 = (q7_t)mul_res; - - /* Mul 4 */ - input_1 = (int16_t)((a_1 >> 16U) & 0x0FFFFL); - input_2 = (int16_t)((a_2 >> 16U) & 0x0FFFFL); - - mul_res = input_1 * input_2; - mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; - 
mul_res = MAX(mul_res, out_activation_min); - mul_res = MIN(mul_res, out_activation_max); - r4 = (q7_t)mul_res; - - write_q7x4_ia(&output, __PACKq7(r1, r2, r3, r4)); - - loop_count--; - } - - loop_count = block_size & 0x3; -#else - loop_count = block_size; -#endif - - while (loop_count > 0U) - { - /* C = A * B */ - - input_1 = *input_1_vect++ + input_1_offset; - input_2 = *input_2_vect++ + input_2_offset; - - mul_res = input_1 * input_2; - mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; - - mul_res = MAX(mul_res, out_activation_min); - mul_res = MIN(mul_res, out_activation_max); - - *output++ = (q7_t)mul_res; - - /* Decrement loop counter */ - loop_count--; - } -#endif - return ARM_MATH_SUCCESS; -} - -/** - * @} end of BasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c b/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c deleted file mode 100644 index ab7cdeb..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_concatenation_s8_y.c - * Description: s8 version of concatenation along the Y axis - * - * $Date: October 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Concatenation - * @{ - */ - - /* - * s8 version of concatenation along the Y axis - * - * Refer to header file for details. - * - */ -void arm_concatenation_s8_y(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_y, - const uint32_t offset_y) -{ - const uint32_t num_iterations = input_z * input_w; - const uint32_t input_copy_size = input_x * input_y; - const uint32_t output_stride = input_x * output_y; - - output += offset_y * input_x; - uint32_t i; - - // Copy per tile - for (i = 0; i < num_iterations; ++i) - { - memcpy(output, input, input_copy_size); - input += input_copy_size; - output += output_stride; - } -} - -/** - * @} end of Concatenation group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c b/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c deleted file mode 100644 index 2ab005f..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_concatenation_s8_z.c - * Description: s8 version of concatenation along the Z axis - * - * $Date: October 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Concatenation - * @{ - */ - - /* - * s8 version of concatenation along the Z axis - * - * Refer to header file for details. 
- * - */ -void arm_concatenation_s8_z(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_z, - const uint32_t offset_z) -{ - const uint32_t input_copy_size = input_x * input_y * input_z; - const uint32_t output_stride = input_x * input_y * output_z; - - output += offset_z * (input_x * input_y); - - uint32_t i; - - for (i = 0; i < input_w; ++i) - { - memcpy(output, input, input_copy_size); - input += input_copy_size; - output += output_stride; - } -} - -/** - * @} end of Concatenation group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c deleted file mode 100644 index bda1198..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_1_x_n_s8.c - * Description: s8 version of 1xN convolution using symmetric quantization. 
- * - * $Date: July 27, 2020 - * $Revision: V.2.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nn_types.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * 1xN s8 convolution function. - * - * Refer header file for details. - * - */ - -arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data) -{ - (void)bias_dims; - arm_status status = ARM_MATH_SUCCESS; - if (output_dims->w % 4 != 0) - { - status = ARM_MATH_SIZE_MISMATCH; - goto out; - } - -#if defined(ARM_MATH_MVEI) - q15_t *buffer_a = (q15_t *)ctx->buf; - - const uint16_t input_x = input_dims->w; - const uint16_t kernel_x = filter_dims->w; - const uint16_t output_x = output_dims->w; - const uint16_t output_ch = output_dims->c; - const uint16_t input_ch = input_dims->c; - const uint16_t pad_x = conv_params->padding.w; - const uint16_t stride_x = conv_params->stride.w; - - const int32_t input_offset = conv_params->input_offset; - const int32_t out_offset = conv_params->output_offset; - const int32_t out_activation_min = conv_params->activation.min; - const int32_t out_activation_max = conv_params->activation.max; - int32_t *output_mult = quant_params->multiplier; - int32_t *output_shift = quant_params->shift; - - for (int i_out_x = 0; i_out_x <= (output_x - 4); i_out_x += 4) - { - int32_t input_begin_idx[4]; - int32_t ker_begin_idx[4]; - int32_t ker_end_idx[4]; - - for (int i = 0; i < 4; i++) - { - const int32_t est_input_x_idx = stride_x * (i_out_x + i) - 
pad_x; - input_begin_idx[i] = MAX(0, est_input_x_idx); - ker_begin_idx[i] = MAX(0, -est_input_x_idx); - ker_end_idx[i] = MIN(kernel_x, input_x - est_input_x_idx); - } - - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32x4_t s_offset; - int32_t acc[4]; - if ((ker_begin_idx[0] != 0) || (ker_end_idx[3] != kernel_x)) - { - int32_t sum_row[4]; - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[0] - ker_begin_idx[0]) * input_ch, - input_data + input_begin_idx[0] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[0] * input_ch), - &sum_row[0], - &acc[0]); - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[1] - ker_begin_idx[1]) * input_ch, - input_data + input_begin_idx[1] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[1] * input_ch), - &sum_row[1], - &acc[1]); - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[2] - ker_begin_idx[2]) * input_ch, - input_data + input_begin_idx[2] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[2] * input_ch), - &sum_row[2], - &acc[2]); - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[3] - ker_begin_idx[3]) * input_ch, - input_data + input_begin_idx[3] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[3] * input_ch), - &sum_row[3], - &acc[3]); - - s_offset = vldrwq_s32(sum_row); - } - else - { - int32_t sum_row; - (void)arm_nn_mat_mul_core_4x_s8(kernel_x * input_ch, - stride_x * input_ch, - input_data + input_begin_idx[0] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch), - &sum_row, - acc); - - s_offset = vdupq_n_s32(sum_row); - } - int32x4_t res = vldrwq_s32(acc); - s_offset = vmulq_n_s32(s_offset, input_offset); - res = vaddq_s32(res, s_offset); - if (bias_data) - { - res = vaddq_n_s32(res, bias_data[i_out_ch]); - } - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = 
vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; - vstrbq_scatter_offset_s32(output_data, scatter_offset, res); - output_data++; - } - output_data += (3 * output_ch); - } - -#else - status = arm_convolve_s8(ctx, - conv_params, - quant_params, - input_dims, - input_data, - filter_dims, - filter_data, - bias_dims, - bias_data, - output_dims, - output_data); -#endif - -out: - /* Return to application */ - return status; -} - -int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims) -{ -#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * sizeof(int16_t); -#else - (void)input_dims; - (void)filter_dims; - return 0; -#endif -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c deleted file mode 100644 index 68f95f7..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_1x1_HWC_q7_fast_nonsquare.c - * Description: Fast Q7 version of 1x1 convolution (non-square shape) - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/** - * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is optimized for convolution with 1x1 kernel size (i.e., dim_kernel_x=1 - * and dim_kernel_y=1). It can be used for the second half of MobileNets [1] after depthwise - * separable convolution. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - * - * [1] MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications - * https://arxiv.org/abs/1704.04861 - */ - -arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - (void)dim_im_in_y; - int16_t i_out_y, i_out_x; - int16_t i_ch_out; - - /* ----------------------- - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0 || dim_kernel_x != 1 || dim_kernel_y != 1 - || padding_x != 0 || padding_y != 0 || stride_x != 1 || stride_y != 1) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - /* This part implements the im2col function */ - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_out_y * dim_im_in_x + i_out_x) * ch_im_in, pBuffer, - ch_im_in); - pBuffer += ch_im_in; - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, 
bufferA, ch_im_out, ch_im_in, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* check if there is left-over for compute */ - if (pBuffer != bufferA) - { - const q7_t *pA = wt; - for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) - { - q31_t sum = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); - const q15_t *pB = bufferA; - /* basically each time it process 4 entries */ - uint16_t colCnt = ch_im_in * dim_kernel_x * dim_kernel_y >> 2; - - while (colCnt) - { - - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad_reordered(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia(&pB); - - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut = (q7_t) __SSAT((sum >> out_shift), 8); - pOut++; - - } - - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0 || dim_kernel_x != 1 || dim_kernel_y != 1 - || padding_x != 0 || padding_y != 0 || stride_x != 1 || stride_y != 1) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out_y; j++) - { - for (k = 0; k < dim_im_out_x; k++) - { - conv_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel_y; m++) - { - for (n = 0; n < dim_kernel_x; n++) - { - // if-for implementation - in_row = stride_y * j + m - padding_y; - in_col = stride_x * k + n - padding_x; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += Im_in[(in_row * dim_im_in_x + in_col) * 
ch_im_in + l] * - wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_y + n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c deleted file mode 100644 index 9474567..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_1x1_s8_fast.c - * Description: Fast q7 version of 1x1 convolution (non-square shape) - * - * $Date: July 27, 2020 - * $Revision: V.2.0.2 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" -#include "arm_nn_types.h" - -#define DIM_KER_X (1U) -#define DIM_KER_Y (1U) - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * Fast s8 version for 1x1 convolution (non-square shape) - * - * Refer header file for details. - * - */ - -arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) -{ - if (input_dims->c % 4 != 0 || - conv_params->padding.w != 0 || conv_params->padding.h != 0 || - conv_params->stride.w != 1 || conv_params->stride.h != 1) - { - return ARM_MATH_SIZE_MISMATCH; - } - - (void)ctx; - (void)filter_dims; - (void)bias_dims; - -#if defined(ARM_MATH_MVEI) - - const int32_t col_len = input_dims->w * input_dims->h * input_dims->n; - const int32_t output_ch = output_dims->c; - const int32_t input_ch = input_dims->c; - const int32_t input_offset = conv_params->input_offset; - const int32_t out_offset = conv_params->output_offset; - const int32_t out_activation_min = conv_params->activation.min; - const int32_t out_activation_max = conv_params->activation.max; - int32_t *output_mult = quant_params->multiplier; - int32_t *output_shift = quant_params->shift; - - for (int i_items = 0; i_items <= (col_len - 4); i_items += 4) - { - for (int i_out_ch = 0; i_out_ch < 
output_ch; i_out_ch++) - { - int32_t sum_row = 0; - int32_t temp_out[4]; - - (void)arm_nn_mat_mul_core_4x_s8(input_ch, - input_ch, - input_data + i_items * input_ch, - filter_data + i_out_ch * input_ch, - &sum_row, - temp_out); - int32x4_t res = vldrwq_s32(temp_out); - if (bias_data) - { - res = vaddq_n_s32(res, bias_data[i_out_ch]); - } - sum_row = sum_row * input_offset; - res = vaddq_n_s32(res, sum_row); - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = {0, (uint32_t)output_ch, - (uint32_t)output_ch * 2, - (uint32_t)output_ch * 3}; - vstrbq_scatter_offset_s32(output_data, scatter_offset, res); - output_data++; - } - output_data += (3 * output_ch); - } - - /* Handle left over elements */ - for (int i_items = (col_len & ~0x3); i_items < col_len; i_items++) - { - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t sum_row = 0; - - int32_t acc; - (void)arm_nn_mat_mul_core_1x_s8(input_ch, - input_data + i_items * input_ch, - filter_data + i_out_ch * input_ch, - &sum_row, - &acc); - if (bias_data) - { - acc += bias_data[i_out_ch]; - } - sum_row = (sum_row * input_offset); - acc += sum_row; - acc = arm_nn_requantize(acc, output_mult[i_out_ch], output_shift[i_out_ch]); - acc += out_offset; - - acc = MAX(acc, out_activation_min); - acc = MIN(acc, out_activation_max); - *output_data++ = acc; - } - } - -#else - /* Run the following code as reference implementation for Cortex-M processors with or without DSP extension */ - - const int32_t lhs_rows = input_dims->w * input_dims->h * input_dims->n; - const int32_t rhs_rows = output_dims->c; - const int32_t rhs_cols = input_dims->c; - - arm_nn_mat_mult_nt_t_s8(input_data, - filter_data, - bias_data, - output_data, - quant_params->multiplier, - quant_params->shift, - lhs_rows, - rhs_rows, - 
rhs_cols, - conv_params->input_offset, - conv_params->output_offset, - conv_params->activation.min, - conv_params->activation.max); - -#endif - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims) -{ - (void)input_dims; - return 0; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c deleted file mode 100644 index f11c517..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q15_basic.c - * Description: Q15 version of convolution - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Basic Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * bufferA size: ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * This basic version is designed to work for any input tensor and weight - * dimension. 
- */ - -arm_status -arm_convolve_HWC_q15_basic(const q15_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - uint16_t im2col_out_pixel_index = 0; - q15_t *pBuffer = bufferA; - q15_t *pOut = Im_out; - q15_t *im_buffer = bufferA; - const q15_t *pA; - int i; - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* Filling 0 for out-of-bound paddings */ - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ - memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); - } - pBuffer += ch_im_in; - } - } - - pA = wt; - for (i = 0; i < ch_im_out; i++) - { - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - const q15_t *pB = im_buffer; - uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; - while (colCnt) - { - q31_t inA1 = arm_nn_read_q15x2_ia(&pA); - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inA2 = arm_nn_read_q15x2_ia(&pA); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB); - - sum = 
__SMLAD(inA1, inB1, sum); - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; - while (colCnt) - { - q15_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut = (q15_t) __SSAT((sum >> out_shift), 16); - pOut++; - } - - /* counter reset */ - pBuffer = im_buffer; - im2col_out_pixel_index++; - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out; j++) - { - for (k = 0; k < dim_im_out; k++) - { - conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel; m++) - { - for (n = 0; n < dim_kernel; n++) - { - in_row = stride * j + m - padding; - in_col = stride * k + n - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c deleted file mode 100644 index 9c4a657..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q15_fast.c - * Description: Fast Q15 version of convolution - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Fast Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or 
ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * - */ - -arm_status -arm_convolve_HWC_q15_fast(const q15_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - q15_t *pBuffer = bufferA; - q15_t *im_buffer = bufferA; - q15_t *pOut = Im_out; - - if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ - memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (i_out_x & 0x1) - { - int i; - /* initialize the matrix pointers for A */ - const q15_t 
*pA = wt; - - /* set up the second output pointers */ - q15_t *pOut2 = pOut + ch_im_out; - - /* this loop over rows in A */ - for (i = 0; i < ch_im_out; i += 2) - { - /* setup pointers for B */ - const q15_t *pB = im_buffer; - const q15_t *pB2 = pB + ch_im_in * dim_kernel * dim_kernel; - - /* aling the second pointer for A */ - const q15_t *pA2 = pA + ch_im_in * dim_kernel * dim_kernel; - - /* init the sum with bias */ - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 1; - /* accumulate over the vector */ - while (colCnt) - { - q31_t inA1 = arm_nn_read_q15x2_ia(&pA); - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inA2 = arm_nn_read_q15x2_ia(&pA2); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); - - sum = __SMLAD(inA1, inB1, sum); - sum2 = __SMLAD(inA1, inB2, sum2); - sum3 = __SMLAD(inA2, inB1, sum3); - sum4 = __SMLAD(inA2, inB2, sum4); - - colCnt--; - } /* while over colCnt */ - colCnt = ch_im_in * dim_kernel * dim_kernel & 0x1; - while (colCnt) - { - q15_t inA1 = *pA++; - q15_t inB1 = *pB++; - q15_t inA2 = *pA2++; - q15_t inB2 = *pB2++; - - sum += inA1 * inB1; - sum2 += inA1 * inB2; - sum3 += inA2 * inB1; - sum4 += inA2 * inB2; - colCnt--; - } /* while over colCnt */ - *pOut++ = (q15_t) __SSAT(sum >> out_shift, 16); - *pOut++ = (q15_t) __SSAT(sum3 >> out_shift, 16); - *pOut2++ = (q15_t) __SSAT(sum2 >> out_shift, 16); - *pOut2++ = (q15_t) __SSAT(sum4 >> out_shift, 16); - - /* skip the row computed with A2 */ - pA += ch_im_in * dim_kernel * dim_kernel; - } /* for over ch_im_out */ - - pOut += ch_im_out; - /* counter reset */ - pBuffer = im_buffer; - } - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i, j, k, l, m, n; - 
int conv_out; - int in_row, in_col; - - if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out; j++) - { - for (k = 0; k < dim_im_out; k++) - { - conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel; m++) - { - for (n = 0; n < dim_kernel; n++) - { - in_row = stride * j + m - padding; - in_col = stride * k + n - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c deleted file mode 100644 index 4889f49..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q15_fast.c - * Description: Fast Q15 version of convolution - * - * $Date: 24. May 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Fast Q15 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space 
for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * - */ - -arm_status -arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q15_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q15_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - q15_t *pBuffer = bufferA; - q15_t *im_buffer = bufferA; - q15_t *pOut = Im_out; - - if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in_x + 
i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ - memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (i_out_x & 0x1) - { - int i; - /* initialize the matrix pointers for A */ - const q15_t *pA = wt; - - /* set up the second output pointers */ - q15_t *pOut2 = pOut + ch_im_out; - - /* this loop over rows in A */ - for (i = 0; i < ch_im_out; i += 2) - { - /* setup pointers for B */ - const q15_t *pB = im_buffer; - const q15_t *pB2 = pB + ch_im_in * dim_kernel_y * dim_kernel_x; - - /* aling the second pointer for A */ - const q15_t *pA2 = pA + ch_im_in * dim_kernel_y * dim_kernel_x; - - /* init the sum with bias */ - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = ch_im_in * dim_kernel_y * dim_kernel_x >> 1; - /* accumulate over the vector */ - while (colCnt) - { - q31_t inA1 = arm_nn_read_q15x2_ia(&pA); - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inA2 = arm_nn_read_q15x2_ia(&pA2); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); - - sum = __SMLAD(inA1, inB1, sum); - sum2 = __SMLAD(inA1, inB2, sum2); - sum3 = __SMLAD(inA2, inB1, sum3); - sum4 = __SMLAD(inA2, inB2, sum4); - - colCnt--; - } /* while over colCnt */ - colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x1; - while (colCnt) - { - q15_t inA1 = *pA++; - q15_t inB1 = *pB++; - q15_t inA2 = *pA2++; - q15_t inB2 = *pB2++; - - sum += inA1 * inB1; - sum2 += inA1 * inB2; - sum3 += inA2 * inB1; - sum4 += inA2 * inB2; - colCnt--; - } /* while over colCnt */ - *pOut++ = (q15_t) __SSAT(sum >> out_shift, 16); - *pOut++ = (q15_t) __SSAT(sum3 >> out_shift, 16); - *pOut2++ = (q15_t) __SSAT(sum2 >> out_shift, 16); - *pOut2++ = (q15_t) __SSAT(sum4 >> out_shift, 16); - - /* skip the row 
computed with A2 */ - pA += ch_im_in * dim_kernel_y * dim_kernel_x; - } /* for over ch_im_out */ - - pOut += ch_im_out; - /* counter reset */ - pBuffer = im_buffer; - } - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out_y; j++) - { - for (k = 0; k < dim_im_out_x; k++) - { - conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel_y; m++) - { - for (n = 0; n < dim_kernel_x; n++) - { - in_row = stride_y * j + m - padding_y; - in_col = stride_x * k + n - padding_x; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel_x * dim_kernel_y + (m * dim_kernel_x + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c deleted file mode 100644 index 0a55d72..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q7_RGB.c - * Description: Q7 version of convolution for RGB image - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Q7 convolution function for RGB image - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or 
ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in equals 3 - * - * This kernel is written exclusively for convolution with ch_im_in - * equals 3. This applies on the first layer of CNNs which has input - * image with RGB format. - */ - -arm_status -arm_convolve_HWC_q7_RGB(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, const uint16_t dim_im_out, q15_t * bufferA, q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - // check if number of input channels is 3 - if (ch_im_in != 3) - { - return ARM_MATH_SIZE_MISMATCH; - } - // This part implements the im2col function - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* Equivalent to arm_fill_q15(0, pBuffer, ch_im_in) with assumption: ch_im_in = 3 */ - *__SIMD32(pBuffer) = 0x0; - *(pBuffer + 2) = 0; - pBuffer += 3; - } else - { - /* - * Equivalent to: - * arm_q7_to_q15_no_shift( 
(q7_t*)Im_in+(i_ker_y*dim_im_in+i_ker_x)*3, pBuffer, 3); - */ - - const q7_t *pPixel = Im_in + (i_ker_y * dim_im_in + i_ker_x) * 3; - q31_t buf = arm_nn_read_q7x4(pPixel); - - union arm_nnword top; - union arm_nnword bottom; - - top.word = __SXTB16(buf); - bottom.word = __SXTB16(__ROR(buf, 8)); - -#ifndef ARM_MATH_BIG_ENDIAN - /* - * little-endian, | omit | 3rd | 2nd | 1st | - * MSB LSB - * top | 3rd | 1st |; bottom | omit | 2nd | - * - * version 1, need to swap 2nd and 3rd weight - * *__SIMD32(pBuffer) = top.word; - * *(pBuffer+2) = bottom.half_words[0]; - * - * version 2, no weight shuffling required - */ - *pBuffer++ = top.half_words[0]; - *__SIMD32(pBuffer) = __PKHBT(bottom.word, top.word, 0); -#else - /* - * big-endian, | 1st | 2nd | 3rd | omit | - * MSB LSB - * top | 2nd | omit |; bottom | 1st | 3rd | - * - * version 1, need to swap 2nd and 3rd weight - * *__SIMD32(pBuffer) = bottom.word; - * *(pBuffer+2) = top.half_words[1]; - * - * version 2, no weight shuffling required - */ - *pBuffer++ = bottom.half_words[0]; - *__SIMD32(pBuffer) = __PKHTB(top.word, bottom.word, 0); -#endif - pBuffer += 2; - } - } - } - - if (pBuffer == bufferA + 2 * 3 * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, - ch_im_out, - 3 * dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* left-over because odd number of output pixels */ - if (pBuffer != bufferA) - { - const q7_t *pA = wt; - int i; - - for (i = 0; i < ch_im_out; i++) - { - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - q15_t *pB = bufferA; - /* basically each time it process 4 entries */ - uint16_t colCnt = 3 * dim_kernel * dim_kernel >> 2; - - while (colCnt) - { - - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia((const q15_t **)&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia((const q15_t **)&pB); - sum = 
__SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = 3 * dim_kernel * dim_kernel & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - } - } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - // check if number of input channels is 3 - if (ch_im_in != 3) - { - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out; j++) - { - for (k = 0; k < dim_im_out; k++) - { - conv_out = (bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel; m++) - { - for (n = 0; n < dim_kernel; n++) - { - /* if-for implementation */ - in_row = stride * j + m - padding; - in_col = stride * k + n - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return (ARM_MATH_SUCCESS); -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c deleted file mode 100644 index d416d0a..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q7_basic.c - * Description: Q7 version of convolution - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Basic Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - 
* - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * This basic version is designed to work for any input tensor and weight - * dimension. - */ - -arm_status -arm_convolve_HWC_q7_basic(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* Filling 0 for out-of-bound paddings */ - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - /* Copying the pixel data to column */ - arm_q7_to_q15_no_shift((q7_t *) - Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - /* Computation is filed for every 2 columns */ - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, - ch_im_out, - ch_im_in * - dim_kernel * dim_kernel, bias_shift, 
out_shift, bias, pOut); - - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* left-over because odd number of output pixels */ - if (pBuffer != bufferA) - { - const q7_t *pA = wt; - int i; - - for (i = 0; i < ch_im_out; i++) - { - /* Load the accumulator with bias first */ - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - - /* Point to the beging of the im2col buffer */ - const q15_t *pB = bufferA; - - /* Each time it process 4 entries */ - uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; - - while (colCnt) - { - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia(&pB); - - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - } - } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out; j++) - { - for (k = 0; k < dim_im_out; k++) - { - conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel; m++) - { - for (n = 0; n < dim_kernel; n++) - { - // if-for implementation - in_row = stride * j + m - padding; - in_col = stride * k + n - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return 
ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c deleted file mode 100644 index c5d3ed5..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q7_basic.c - * Description: Q7 version of convolution - * - * $Date: 13. 
July 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Basic Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - */ - -arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB) -{ 
- (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* Filling 0 for out-of-bound paddings */ - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - /* Copying the pixel data to column */ - arm_q7_to_q15_no_shift((q7_t *) - Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - /* Computation is filed for every 2 columns */ - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_y * dim_kernel_x) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, - ch_im_out, - ch_im_in * - dim_kernel_y * dim_kernel_x, bias_shift, out_shift, bias, pOut); - - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* left-over because odd number of output pixels */ - if (pBuffer != bufferA) - { - const q7_t *pA = wt; - int i; - - for (i = 0; i < ch_im_out; i++) - { - /* Load the accumulator with bias first */ - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - - /* Point to the beging of the im2col buffer */ - const q15_t *pB = bufferA; - - /* Each time it process 4 entries */ - uint16_t colCnt = ch_im_in * dim_kernel_y * dim_kernel_x >> 2; - - while 
(colCnt) - { - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia(&pB); - - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - } - } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out_y; j++) - { - for (k = 0; k < dim_im_out_x; k++) - { - conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel_y; m++) - { - for (n = 0; n < dim_kernel_x; n++) - { - // if-for implementation - in_row = stride_y * j + m - padding_y; - in_col = stride_x * k + n - padding_x; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + l] * - wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + - (m * dim_kernel_x + n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c deleted file mode 100644 index e270640..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q7_fast.c - * Description: Fast Q7 version of convolution - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - - /** - * @brief Fast Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or 
ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 4 ( because of the SIMD32 read and swap ) - * - * ch_im_out is multipe of 2 ( bacause 2x2 mat_mult kernel ) - * - * The im2col converts the Q7 tensor input into Q15 column, which is stored in - * bufferA. There is reordering happenning during this im2col process with - * arm_q7_to_q15_reordered_no_shift. For every four elements, the second and - * third elements are swapped. - * - * The computation kernel arm_nn_mat_mult_kernel_q7_q15_reordered does the - * GEMM computation with the reordered columns. - * - * To speed-up the determination of the padding condition, we split the - * computation into 3x3 parts, i.e., {top, mid, bottom} X {left, mid, right}. - * This reduces the total number of boundary condition checks and improves - * the data copying performance. 
- */ - -arm_status -arm_convolve_HWC_q7_fast(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - /* - * Here we split the entire matrix into three regions depending on the padding situation - * Top: i_out_y from 0 to padding - 1 - * Middle: i_out_y from padding to dim_im_out-padding-1 - * Bottom: i_out_y from dim_im_out-padding to dim_im_out-1 - */ - - /* top part */ - for (i_out_y = 0; i_out_y < padding; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift - ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { 
- pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, - bufferA, - ch_im_out, - ch_im_in - * - dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* middle part, here we also divide the x into left, mid and right */ - for (; i_out_y < dim_im_out - padding; i_out_y++) - { - - /* left part */ - for (i_out_x = 0; i_out_x < padding; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift - ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, - bufferA, - ch_im_out, - ch_im_in - * - dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - - /* mid part */ - for (; i_out_x < dim_im_out - padding; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in - + - (i_ker_y * - dim_im_in + - i_out_x * - stride - padding) * ch_im_in, pBuffer, ch_im_in * dim_kernel); - pBuffer += ch_im_in * dim_kernel; - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, - bufferA, - ch_im_out, - ch_im_in - * - dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - - /* right 
part */ - for (; i_out_x < dim_im_out; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift - ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, - bufferA, - ch_im_out, - ch_im_in - * - dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - for (; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift - ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, - bufferA, - ch_im_out, - ch_im_in - * - dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* check if there is left-over for compute */ - if (pBuffer != bufferA) - { - 
const q7_t *pA = wt; - int i; - - for (i = 0; i < ch_im_out; i++) - { - q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); - const q15_t *pB = bufferA; - /* each time it process 4 entries */ - uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; - - while (colCnt) - { - - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad_reordered(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut = (q7_t) __SSAT((sum >> out_shift), 8); - pOut++; - - } - - } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out; j++) - { - for (k = 0; k < dim_im_out; k++) - { - conv_out = (bias[i] << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel; m++) - { - for (n = 0; n < dim_kernel; n++) - { - // if-for implementation - in_row = stride * j + m - padding; - in_col = stride * k + n - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += - Im_in[(in_row * dim_im_in + in_col) * ch_im_in + - l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + - n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c deleted file mode 100644 index 2dc94ef..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_HWC_q7_fast_nonsquare.c - * Description: Fast Q7 version of convolution (non-sqaure shape) - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/** - * @brief Fast Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - -arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* ----------------------- - * Here we use bufferA as q15_t internally as computation are done with q15_t level - * im2col are done to output in q15_t format from q7_t input - */ - - q15_t *pBuffer = bufferA; - q7_t *pOut = Im_out; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - /* - * Here we split the entire matrix into three regions depending on the padding situation - * Top: i_out_y from 0 to padding - 1 - * Middle: i_out_y from padding to dim_im_out-padding-1 - * Bottom: i_out_y from dim_im_out-padding to dim_im_out-1 - */ - - /* top part */ - for (i_out_y = 0; i_out_y < padding_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; - i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= 
dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, - pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, - bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* middle part, here we also divide the x into left, mid and right */ - for (; i_out_y < dim_im_out_y - padding_y; i_out_y++) - { - - /* left part */ - for (i_out_x = 0; i_out_x < padding_x; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; - i_ker_x++) - { - if (i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, - pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, - bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - - /* mid part */ - for (; i_out_x < dim_im_out_x - padding_x; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + - 
(i_ker_y * dim_im_in_x + i_out_x * stride_x - padding_x) * ch_im_in, - pBuffer, ch_im_in * dim_kernel_x); - pBuffer += ch_im_in * dim_kernel_x; - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, - bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - - /* right part */ - for (; i_out_x < dim_im_out_x; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; - i_ker_x++) - { - if (i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, - pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, - bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - for (; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - /* This part implements the im2col function */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; - i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q15(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); - } else - { - 
arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, - pBuffer, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) - { - pOut = - arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, - bias_shift, out_shift, bias, pOut); - /* counter reset */ - pBuffer = bufferA; - } - } - } - - /* check if there is left-over for compute */ - if (pBuffer != bufferA) - { - const q7_t *pA = wt; - int i; - for (i = 0; i < ch_im_out; i++) - { - q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - const q15_t *pB = bufferA; - /* basically each time it process 4 entries */ - uint16_t colCnt = ch_im_in * dim_kernel_x * dim_kernel_y >> 2; - - while (colCnt) - { - - q31_t inA1, inA2; - q31_t inB1, inB2; - - pA = read_and_pad_reordered(pA, &inA1, &inA2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA1, inB1, sum); - inB2 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inA2, inB2, sum); - - colCnt--; - } - colCnt = (ch_im_in * dim_kernel_y * dim_kernel_x) & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - sum += inA1 * inB1; - colCnt--; - } - *pOut = (q7_t) __SSAT((sum >> out_shift), 8); - pOut++; - - } - - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i, j, k, l, m, n; - int conv_out; - int in_row, in_col; - - if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) - { - /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; - } - - for (i = 0; i < ch_im_out; i++) - { - for (j = 0; j < dim_im_out_y; j++) - { - for (k = 0; k < dim_im_out_x; k++) - { - conv_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - for (m = 0; m < dim_kernel_y; m++) - { - for (n = 0; n < dim_kernel_x; n++) - { - /* if-for implementation */ - in_row = stride_y * j + m - padding_y; - in_col = stride_x * k + n - padding_x; 
- if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) - { - for (l = 0; l < ch_im_in; l++) - { - conv_out += Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + l] * - wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_x + n) * ch_im_in + l]; - } - } - } - } - Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c deleted file mode 100644 index 56355b3..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_s8.c - * Description: s8 version of convolution using symmetric quantization. 
- * - * $Date: July 27, 2020 - * $Revision: V.2.0.2 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nn_types.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * Basic s8 convolution function. - * - * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels - * are multiples of 4 or atleast greater than 4. - * - */ - -arm_status arm_convolve_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data) -{ - q15_t *buffer_a = (q15_t *)ctx->buf; - - const uint16_t input_batches = input_dims->n; - const uint16_t input_x = input_dims->w; - const uint16_t input_y = input_dims->h; - const uint16_t input_ch = input_dims->c; - const uint16_t kernel_x = filter_dims->w; - const uint16_t kernel_y = filter_dims->h; - const uint16_t output_x = output_dims->w; - const uint16_t output_y = output_dims->h; - const uint16_t output_ch = output_dims->c; - - const uint16_t pad_x = conv_params->padding.w; - const uint16_t pad_y = conv_params->padding.h; - const uint16_t stride_x = conv_params->stride.w; - const uint16_t stride_y = conv_params->stride.h; - - const int32_t input_offset = conv_params->input_offset; - const int32_t out_offset = conv_params->output_offset; - const int32_t out_activation_min = conv_params->activation.min; - const int32_t out_activation_max = conv_params->activation.max; - int32_t *output_mult = quant_params->multiplier; - int32_t *output_shift = quant_params->shift; - - int i_batch; - for 
(i_batch = 0; i_batch < input_batches; i_batch++) - { -#if defined(ARM_MATH_MVEI) - (void)bias_dims; - /* Generate upto four columns from the input tensor a GEMM computation */ - q7_t *im2col_buf = (q7_t *)buffer_a; - q7_t *out = output_data; - int32_t buffer_fill_cnt = 0; - int32_t padded = 0; - const int32_t num_elem = kernel_x * kernel_y * input_ch; - - /* This part implements the im2col function */ - for (int i_out_y = 0; i_out_y < output_y; i_out_y++) - { - for (int i_out_x = 0; i_out_x < output_x; i_out_x++) - { - for (int i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++) - { - for (int i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) - { - memset(im2col_buf, (int8_t)-input_offset, sizeof(q7_t) * input_ch); - padded = 1; - } - else - { - arm_memcpy_q7(im2col_buf, input_data + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch); - } - im2col_buf += input_ch; - } - } - - buffer_fill_cnt++; - - /* Computation is filed for every 4 columns */ - if (buffer_fill_cnt == 4 && (padded == 0)) - { - buffer_fill_cnt = 0; - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t sum_row; - int32_t acc[4]; - - (void)arm_nn_mat_mul_core_4x_s8(num_elem, - num_elem, - (q7_t *)buffer_a, - filter_data + num_elem * i_out_ch, - &sum_row, - acc); - int32x4_t s_offset = vdupq_n_s32(sum_row); - - int32x4_t res = vldrwq_s32(acc); - s_offset = vmulq_n_s32(s_offset, input_offset); - if (bias_data) - { - res = vaddq_n_s32(res, bias_data[i_out_ch]); - } - res = vaddq_s32(res, s_offset); - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; 
- vstrbq_scatter_offset_s32(out, scatter_offset, res); - out++; - } - out += (3 * output_ch); - im2col_buf = (q7_t *)buffer_a; - } - else if (buffer_fill_cnt == 4 && (padded != 0)) - { - buffer_fill_cnt = 0; - out = arm_nn_mat_mult_s8(filter_data, - (q7_t *)buffer_a, - output_ch, - 4, - output_shift, - output_mult, - out_offset, - input_offset, - 0, - out_activation_min, - out_activation_max, - num_elem, - bias_data, - out); - - im2col_buf = (q7_t *)buffer_a; - padded = 0; - } - } - } - /* Handle left over columns */ - if (buffer_fill_cnt != 0) - { - out = arm_nn_mat_mult_s8(filter_data, - (q7_t *)buffer_a, - output_ch, - buffer_fill_cnt, - output_shift, - output_mult, - out_offset, - input_offset, - 0, - out_activation_min, - out_activation_max, - num_elem, - bias_data, - out); - } - -#elif defined(ARM_MATH_DSP) - (void)bias_dims; - int32_t i_out_y, i_out_x, i_ker_y, i_ker_x; - - /* Generate two columns from the input tensor a GEMM computation */ - q15_t *two_column_buf = buffer_a; - q7_t *out = output_data; - - /* This part implements the im2col function */ - for (i_out_y = 0; i_out_y < output_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < output_x; i_out_x++) - { - for (i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) - { - /* Filling 0 for out-of-bound paddings */ - memset(two_column_buf, 0, sizeof(q15_t) * input_ch); - } - else - { - /* Copying the pixel data to column */ - arm_q7_to_q15_with_offset(input_data + (i_ker_y * input_x + i_ker_x) * input_ch, two_column_buf, input_ch, input_offset); - } - two_column_buf += input_ch; - } - } - - /* Computation is filed for every 2 columns */ - if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x) - { - out = - arm_nn_mat_mult_kernel_s8_s16(filter_data, - buffer_a, - 
output_ch, - output_shift, - output_mult, - out_offset, - out_activation_min, - out_activation_max, - input_ch * kernel_y * kernel_x, - bias_data, - out); - - /* counter reset */ - two_column_buf = buffer_a; - } - } - } - - /* left-over because odd number of output pixels */ - if (two_column_buf != buffer_a) - { - const q7_t *ker_a = filter_data; - int i; - - for (i = 0; i < output_ch; i++) - { - /* Load the accumulator with bias first */ - q31_t sum = 0; - if (bias_data) - { - sum = bias_data[i]; - } - - /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */ - const q15_t *ip_as_col = buffer_a; - - /* 4 multiply and accumulates are done in one loop. */ - uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2; - - while (col_count) - { - q31_t ker_a1, ker_a2; - q31_t ip_b1, ip_b2; - - ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2); - - ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); - sum = __SMLAD(ker_a1, ip_b1, sum); - ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); - sum = __SMLAD(ker_a2, ip_b2, sum); - - col_count--; - } - /* Handle left over mac */ - col_count = input_ch * kernel_y * kernel_x & 0x3; - while (col_count) - { - q7_t ker_a1 = *ker_a++; - q15_t ip_b1 = *ip_as_col++; - sum += ker_a1 * ip_b1; - col_count--; - } - - sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]); - sum += out_offset; - sum = MAX(sum, out_activation_min); - sum = MIN(sum, out_activation_max); - *out++ = (q7_t)sum; - } - } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - (void)buffer_a; - int32_t i_out_ch, i_out_y, i_out_x, i_input_ch, i_ker_y, i_ker_x; - int32_t conv_out; - - for (i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - for (i_out_y = 0; i_out_y < output_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < output_x; i_out_x++) - { - conv_out = 0; - - const int32_t base_idx_y = stride_y * i_out_y - pad_y; - const int32_t base_idx_x = stride_x * i_out_x - pad_x; - - const int32_t 
ker_y_start = MAX(0, -base_idx_y); - const int32_t ker_x_start = MAX(0, -base_idx_x); - - const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y); - const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x); - - for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) - { - for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) - { - const int32_t in_row = base_idx_y + i_ker_y; - const int32_t in_col = base_idx_x + i_ker_x; - for (i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) - { - conv_out += - (input_data[(in_row * input_x + in_col) * input_ch + i_input_ch] + input_offset) * - filter_data[i_out_ch * input_ch * kernel_y * kernel_x + - (i_ker_y * kernel_x + i_ker_x) * input_ch + i_input_ch]; - } - } - } - if (bias_data) - { - conv_out += bias_data[i_out_ch]; - } - conv_out = arm_nn_requantize(conv_out, output_mult[i_out_ch], output_shift[i_out_ch]); - conv_out += out_offset; - conv_out = MAX(conv_out, out_activation_min); - conv_out = MIN(conv_out, out_activation_max); - output_data[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int8_t)conv_out; - } - } - } -#endif - /* Advance to the next batch */ - input_data += (input_x * input_y * input_ch); - output_data += (output_x * output_y * output_ch); - } - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims) -{ -#if defined(ARM_MATH_DSP) - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t); -#else - (void)input_dims; - (void)filter_dims; - return 0; -#endif -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c deleted file mode 100644 index c55688c..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c +++ /dev/null 
@@ -1,148 +0,0 @@ -/* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_convolve_wrapper_s8.c - * Description: s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in cmsis-nn to perform the convolution. - * - * $Date: May 18, 2020 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" -#include "arm_nn_types.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * Convolution layer - * - * Refer header file for details. 
- * - */ - -arm_status arm_convolve_wrapper_s8(const cmsis_nn_context* ctx, - const cmsis_nn_conv_params* conv_params, - const cmsis_nn_per_channel_quant_params* quant_params, - const cmsis_nn_dims* input_dims, - const q7_t *input_data, - const cmsis_nn_dims* filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims* bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims* output_dims, - q7_t *output_data) -{ - if ((conv_params->padding.w == 0) && - (conv_params->padding.h == 0) && - (input_dims->c % 4 == 0) && - (conv_params->stride.w == 1) && - (conv_params->stride.h == 1) && - (filter_dims->w == 1) && - (filter_dims->h == 1)) - { - return arm_convolve_1x1_s8_fast(ctx, - conv_params, - quant_params, - input_dims, - input_data, - filter_dims, - filter_data, - bias_dims, - bias_data, - output_dims, - output_data); - } - else if ((output_dims->h == 1) && - (input_dims->h == 1) && - (filter_dims->h == 1) && - (output_dims->w % 4 == 0) && - (input_dims->n == 1)) - { - return arm_convolve_1_x_n_s8(ctx, - conv_params, - quant_params, - input_dims, - input_data, - filter_dims, - filter_data, - bias_dims, - bias_data, - output_dims, - output_data); - } - else - { - return arm_convolve_s8(ctx, - conv_params, - quant_params, - input_dims, - input_data, - filter_dims, - filter_data, - bias_dims, - bias_data, - output_dims, - output_data); - } -} - -int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params* conv_params, - const cmsis_nn_dims* input_dims, - const cmsis_nn_dims* filter_dims, - const cmsis_nn_dims* output_dims) -{ - if ((conv_params->padding.w == 0) && - (conv_params->padding.h == 0) && - (input_dims->c % 4 == 0) && - (conv_params->stride.w == 1) && - (conv_params->stride.h == 1) && - (filter_dims->w == 1) && - (filter_dims->h == 1)) - { - return arm_convolve_1x1_s8_fast_get_buffer_size(input_dims); - } - else if ((output_dims->h == 1) && - (input_dims->h == 1) && - (filter_dims->h == 1) && - (output_dims->w % 4 == 0) && - 
(input_dims->n == 1)) - { - return arm_convolve_1_x_n_s8_get_buffer_size(input_dims, filter_dims); - } - else - { - return arm_convolve_s8_get_buffer_size(input_dims, filter_dims); - } -} - -/** - * @} end of NNConv group - */ - diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c deleted file mode 100644 index fad8bda..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_conv_3x3_s8.c - * Description: Optimized s8 depthwise convolution function for channel - * multiplier of 1 and 3x3 kernel size. 
- * - * $Date: May 14, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnsupportfunctions.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * Optimized s8 depthwise convolution function with constraint that - * in_channel == out_channel and kernel_x == kernel_y == 3 with pads at most 1 - * - * Refer prototype header file for details. - * - */ - -arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) -{ - (void)ctx; - (void)bias_dims; - - const int32_t input_x = input_dims->w; - const int32_t input_y = input_dims->h; - const int32_t input_ch = input_dims->c; - const int32_t output_ch = output_dims->c; - const int32_t pad_x = dw_conv_params->padding.w; - const int32_t pad_y = dw_conv_params->padding.h; - const int32_t stride_x = dw_conv_params->stride.w; - const int32_t stride_y = dw_conv_params->stride.h; - const int32_t *output_shift = quant_params->shift; - const int32_t *output_mult = quant_params->multiplier; - const int32_t output_x = output_dims->w; - const int32_t output_y = output_dims->h; - const int32_t output_offset = dw_conv_params->output_offset; - const int32_t input_offset = dw_conv_params->input_offset; - const int32_t output_activation_min = dw_conv_params->activation.min; - const int32_t output_activation_max = dw_conv_params->activation.max; - - /* Check input constraints input_ch == output_ch */ - if (input_ch != output_ch) - { - return ARM_MATH_SIZE_MISMATCH; - } - /* Check input constraints pad_x <= 1 */ - if 
(pad_x > 1 || filter_dims->w != 3 || filter_dims->h != 3) - { - return ARM_MATH_ARGUMENT_ERROR; - } - - for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) - { - for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) - { - int32_t in_ch = 0; - int32_t ker_w_start = MAX(0, -in_w); - - for (; in_ch <= (input_ch - 4); in_ch += 4) - { - int32_t out_buff0 = bias[in_ch + 0]; - int32_t out_buff1 = bias[in_ch + 1]; - int32_t out_buff2 = bias[in_ch + 2]; - int32_t out_buff3 = bias[in_ch + 3]; - - const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; - const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; - - for (int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) - { - int32_t in_val = 0; - int32_t ker_val = 0; - - if (ker_w_start == 0) - { - in_val = arm_nn_read_q7x4(input_ptr); - ker_val = arm_nn_read_q7x4(kernel_ptr); - - out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; - out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); - out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); - out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); - } - - in_val = arm_nn_read_q7x4(input_ptr + input_ch); - ker_val = arm_nn_read_q7x4(kernel_ptr + input_ch); - - out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; - out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); - out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); - out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); - - if ((input_x - in_w) >= 3) - { - in_val = arm_nn_read_q7x4(input_ptr + (input_ch << 1)); - ker_val = arm_nn_read_q7x4(kernel_ptr + (input_ch << 1)); - - out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; - out_buff1 += 
((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); - out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); - out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); - } - - input_ptr += (input_ch * input_x); - kernel_ptr += (input_ch * 3); - } - - out_buff0 = arm_nn_requantize(out_buff0, output_mult[in_ch + 0], output_shift[in_ch + 0]); - out_buff1 = arm_nn_requantize(out_buff1, output_mult[in_ch + 1], output_shift[in_ch + 1]); - out_buff2 = arm_nn_requantize(out_buff2, output_mult[in_ch + 2], output_shift[in_ch + 2]); - out_buff3 = arm_nn_requantize(out_buff3, output_mult[in_ch + 3], output_shift[in_ch + 3]); - - out_buff0 += output_offset; - out_buff1 += output_offset; - out_buff2 += output_offset; - out_buff3 += output_offset; - - out_buff0 = MIN(MAX(out_buff0, output_activation_min), output_activation_max); - out_buff1 = MIN(MAX(out_buff1, output_activation_min), output_activation_max); - out_buff2 = MIN(MAX(out_buff2, output_activation_min), output_activation_max); - out_buff3 = MIN(MAX(out_buff3, output_activation_min), output_activation_max); - - output[out_idx++] = (int8_t)out_buff0; - output[out_idx++] = (int8_t)out_buff1; - output[out_idx++] = (int8_t)out_buff2; - output[out_idx++] = (int8_t)out_buff3; - } - - // Leftover - for (; in_ch < input_ch; ++in_ch) - { - int32_t out_buff = bias[in_ch]; - - const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; - const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; - - for (int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) - { - if (ker_w_start == 0) - { - out_buff += (*(input_ptr) + input_offset) * *(kernel_ptr); - } - - out_buff += (*(input_ptr + input_ch) + input_offset) * *(kernel_ptr + input_ch); - - if ((input_x - in_w) >= 3) - { - out_buff += (*(input_ptr + (input_ch << 1)) + input_offset) * *(kernel_ptr + (input_ch << 1)); - } - - input_ptr += (input_ch 
* input_x); - kernel_ptr += (input_ch * 3); - } - - out_buff = arm_nn_requantize(out_buff, output_mult[in_ch], output_shift[in_ch]); - out_buff += output_offset; - out_buff = MIN(MAX(out_buff, output_activation_min), output_activation_max); - output[out_idx++] = (int8_t)out_buff; - } - } - } - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c deleted file mode 100644 index 9f2ab11..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_conv_s8.c - * Description: s8 version of depthwise convolution. 
- * - * $Date: May 14, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -static void depthwise_conv_s8_mult_4(const int8_t *input, - const int32_t input_x, - const int32_t input_y, - const int32_t input_ch, - const int8_t *kernel, - const int32_t output_ch, - const int32_t ch_mult, - const int32_t kernel_x, - const int32_t kernel_y, - const int32_t pad_x, - const int32_t pad_y, - const int32_t stride_x, - const int32_t stride_y, - const int32_t *bias, - int8_t *output, - const int32_t *output_shift, - const int32_t *output_mult, - const int32_t output_x, - const int32_t output_y, - const int32_t output_offset, - const int32_t input_offset, - const int32_t output_activation_min, - const int32_t output_activation_max) -{ - for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) - { - for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) - { - for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; ++in_ch, out_ch += ch_mult) - { - for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) - { - int32_t out_buff[4]; - - out_buff[0] = bias[out_ch + 0 + mult_tile]; - out_buff[1] = bias[out_ch + 1 + mult_tile]; - out_buff[2] = bias[out_ch + 2 + mult_tile]; - out_buff[3] = bias[out_ch + 3 + mult_tile]; - - for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) - { - int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; - int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; - - for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); ++ker_w, ker_idx += output_ch) - { - int32_t 
in_val = input[in_idx + ker_w * input_ch] + input_offset; - out_buff[0] += in_val * kernel[ker_idx + 0 + mult_tile]; - out_buff[1] += in_val * kernel[ker_idx + 1 + mult_tile]; - out_buff[2] += in_val * kernel[ker_idx + 2 + mult_tile]; - out_buff[3] += in_val * kernel[ker_idx + 3 + mult_tile]; - } - } -#if defined(ARM_MATH_MVEI) - (void)out_idx; - int32x4_t res = vldrwq_s32(out_buff); - res = arm_requantize_mve_32x4(res, vldrwq_s32(&output_mult[out_ch + mult_tile]), vldrwq_s32(&output_shift[out_ch + mult_tile])); - res = vaddq_n_s32(res, output_offset); - - res = vmaxq_s32(res, vdupq_n_s32(output_activation_min)); - res = vminq_s32(res, vdupq_n_s32(output_activation_max)); - vstrbq_s32(output, res); - output += 4; -#else - out_buff[0] = arm_nn_requantize(out_buff[0], output_mult[out_ch + 0 + mult_tile], output_shift[out_ch + 0 + mult_tile]); - out_buff[1] = arm_nn_requantize(out_buff[1], output_mult[out_ch + 1 + mult_tile], output_shift[out_ch + 1 + mult_tile]); - out_buff[2] = arm_nn_requantize(out_buff[2], output_mult[out_ch + 2 + mult_tile], output_shift[out_ch + 2 + mult_tile]); - out_buff[3] = arm_nn_requantize(out_buff[3], output_mult[out_ch + 3 + mult_tile], output_shift[out_ch + 3 + mult_tile]); - - out_buff[0] += output_offset; - out_buff[1] += output_offset; - out_buff[2] += output_offset; - out_buff[3] += output_offset; - - out_buff[0] = MIN(MAX(out_buff[0], output_activation_min), output_activation_max); - out_buff[1] = MIN(MAX(out_buff[1], output_activation_min), output_activation_max); - out_buff[2] = MIN(MAX(out_buff[2], output_activation_min), output_activation_max); - out_buff[3] = MIN(MAX(out_buff[3], output_activation_min), output_activation_max); - - output[out_idx++] = (int8_t)out_buff[0]; - output[out_idx++] = (int8_t)out_buff[1]; - output[out_idx++] = (int8_t)out_buff[2]; - output[out_idx++] = (int8_t)out_buff[3]; - -#endif - } - } - } - } -} - -static void depthwise_conv_s8_generic(const q7_t *input, - const uint16_t input_x, - const uint16_t 
input_y, - const uint16_t input_ch, - const q7_t *kernel, - const uint16_t output_ch, - const uint16_t ch_mult, - const uint16_t kernel_x, - const uint16_t kernel_y, - const uint16_t pad_x, - const uint16_t pad_y, - const uint16_t stride_x, - const uint16_t stride_y, - const int32_t *bias, - q7_t *output, - const int32_t *output_shift, - const int32_t *output_mult, - const uint16_t output_x, - const uint16_t output_y, - const int32_t output_offset, - const int32_t input_offset, - const int32_t output_activation_min, - const int32_t output_activation_max) -{ - (void)output_ch; - int i_out = 0; - for (int i_out_y = 0; i_out_y < output_y; i_out_y++) - { - const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; - for (int i_out_x = 0; i_out_x < output_x; i_out_x++) - { - const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; - for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) - { - for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) - { - const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; - int32_t acc_0; - /* Condition for kernel start dimension: (base_idx_ + ker__start) >= 0 */ - const int ker_y_start = MAX(0, -base_idx_y); - const int ker_x_start = MAX(0, -base_idx_x); - /* Condition for kernel end dimension: (base_idx_ + ker__end) < input_ */ - const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); - const int ker_x_end = MIN(kernel_x, input_x - base_idx_x); - acc_0 = bias[idx_out_ch]; - - for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) - { - const int32_t idx_y = base_idx_y + i_ker_y; - for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) - { - const int32_t idx_x = base_idx_x + i_ker_x; - int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; - int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; - - acc_0 += (input[idx_0] + input_offset) * kernel[ker_idx_0]; - } - } - - /* Requantize and clamp output to provided range */ - acc_0 = arm_nn_requantize(acc_0, 
output_mult[idx_out_ch], output_shift[idx_out_ch]); - acc_0 += output_offset; - acc_0 = MAX(acc_0, output_activation_min); - acc_0 = MIN(acc_0, output_activation_max); - - output[i_out++] = acc_0; - } - } - } - } -} - -/* - * Basic s8 depthwise convolution function. - * - * Refer header file for details. - * Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. - * - */ -arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) -{ - (void)dw_conv_params->dilation; - (void)ctx; - - if (dw_conv_params->ch_mult % 4 == 0) - { - depthwise_conv_s8_mult_4(input, input_dims->w, input_dims->h, input_dims->c, kernel, output_dims->c, dw_conv_params->ch_mult, filter_dims->w, filter_dims->h, - dw_conv_params->padding.w, dw_conv_params->padding.h, dw_conv_params->stride.w, dw_conv_params->stride.h, bias, output, - quant_params->shift, quant_params->multiplier, output_dims->w, output_dims->h, dw_conv_params->output_offset, - dw_conv_params->input_offset, dw_conv_params->activation.min, dw_conv_params->activation.max); - } - else - { - depthwise_conv_s8_generic(input, input_dims->w, input_dims->h, input_dims->c, kernel, output_dims->c, dw_conv_params->ch_mult, filter_dims->w, filter_dims->h, - dw_conv_params->padding.w, dw_conv_params->padding.h, dw_conv_params->stride.w, dw_conv_params->stride.h, bias, output, - quant_params->shift, quant_params->multiplier, output_dims->w, output_dims->h, dw_conv_params->output_offset, - dw_conv_params->input_offset, dw_conv_params->activation.min, dw_conv_params->activation.max); - } - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of 
NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c deleted file mode 100644 index 2908b37..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_conv_s8_opt.c - * Description: Optimized s8 depthwise separable convolution function for - * channel multiplier of 1. - * - * $Date: May 29, 2020 - * $Revision: V.2.0.1 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnsupportfunctions.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel - * - * Refer prototype header file for details. 
- * - */ - -arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) -{ - const int32_t input_x = input_dims->w; - const int32_t input_y = input_dims->h; - const int32_t input_ch = input_dims->c; - const int32_t output_ch = output_dims->c; - const int32_t kernel_x = filter_dims->w; - const int32_t kernel_y = filter_dims->h; - const int32_t pad_x = dw_conv_params->padding.w; - const int32_t pad_y = dw_conv_params->padding.h; - const int32_t stride_x = dw_conv_params->stride.w; - const int32_t stride_y = dw_conv_params->stride.h; - const int32_t *output_shift = quant_params->shift; - const int32_t *output_mult = quant_params->multiplier; - const int32_t output_x = output_dims->w; - const int32_t output_y = output_dims->h; - const int32_t output_offset = dw_conv_params->output_offset; - const int32_t input_offset = dw_conv_params->input_offset; - const int32_t output_activation_min = dw_conv_params->activation.min; - const int32_t output_activation_max = dw_conv_params->activation.max; - q15_t *buffer_a = (q15_t *)ctx->buf; - - /* Check input constraints input_ch == output_ch */ - if (input_ch != output_ch) - { - return ARM_MATH_SIZE_MISMATCH; - } -#ifdef ARM_MATH_MVEI - (void)bias_dims; - /* Generate two columns from the input tensor */ - q7_t *lhs_buffer = (q7_t *)buffer_a; - q7_t *out = output; - int padded = 0; - int buffer_count = 0; - const int32_t kernel_size = kernel_x * kernel_y; - - /* This part implements the im2col function */ - for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) - { - for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, 
i_out_x++) - { - for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) - { - for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) - { - arm_memset_q7(lhs_buffer, (int8_t)-input_offset, (uint32_t)input_ch); - padded = 1; - } - else - { - arm_memcpy_q7(lhs_buffer, input + (i_ker_y * input_x + i_ker_x) * input_ch, (uint32_t)input_ch); - } - lhs_buffer += input_ch; - } - } - buffer_count++; - - if (buffer_count == 4) - { - lhs_buffer = (q7_t *)buffer_a; - if (padded == 0) - { - out = arm_nn_depthwise_conv_nt_t_s8(lhs_buffer, - kernel, - input_offset, - input_ch, - output_shift, - output_mult, - output_offset, - output_activation_min, - output_activation_max, - kernel_size, - bias, - out); - } - else - { - out = arm_nn_depthwise_conv_nt_t_padded_s8(lhs_buffer, - kernel, - input_offset, - input_ch, - output_shift, - output_mult, - output_offset, - output_activation_min, - output_activation_max, - kernel_size, - bias, - out); - padded = 0; - } - buffer_count = 0; - } - } - } - - /* Handle left over buffers */ - lhs_buffer = (q7_t *)buffer_a; - - for (int i_buf = 0; i_buf < buffer_count; i_buf++) - { - int32_t loop_count = (input_ch + 3) / 4; - - int32_t num_ch_to_process = input_ch; - for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; - num_ch_to_process -= 4, offset += 4, i_loop_cnt++) - { - const int8_t *col_0 = lhs_buffer + (kernel_size * input_ch * i_buf) + offset; - const int8_t *row_0 = kernel + offset; - int32x4_t out_0 = vldrwq_s32(&bias[offset]); - - for (int i_ker = 0; i_ker < kernel_size; i_ker++) - { - const int32x4_t ker_0 = vldrbq_s32(row_0); - - int32x4_t ip_0 = vldrbq_s32(col_0); - ip_0 = vaddq_n_s32(ip_0, input_offset); - out_0 += vmulq_s32(ip_0, ker_0); - - col_0 += input_ch; - row_0 += input_ch; - } - - const int32x4_t mult = vldrwq_s32(&output_mult[offset]); - const int32x4_t shift = vldrwq_s32(&output_shift[offset]); 
- - out_0 = arm_requantize_mve_32x4(out_0, mult, shift); - out_0 = vaddq_n_s32(out_0, output_offset); - out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); - out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); - mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); - vstrbq_p_s32(out, out_0, p); - - out += 4; - } - - const int tail_ch = input_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } - } - -#elif defined(ARM_MATH_DSP) - (void)bias_dims; - /* Run the following code in cores using DSP extension */ - q15_t *const col_buffer_start = buffer_a; - q15_t *col_buffer = col_buffer_start; - const int32_t *const bias_start_pos = bias; - const q31_t *const out_mult_start_pos = output_mult; - const q31_t *const out_shift_start_pos = output_shift; - uint16_t row_count; - uint16_t row_shift; - - for (int i_out_y = 0; i_out_y < output_y; i_out_y++) - { - const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; - for (int i_out_x = 0; i_out_x < output_x; i_out_x++) - { - const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; - - /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than along - the x axis */ - const int ker_y_start = MAX(0, -base_idx_y); - /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ - const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); - - int32_t index = 0; - if (ker_y_start != 0) - { - memset(&col_buffer[index], 0, (kernel_x * input_ch) * ker_y_start * sizeof(q15_t)); - index += (kernel_x * input_ch) * ker_y_start; - } - - for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) - { - const int32_t idx_y = base_idx_y + i_ker_y; - - for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) - { - const int32_t idx_x = base_idx_x + i_ker_x; - if (idx_x < 0 || idx_x >= input_x) - { - memset(&col_buffer[index], 0, input_ch * sizeof(q15_t)); - } - else - { - arm_q7_to_q15_with_offset((q7_t *)input + (idx_y * input_x + idx_x) * input_ch, 
&col_buffer[index], input_ch, input_offset); - } - index += input_ch; - } - } - - const int diff = kernel_y - ker_y_end; - if (diff != 0) - { - memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(q15_t)); - } - - row_count = output_ch / 4; - row_shift = 0; - bias = bias_start_pos; - output_mult = out_mult_start_pos; - output_shift = out_shift_start_pos; - - while (row_count) - { - q31_t sum = *bias++; - q31_t sum_2 = *bias++; - q31_t sum_3 = *bias++; - q31_t sum_4 = *bias++; - - uint16_t col_count = (kernel_x * kernel_y) / 2; - q15_t *col_pos = col_buffer_start + row_shift; - const q7_t *row_pos = kernel + row_shift; - row_shift += 4; - - while (col_count) - { - /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to - use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ - /* Note: variable names can be improved here to align with rows and columns. */ - q31_t ip_a1, ip_a2, ip_b1, ip_b2, op_a, op_b, op_c; - /* Read 4 weights */ - ip_b1 = arm_nn_read_q7x4(row_pos); - ip_a1 = arm_nn_read_q7x4(row_pos + input_ch); - op_a = arm_nn_read_q15x2(col_pos); - op_b = arm_nn_read_q15x2(col_pos + input_ch); - - ip_a2 = __SXTB16(ip_b1); - ip_b1 = __SXTB16(__ROR(ip_b1, 8)); - - ip_b2 = __SXTB16(ip_a1); - ip_a1 = __SXTB16(__ROR(ip_a1, 8)); - - op_c = __PKHBT(op_b, op_a, 16); - op_a = __PKHTB(op_b, op_a, 16); - op_b = __PKHBT(ip_b2, ip_a2, 16); - sum = __SMLAD(op_c, op_b, sum); - - op_b = __PKHBT(ip_b1, ip_a1, 16); - sum_2 = __SMLAD(op_a, op_b, sum_2); - - op_a = arm_nn_read_q15x2(col_pos + 2); - op_b = arm_nn_read_q15x2(col_pos + input_ch + 2); - - op_c = __PKHBT(op_b, op_a, 16); - op_a = __PKHTB(op_b, op_a, 16); - op_b = __PKHTB(ip_a2, ip_b2, 16); - sum_3 = __SMLAD(op_c, op_b, sum_3); - - op_b = __PKHTB(ip_a1, ip_b1, 16); - sum_4 = __SMLAD(op_a, op_b, sum_4); - - row_pos += input_ch << 1; - col_pos += input_ch << 1; - col_count--; - } - - col_count = (kernel_x * kernel_y) & 0x1; - while 
(col_count) - { - sum += row_pos[0] * col_pos[0]; - sum_2 += row_pos[1] * col_pos[1]; - sum_3 += row_pos[2] * col_pos[2]; - sum_4 += row_pos[3] * col_pos[3]; - - row_pos += input_ch; - col_pos += input_ch; - - col_count--; - } - sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); - sum += output_offset; - sum = MAX(sum, output_activation_min); - sum = MIN(sum, output_activation_max); - *output++ = (q7_t)sum; - - sum_2 = arm_nn_requantize(sum_2, *output_mult++, *output_shift++); - sum_2 += output_offset; - sum_2 = MAX(sum_2, output_activation_min); - sum_2 = MIN(sum_2, output_activation_max); - *output++ = (q7_t)sum_2; - sum_3 = arm_nn_requantize(sum_3, *output_mult++, *output_shift++); - sum_3 += output_offset; - sum_3 = MAX(sum_3, output_activation_min); - sum_3 = MIN(sum_3, output_activation_max); - *output++ = (q7_t)sum_3; - - sum_4 = arm_nn_requantize(sum_4, *output_mult++, *output_shift++); - sum_4 += output_offset; - sum_4 = MAX(sum_4, output_activation_min); - sum_4 = MIN(sum_4, output_activation_max); - *output++ = (q7_t)sum_4; - - row_count--; - } - - row_count = output_ch & 0x3; - while (row_count) - { - q15_t *col_pos = col_buffer_start + row_shift; - const q7_t *row_pos = kernel + row_shift; - q31_t sum = *bias++; - const uint16_t col_count = (kernel_x * kernel_y); - row_shift += 1; - - for (int i = 0; i < col_count; i++) - { - sum += row_pos[i * input_ch] * col_pos[i * input_ch]; - } - sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); - sum += output_offset; - sum = MAX(sum, output_activation_min); - sum = MIN(sum, output_activation_max); - *output++ = (q7_t)sum; - - row_count--; - } - - // clear counter and pointers - col_buffer = col_buffer_start; - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - return arm_depthwise_conv_s8(ctx, - dw_conv_params, - quant_params, - input_dims, - input, - filter_dims, - kernel, - bias_dims, - bias, - output_dims, - output); -#endif /* 
ARM_MATH_MVEI | ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims) -{ -#if defined(ARM_MATH_MVEI) - /* The + 4 accounts for out of bounds read of the lhs buffers in the *_nt_t_* functions. */ - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t) + 4; -#elif defined(ARM_MATH_DSP) - return (input_dims->c * filter_dims->w * filter_dims->h) * sizeof(int16_t); -#else - (void)input_dims; - (void)filter_dims; - return 0; -#endif -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c deleted file mode 100644 index 8f374aa..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_conv_u8_basic_ver1.c - * Description: u8 depthwise convolution function - * - * $Date: May 29, 2020 - * $Revision: V.1.1.0 - * - * Target : Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -static void depthwise_conv_u8_mult_4(const uint8_t *input, - const int32_t input_x, - const int32_t input_y, - const int32_t input_ch, - const uint8_t *kernel, - const int32_t output_ch, - const int32_t ch_mult, - const int32_t kernel_x, - const int32_t kernel_y, - const int32_t pad_x, - const int32_t pad_y, - const int32_t stride_x, - const int32_t stride_y, - const int32_t *bias, - uint8_t *output, - const int32_t output_shift, - const int32_t output_mult, - const int32_t output_x, - const int32_t output_y, - const int32_t output_offset, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_activation_min, - const int32_t output_activation_max) -{ - for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) - { - for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) - { - for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; ++in_ch, out_ch += ch_mult) - { - for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) - { - int32_t out_buff[4]; - - out_buff[0] = 0; - out_buff[1] = 0; - out_buff[2] = 0; - out_buff[3] = 0; - - for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) - { - int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; - int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; - - for (int32_t ker_w = ker_w_start; 
ker_w < MIN(kernel_x, input_x - in_w); ++ker_w, ker_idx += output_ch) - { - int32_t in_val = input[in_idx + ker_w * input_ch] + input_offset; - out_buff[0] += in_val * (kernel[ker_idx + 0 + mult_tile] + filter_offset); - out_buff[1] += in_val * (kernel[ker_idx + 1 + mult_tile] + filter_offset); - out_buff[2] += in_val * (kernel[ker_idx + 2 + mult_tile] + filter_offset); - out_buff[3] += in_val * (kernel[ker_idx + 3 + mult_tile] + filter_offset); - } - } - - if (bias != NULL) - { - out_buff[0] += bias[out_ch + 0 + mult_tile]; - out_buff[1] += bias[out_ch + 1 + mult_tile]; - out_buff[2] += bias[out_ch + 2 + mult_tile]; - out_buff[3] += bias[out_ch + 3 + mult_tile]; - } - out_buff[0] = arm_nn_requantize(out_buff[0], output_mult, output_shift); - out_buff[1] = arm_nn_requantize(out_buff[1], output_mult, output_shift); - out_buff[2] = arm_nn_requantize(out_buff[2], output_mult, output_shift); - out_buff[3] = arm_nn_requantize(out_buff[3], output_mult, output_shift); - - out_buff[0] += output_offset; - out_buff[1] += output_offset; - out_buff[2] += output_offset; - out_buff[3] += output_offset; - - out_buff[0] = MIN(MAX(out_buff[0], output_activation_min), output_activation_max); - out_buff[1] = MIN(MAX(out_buff[1], output_activation_min), output_activation_max); - out_buff[2] = MIN(MAX(out_buff[2], output_activation_min), output_activation_max); - out_buff[3] = MIN(MAX(out_buff[3], output_activation_min), output_activation_max); - - output[out_idx++] = (uint8_t)out_buff[0]; - output[out_idx++] = (uint8_t)out_buff[1]; - output[out_idx++] = (uint8_t)out_buff[2]; - output[out_idx++] = (uint8_t)out_buff[3]; - } - } - } - } -} - -static void depthwise_conv_u8_generic(const uint8_t *input, - const int32_t input_x, - const int32_t input_y, - const int32_t input_ch, - const uint8_t *kernel, - const int32_t output_ch, - const int32_t ch_mult, - const int32_t kernel_x, - const int32_t kernel_y, - const int32_t pad_x, - const int32_t pad_y, - const int32_t stride_x, - const 
int32_t stride_y, - const int32_t *bias, - uint8_t *output, - const int32_t output_shift, - const int32_t output_mult, - const int32_t output_x, - const int32_t output_y, - const int32_t output_offset, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_activation_min, - const int32_t output_activation_max) -{ - (void)output_ch; - int i_out = 0; - for (int i_out_y = 0; i_out_y < output_y; i_out_y++) - { - const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; - for (int i_out_x = 0; i_out_x < output_x; i_out_x++) - { - const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; - for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) - { - for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) - { - const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; - int32_t acc_0; - /* Condition for kernel start dimension: (base_idx_ + ker__start) >= 0 */ - const int ker_y_start = MAX(0, -base_idx_y); - const int ker_x_start = MAX(0, -base_idx_x); - /* Condition for kernel end dimension: (base_idx_ + ker__end) < input_ */ - const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); - const int ker_x_end = MIN(kernel_x, input_x - base_idx_x); - acc_0 = 0; - - for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) - { - const int32_t idx_y = base_idx_y + i_ker_y; - for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) - { - const int32_t idx_x = base_idx_x + i_ker_x; - int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; - int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; - - acc_0 += (input[idx_0] + input_offset) * (kernel[ker_idx_0] + filter_offset); - } - } - if (bias != NULL) - { - acc_0 += bias[idx_out_ch]; - } - - /* Requantize and clamp output to provided range */ - acc_0 = arm_nn_requantize(acc_0, output_mult, output_shift); - acc_0 += output_offset; - acc_0 = MAX(acc_0, output_activation_min); - acc_0 = MIN(acc_0, output_activation_max); - - output[i_out++] 
= acc_0; - } - } - } - } -} - -/** - * @brief uint8 depthwise convolution function with asymmetric quantization - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_ch Channels in input tensor - * @param[in] kernel Pointer to kernel weights - * @param[in] kernel_x Width of kernel - * @param[in] kernel_y Height of kernel - * @param[in] ch_mult Number of channel multiplier - * @param[in] pad_x Padding sizes x - * @param[in] pad_y Padding sizes y - * @param[in] stride_x Convolution stride along the width - * @param[in] stride_y Convolution stride along the height - * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. - * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. - * @param[in] bias Pointer to optional bias values. If no bias is - * availble, NULL is expected - * @param[in] input_offset Input tensor zero offset - * @param[in] filter_offset Kernel tensor zero offset - * @param[in] output_offset Output tensor zero offset - * @param[in,out] output Pointer to output tensor - * @param[in] output_x Width of output tensor - * @param[in] output_y Height of output tensor - * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} - * @param[in] output_activation_max Minimum value to clamp the output to. 
Range : {0, 255} - * @param[in] output_shift Amount of right-shift for output - * @param[in] output_mult Output multiplier for requantization - * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - Not supported dimension of tensors - * ARM_MATH_SUCCESS - Successful operation - * ARM_MATH_ARGUMENT_ERROR - Implementation not available - * - * - */ - -arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_ch, - const uint8_t *kernel, - const uint16_t kernel_x, - const uint16_t kernel_y, - const int16_t ch_mult, - const int16_t pad_x, - const int16_t pad_y, - const int16_t stride_x, - const int16_t stride_y, - const int16_t dilation_x, - const int16_t dilation_y, - const int32_t *bias, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_offset, - uint8_t *output, - const uint16_t output_x, - const uint16_t output_y, - const int32_t output_activation_min, - const int32_t output_activation_max, - const int32_t output_shift, - const int32_t output_mult) -{ - (void)dilation_x; - (void)dilation_y; - - if (ch_mult % 4 == 0) - { - depthwise_conv_u8_mult_4(input, input_x, input_y, input_ch, kernel, ch_mult * input_ch, ch_mult, - kernel_x, kernel_y, pad_x, pad_y, stride_x, stride_y, bias, output, - output_shift, output_mult, output_x, output_y, output_offset, input_offset, - filter_offset, output_activation_min, output_activation_max); - } - else - { - depthwise_conv_u8_generic(input, input_x, input_y, input_ch, kernel, ch_mult * input_ch, ch_mult, - kernel_x, kernel_y, pad_x, pad_y, stride_x, stride_y, bias, - output, output_shift, output_mult, output_x, output_y, output_offset, - input_offset, filter_offset, output_activation_min, output_activation_max); - } - - /* Return to application */ - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNConv group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c deleted file mode 100644 index eed2036..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_conv_wrapper_s8.c - * Description: Wrapper API to select appropriate depthwise conv API based - * on dimensions. - * - * $Date: May 29, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/* - * s8 Depthwise conv wrapper function - * - * Refer header file for details. 
- * - */ -arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *filter, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) -{ - arm_status status = ARM_MATH_SUCCESS; - if (1 == dw_conv_params->ch_mult) - { -#if !defined(ARM_MATH_MVEI) - if ((filter_dims->w == 3) && (filter_dims->h == 3) && (dw_conv_params->padding.h <= 1)) - { - status = arm_depthwise_conv_3x3_s8(ctx, - dw_conv_params, - quant_params, - input_dims, - input, - filter_dims, - filter, - bias_dims, - bias, - output_dims, - output); - } - else -#endif - { - status = arm_depthwise_conv_s8_opt(ctx, - dw_conv_params, - quant_params, - input_dims, - input, - filter_dims, - filter, - bias_dims, - bias, - output_dims, - output); - } - } - else - { - status = arm_depthwise_conv_s8(ctx, - dw_conv_params, - quant_params, - input_dims, - input, - filter_dims, - filter, - bias_dims, - bias, - output_dims, - output); - } - - /* Return to application */ - return status; -} - -int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims) -{ - (void)dw_conv_params; - int32_t size = 0; - - if (input_dims->c == output_dims->c) - { - size = arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims); - } - - return size; -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c deleted file mode 100644 index a7eaf93..0000000 --- 
a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_separable_conv_HWC_q7.c - * Description: Q7 depthwise separable convolution function - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/** - * @brief Q7 depthwise separable convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in equals ch_im_out - * - * Implementation: - * There are 3 nested loop here: - * Inner loop: calculate each output value with MAC instruction over an accumulator - * Mid loop: loop over different output channel - * Outer loop: loop over different output (x, y) - */ - -arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out, - q15_t * bufferA, - q7_t * bufferB) -{ - (void)bufferB; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_out_y, i_out_x; - int16_t i_ker_y, i_ker_x; - q7_t *colBuffer = (q7_t *) bufferA; - q7_t *pBuffer = colBuffer; - const q7_t *pBias = bias; - q7_t *pOut = Im_out; - uint16_t rowCnt; - uint16_t row_shift; - - /* do some checking here, basically ch_im_in == ch_im_out */ - if (ch_im_in != ch_im_out) - { - return ARM_MATH_SIZE_MISMATCH; - } - - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - /* we first do im2col here */ - for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) - { - for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) - { - /* arm_fill_q7(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, ch_im_in); - } else - { - /* arm_copy_q7((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ - memcpy(pBuffer, (q7_t *) Im_in + (i_ker_y * 
dim_im_in + i_ker_x) * ch_im_in, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - /* we will do the computation here for each channel */ - rowCnt = ch_im_out >> 2; - row_shift = 0; - pBias = bias; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = (dim_kernel * dim_kernel) >> 1; - q7_t *pB = colBuffer + row_shift; - const q7_t *pA = wt + row_shift; - row_shift += 4; - -#ifdef USE_INTRINSIC - -#ifndef ARM_MATH_BIG_ENDIAN - - while (colCnt) - { - q31_t inA1, inA2, inB1, inB2, opA, opB; - - inB1 = arm_nn_read_q7x4(pB); - pB += ch_im_in; - opB = arm_nn_read_q7x4(pB); - pB += ch_im_in; - inB2 = __PKHTB(opB, inB1, 16); - inB1 = __PKHBT(inB1, opB, 16); - inA1 = arm_nn_read_q7x4(pA); - pA += ch_im_in; - opB = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inA2 = __PKHTB(opB, inA1, 16); - inA1 = __PKHBT(inA1, opB, 16); - opA = __SXTB16(inA1); - opB = __SXTB16(inB1); - sum = __SMLAD(opA, opB, sum); - opA = __SXTB16(__ROR(inA1, 8)); - opB = __SXTB16(__ROR(inB1, 8)); - sum2 = __SMLAD(opA, opB, sum2); - opA = __SXTB16(inA2); - opB = __SXTB16(inB2); - sum3 = __SMLAD(opA, opB, sum3); - opA = __SXTB16(__ROR(inA2, 8)); - opB = __SXTB16(__ROR(inB2, 8)); - sum4 = __SMLAD(opA, opB, sum4); - colCnt--; - } -#else - - while (colCnt) - { - q31_t inA1, inA2, inB1, inB2, opA, opB; - - inB1 = arm_nn_read_q7x4(pB); - pB += ch_im_in; - opB = arm_nn_read_q7x4(pB); - pB += ch_im_in; - inB2 = __PKHBT(opB, inB1, 16); - inB1 = __PKHTB(inB1, opB, 16); - inA1 = arm_nn_read_q7x4(pA); - pA += ch_im_in; - opB = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inA2 = __PKHBT(opB, inA1, 16); - inA1 = __PKHTB(inA1, opB, 16); - opA = __SXTB16(inA1); - opB = __SXTB16(inB1); - sum2 = __SMLAD(opA, opB, sum2); - opA = __SXTB16(__ROR(inA1, 8)); - 
opB = __SXTB16(__ROR(inB1, 8)); - sum = __SMLAD(opA, opB, sum); - opA = __SXTB16(inA2); - opB = __SXTB16(inB2); - sum4 = __SMLAD(opA, opB, sum4); - opA = __SXTB16(__ROR(inA2, 8)); - opB = __SXTB16(__ROR(inB2, 8)); - sum3 = __SMLAD(opA, opB, sum3); - colCnt--; - } - -#endif /* ARM_MATH_BIG_ENDIAN */ - -#else - -#ifndef ARM_MATH_BIG_ENDIAN - /* - * r0 r1 r2 r3 r4 r5 - * inA1, inA2, inB1, inB2, opA, opB - */ - - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r2, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "ldr.w r5, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "pkhtb r3, r5, r2, ASR #16\n" - "pkhbt r2, r2, r5, LSL #16\n" - "ldr.w r0, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "ldr.w r5, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "pkhtb r1, r5, r0, ASR #16\n" - "pkhbt r0, r0, r5, LSL #16\n" - "sxtb16 r4, r0\n" - "sxtb16 r5, r2\n" - "smlad %[sum], r4, r5, %[sum]\n" - "mov.w r4, r0, ror #8\n" - "mov.w r5, r2, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum2], r4, r5, %[sum2]\n" - "sxtb16 r4, r1\n" - "sxtb16 r5, r3\n" - "smlad %[sum3], r4, r5, %[sum3]\n" - "mov.w r4, r1, ror #8\n" - "mov.w r5, r3, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum4], r4, r5, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] - "+r"(sum),[sum2] "+r"(sum2), - [sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB), - [pA] "+r"(pA):[colCnt] - "r"(colCnt),[ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); -#else - /* - * r0 r1 r2 r3 r4 r5 - * inA1, inA2, inB1, inB2, opA, opB - */ - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r2, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "ldr.w r5, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "pkhbt r3, r5, r2, LSL #16\n" - "pkhtb r2, r2, r5, ASR #16\n" - "ldr.w r0, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "ldr.w r5, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "pkhbt r1, r5, r0, LSL #16\n" - "pkhtb r0, r0, r5, ASR #16\n" - 
"sxtb16 r4, r0\n" - "sxtb16 r5, r2\n" - "smlad %[sum2], r4, r5, %[sum2]\n" - "mov.w r4, r0, ror #8\n" - "mov.w r5, r2, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum], r4, r5, %[sum]\n" - "sxtb16 r4, r1\n" - "sxtb16 r5, r3\n" - "smlad %[sum4], r4, r5, %[sum4]\n" - "mov.w r4, r1, ror #8\n" - "mov.w r5, r3, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum3], r4, r5, %[sum3]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] - "+r"(sum),[sum2] "+r"(sum2), - [sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB), - [pA] "+r"(pA):[colCnt] - "r"(colCnt),[ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); - -#endif /* ARM_MATH_BIG_ENDIAN */ - -#endif /* USE_INTRINSIC */ - - colCnt = (dim_kernel * dim_kernel) & 0x1; - while (colCnt) - { - union arm_nnword inA, inB; - inA.word = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inB.word = arm_nn_read_q7x4(pB); - pB += ch_im_in; - sum += inA.bytes[0] * inB.bytes[0]; - sum2 += inA.bytes[1] * inB.bytes[1]; - sum3 += inA.bytes[2] * inB.bytes[2]; - sum4 += inA.bytes[3] * inB.bytes[3]; - colCnt--; - } - - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum4 >> out_shift), 8); - - rowCnt--; - } - - rowCnt = ch_im_out & 0x3; - while (rowCnt) - { - q7_t *pB = colBuffer + row_shift; - const q7_t *pA = wt + row_shift; - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = (dim_kernel * dim_kernel); - - row_shift += 1; - - while (colCnt) - { - q7_t A1 = *pA; - q7_t B1 = *pB; - pA += ch_im_in; - pB += ch_im_in; - sum += A1 * B1; - - colCnt--; - } - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - rowCnt--; - } - - /* clear counter and pointers */ - pBuffer = colBuffer; - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i_out_y, i_out_x, i_ch_out, i_ker_x, i_ker_y; - int 
conv_out; - - /* do some checking here, basically ch_im_in == ch_im_out */ - if (ch_im_in != ch_im_out) - { - return ARM_MATH_SIZE_MISMATCH; - } - - for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) - { - for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) - { - // for each output - conv_out = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); - for (i_ker_y = 0; i_ker_y < dim_kernel; i_ker_y++) - { - for (i_ker_x = 0; i_ker_x < dim_kernel; i_ker_x++) - { - int in_row = stride * i_out_y + i_ker_y - padding; - int in_col = stride * i_out_x + i_ker_x - padding; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) - { - conv_out += - Im_in[(in_row * - dim_im_in + - in_col) * - ch_im_in + - i_ch_out] * wt[(i_ker_y * dim_kernel + i_ker_x) * ch_im_out + i_ch_out]; - } - } - } - Im_out[(i_out_y * dim_im_out + - i_out_x) * ch_im_out + i_ch_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return ARM_MATH_SUCCESS; - -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c deleted file mode 100644 index 53143da..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_depthwise_separable_conv_HWC_q7_nonsquare.c - * Description: Q7 depthwise separable convolution function (non-square shape) - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup NNConv - * @{ - */ - -/** - * @brief Q7 depthwise separable convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding sizes x - * @param[in] padding_y padding sizes y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer 
space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is equal to ch_im_out - * - */ - -arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t * Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t * wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t * bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t * Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t * bufferA, - q7_t * bufferB) -{ - - (void)bufferB; - -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - -/* - * Implementation: - * There are 3 nested loop here: - * Inner loop: calculate each output value with MAC instruction over an accumulator - * Mid loop: loop over different output channel - * Outer loop: loop over different output (x, y) - * - */ - - int16_t i_out_y, i_out_x; - int16_t i_ker_y, i_ker_x; - q7_t *colBuffer = (q7_t *) bufferA; - q7_t *pBuffer = colBuffer; - const q7_t *pBias = bias; - q7_t *pOut = Im_out; - uint16_t rowCnt; - uint16_t row_shift; - - /* do some checking here, basically ch_im_in == ch_im_out */ - if (ch_im_in != ch_im_out) - { - return ARM_MATH_SIZE_MISMATCH; - } - - for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - /* we first do im2col here */ - for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; - i_ker_y++) - { - for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * 
stride_x - padding_x + dim_kernel_x; - i_ker_x++) - { - if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) - { - /* arm_fill_q7(0, pBuffer, ch_im_in); */ - memset(pBuffer, 0, ch_im_in); - } else - { - /* arm_copy_q7((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ - memcpy(pBuffer, (q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, ch_im_in); - } - pBuffer += ch_im_in; - } - } - - /* we will do the computation here for each channel */ - rowCnt = ch_im_out >> 2; - row_shift = 0; - pBias = bias; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = (dim_kernel_x * dim_kernel_y) >> 1; - q7_t *pB = colBuffer + row_shift; - const q7_t *pA = wt + row_shift; - row_shift += 4; - -#ifdef USE_INTRINSIC - -#ifndef ARM_MATH_BIG_ENDIAN - - while (colCnt) - { - q31_t inA1, inA2, inB1, inB2, opA, opB; - - inB1 = arm_nn_read_q7x4(pB); - pB += ch_im_in; - opB = arm_nn_read_q7x4(pB); - pB += ch_im_in; - inB2 = __PKHTB(opB, inB1, 16); - inB1 = __PKHBT(inB1, opB, 16); - inA1 = arm_nn_read_q7x4(pA); - pA += ch_im_in; - opB = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inA2 = __PKHTB(opB, inA1, 16); - inA1 = __PKHBT(inA1, opB, 16); - opA = __SXTB16(inA1); - opB = __SXTB16(inB1); - sum = __SMLAD(opA, opB, sum); - opA = __SXTB16(__ROR(inA1, 8)); - opB = __SXTB16(__ROR(inB1, 8)); - sum2 = __SMLAD(opA, opB, sum2); - opA = __SXTB16(inA2); - opB = __SXTB16(inB2); - sum3 = __SMLAD(opA, opB, sum3); - opA = __SXTB16(__ROR(inA2, 8)); - opB = __SXTB16(__ROR(inB2, 8)); - sum4 = __SMLAD(opA, opB, sum4); - colCnt--; - } -#else - - while (colCnt) - { - q31_t inA1, inA2, inB1, inB2, opA, opB; - - inB1 = arm_nn_read_q7x4(pB); - pB += ch_im_in; - opB = 
arm_nn_read_q7x4(pB); - pB += ch_im_in; - inB2 = __PKHBT(opB, inB1, 16); - inB1 = __PKHTB(inB1, opB, 16); - inA1 = arm_nn_read_q7x4(pA); - pA += ch_im_in; - opB = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inA2 = __PKHBT(opB, inA1, 16); - inA1 = __PKHTB(inA1, opB, 16); - opA = __SXTB16(inA1); - opB = __SXTB16(inB1); - sum2 = __SMLAD(opA, opB, sum2); - opA = __SXTB16(__ROR(inA1, 8)); - opB = __SXTB16(__ROR(inB1, 8)); - sum = __SMLAD(opA, opB, sum); - opA = __SXTB16(inA2); - opB = __SXTB16(inB2); - sum4 = __SMLAD(opA, opB, sum4); - opA = __SXTB16(__ROR(inA2, 8)); - opB = __SXTB16(__ROR(inB2, 8)); - sum3 = __SMLAD(opA, opB, sum3); - colCnt--; - } - -#endif /* ARM_MATH_BIG_ENDIAN */ - -#else - -#ifndef ARM_MATH_BIG_ENDIAN - // r0 r1 r2 r3 r4 r5 - // inA1, inA2, inB1, inB2, opA, opB - asm volatile ("COL_LOOP:\n" - "ldr.w r2, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "ldr.w r5, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "pkhtb r3, r5, r2, ASR #16\n" - "pkhbt r2, r2, r5, LSL #16\n" - "ldr.w r0, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "ldr.w r5, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "pkhtb r1, r5, r0, ASR #16\n" - "pkhbt r0, r0, r5, LSL #16\n" - "sxtb16 r4, r0\n" - "sxtb16 r5, r2\n" - "smlad %[sum], r4, r5, %[sum]\n" - "mov.w r4, r0, ror #8\n" - "mov.w r5, r2, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum2], r4, r5, %[sum2]\n" - "sxtb16 r4, r1\n" - "sxtb16 r5, r3\n" - "smlad %[sum3], r4, r5, %[sum3]\n" - "mov.w r4, r1, ror #8\n" - "mov.w r5, r3, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum4], r4, r5, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP\n":[sum] "+r"(sum),[sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt), - [ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); -#else - // r0 r1 r2 r3 r4 r5 - // inA1, inA2, inB1, inB2, opA, opB - asm volatile ("COL_LOOP:\n" - "ldr.w r2, [%[pB], #0]\n" - "add.w %[pB], 
%[pB], %[ch_im_in]\n" - "ldr.w r5, [%[pB], #0]\n" - "add.w %[pB], %[pB], %[ch_im_in]\n" - "pkhbt r3, r5, r2, LSL #16\n" - "pkhtb r2, r2, r5, ASR #16\n" - "ldr.w r0, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "ldr.w r5, [%[pA], #0]\n" - "add.w %[pA], %[pA], %[ch_im_in]\n" - "pkhbt r1, r5, r0, LSL #16\n" - "pkhtb r0, r0, r5, ASR #16\n" - "sxtb16 r4, r0\n" - "sxtb16 r5, r2\n" - "smlad %[sum2], r4, r5, %[sum2]\n" - "mov.w r4, r0, ror #8\n" - "mov.w r5, r2, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum], r4, r5, %[sum]\n" - "sxtb16 r4, r1\n" - "sxtb16 r5, r3\n" - "smlad %[sum4], r4, r5, %[sum4]\n" - "mov.w r4, r1, ror #8\n" - "mov.w r5, r3, ror #8\n" - "sxtb16 r4, r4\n" - "sxtb16 r5, r5\n" - "smlad %[sum3], r4, r5, %[sum3]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP\n":[sum] "+r"(sum),[sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt), - [ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); -#endif /*ARM_MATH_BIG_ENDIAN */ - -#endif /* USE_INTRINSIC */ - - colCnt = (dim_kernel_x * dim_kernel_y) & 0x1; - while (colCnt) - { - union arm_nnword inA, inB; - inA.word = arm_nn_read_q7x4(pA); - pA += ch_im_in; - inB.word = arm_nn_read_q7x4(pB); - pB += ch_im_in; - sum += inA.bytes[0] * inB.bytes[0]; - sum2 += inA.bytes[1] * inB.bytes[1]; - sum3 += inA.bytes[2] * inB.bytes[2]; - sum4 += inA.bytes[3] * inB.bytes[3]; - colCnt--; - } - - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); - *pOut++ = (q7_t) __SSAT((sum4 >> out_shift), 8); - - rowCnt--; - } - - rowCnt = ch_im_out & 0x3; - while (rowCnt) - { - q7_t *pB = colBuffer + row_shift; - const q7_t *pA = wt + row_shift; - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = (dim_kernel_x * dim_kernel_y); - - row_shift += 1; - - while (colCnt) - { - q7_t A1 = *pA; - q7_t B1 = *pB; - pA += ch_im_in; - pB 
+= ch_im_in; - sum += A1 * B1; - - colCnt--; - } - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - rowCnt--; - } - - // clear counter and pointers - pBuffer = colBuffer; - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int i_out_y, i_out_x, i_ch_out; - int i_ker_y, i_ker_x; - - /* do some checking here, basically ch_im_in == ch_im_out */ - if (ch_im_in != ch_im_out) - { - return ARM_MATH_SIZE_MISMATCH; - } - - for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) - { - for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) - { - // for each output - int conv_out = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); - for (i_ker_y = 0; i_ker_y < dim_kernel_y; i_ker_y++) - { - for (i_ker_x = 0; i_ker_x < dim_kernel_x; i_ker_x++) - { - int in_row = stride_y * i_out_y + i_ker_y - padding_y; - int in_col = stride_x * i_out_x + i_ker_x - padding_x; - if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) - { - conv_out += Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + i_ch_out] * - wt[(i_ker_y * dim_kernel_x + i_ker_x) * ch_im_out + i_ch_out]; - } - } - } - Im_out[(i_out_y * dim_im_out_x + i_out_x) * ch_im_out + i_ch_out] = - (q7_t) __SSAT((conv_out >> out_shift), 8); - } - } - } - -#endif /* ARM_MATH_DSP */ - - - /* Return to application */ - return ARM_MATH_SUCCESS; - -} - -/** - * @} end of NNConv group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c deleted file mode 100644 index f03e5ff..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_kernel_q7_q15.c - * Description: Matrix-multiplication function for convolution - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - - /** - * @brief Matrix-multiplication function for convolution. - * - * @details Refer to header file for details. 
- * - */ - -q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t * pA, - const q15_t * pInBuffer, - const uint16_t ch_im_out, - const uint16_t numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut) -{ -#if defined (ARM_MATH_DSP) - /* set up the second output pointers */ - q7_t *pOut2 = pOut + ch_im_out; - const q7_t *pBias = bias; - - uint16_t rowCnt = ch_im_out >> 1; - /* this loop over rows in A */ - while (rowCnt) - { - /* setup pointers for B */ - const q15_t *pB = pInBuffer; - const q15_t *pB2 = pB + numCol_A; - - /* align the second pointer for A */ - const q7_t *pA2 = pA + numCol_A; - - /* init the sum with bias */ - q31_t sum = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = numCol_A >> 2; - /* accumulate over the vector */ - while (colCnt) - { - q31_t inA11, inA12, inA21, inA22; - - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); - - pA = read_and_pad(pA, &inA11, &inA12); - pA2 = read_and_pad(pA2, &inA21, &inA22); - - sum = __SMLAD(inA11, inB1, sum); - sum2 = __SMLAD(inA11, inB2, sum2); - sum3 = __SMLAD(inA21, inB1, sum3); - sum4 = __SMLAD(inA21, inB2, sum4); - - inB1 = arm_nn_read_q15x2_ia(&pB); - inB2 = arm_nn_read_q15x2_ia(&pB2); - - sum = __SMLAD(inA12, inB1, sum); - sum2 = __SMLAD(inA12, inB2, sum2); - sum3 = __SMLAD(inA22, inB1, sum3); - sum4 = __SMLAD(inA22, inB2, sum4); - - colCnt--; - } /* while over colCnt */ - colCnt = numCol_A & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - q7_t inA2 = *pA2++; - q15_t inB2 = *pB2++; - - sum += inA1 * inB1; - sum2 += inA1 * inB2; - sum3 += inA2 * inB1; - sum4 += inA2 * inB2; - colCnt--; - } /* while over colCnt */ - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - *pOut++ 
= (q7_t) __SSAT((sum3 >> out_shift), 8); - *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8); - - /* skip the row computed with A2 */ - pA += numCol_A; - rowCnt--; - } /* for over ch_im_out */ - - /* compute left-over row if any */ - if (ch_im_out & 0x1) - { - /* setup pointers for B */ - const q15_t *pB = pInBuffer; - const q15_t *pB2 = pB + numCol_A; - - /* load the bias */ - q31_t sum = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = numCol_A >> 2; - while (colCnt) - { - q31_t inA11, inA12; - - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); - - pA = read_and_pad(pA, &inA11, &inA12); - - sum = __SMLAD(inA11, inB1, sum); - sum2 = __SMLAD(inA11, inB2, sum2); - - inB1 = arm_nn_read_q15x2_ia(&pB); - inB2 = arm_nn_read_q15x2_ia(&pB2); - - sum = __SMLAD(inA12, inB1, sum); - sum2 = __SMLAD(inA12, inB2, sum2); - - colCnt--; - } - colCnt = numCol_A & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - q15_t inB2 = *pB2++; - - sum += inA1 * inB1; - sum2 += inA1 * inB2; - colCnt--; - } - - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - } - - pOut += ch_im_out; - - /* return the new output pointer with offset */ - return pOut; -#else - /* To be completed */ - return NULL; -#endif /* ARM_MATH_DSP */ - -} diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c deleted file mode 100644 index 31aa334..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_kernel_q7_q15_reordered.c - * Description: Matrix-multiplication function for convolution with reordered columns - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" -#include "arm_math.h" - - /** - * @brief Matrix-multiplication function for convolution with re-ordered input. - * - * @details Refer to header file for details. 
- * - */ - -q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA, - const q15_t * pInBuffer, - const uint16_t ch_im_out, - const uint16_t numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut) -{ - -#if defined (ARM_MATH_DSP) - /* set up the second output pointers */ - q7_t *pOut2 = pOut + ch_im_out; - int i; - - /* this loop over rows in A */ - for (i = 0; i < ch_im_out; i += 2) - { - /* setup pointers for B */ - const q15_t *pB = pInBuffer; - const q15_t *pB2 = pB + numCol_A; - - /* align the second pointer for A */ - const q7_t *pA2 = pA + numCol_A; - - /* init the sum with bias */ - q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = numCol_A >> 2; - /* accumulate over the vector */ - while (colCnt) - { - q31_t inA11, inA12, inA21, inA22; - - q31_t inB1 = arm_nn_read_q15x2_ia(&pB); - q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); - - pA = read_and_pad_reordered(pA, &inA11, &inA12); - pA2 = read_and_pad_reordered(pA2, &inA21, &inA22); - - sum = __SMLAD(inA11, inB1, sum); - sum2 = __SMLAD(inA11, inB2, sum2); - sum3 = __SMLAD(inA21, inB1, sum3); - sum4 = __SMLAD(inA21, inB2, sum4); - - inB1 = arm_nn_read_q15x2_ia(&pB); - inB2 = arm_nn_read_q15x2_ia(&pB2); - - sum = __SMLAD(inA12, inB1, sum); - sum2 = __SMLAD(inA12, inB2, sum2); - sum3 = __SMLAD(inA22, inB1, sum3); - sum4 = __SMLAD(inA22, inB2, sum4); - - colCnt--; - } /* while over colCnt */ - colCnt = numCol_A & 0x3; - while (colCnt) - { - q7_t inA1 = *pA++; - q15_t inB1 = *pB++; - q7_t inA2 = *pA2++; - q15_t inB2 = *pB2++; - - sum += inA1 * inB1; - sum2 += inA1 * inB2; - sum3 += inA2 * inB1; - sum4 += inA2 * inB2; - colCnt--; - } /* while over colCnt */ - *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); - 
*pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); - *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8); - - /* skip the row computed with A2 */ - pA += numCol_A; - } /* for over ch_im_out */ - - pOut += ch_im_out; - - /* return the new output pointer with offset */ - return pOut; -#else - /* To be completed */ - return NULL; -#endif /* ARM_MATH_DSP */ -} diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c deleted file mode 100644 index ebdfaaa..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c +++ /dev/null @@ -1,391 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_kernel_s8_s16.c - * Description: Matrix-multiplication function for convolution - * - * $Date: May 29, 2020 - * $Revision: V.1.0.2 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/* - * Matrix-multiplication function for convolution with per-channel requantization. 
- * - * Refer header file for details. - * - */ - -q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0) -{ -#if defined(ARM_MATH_MVEI) -#define ROW_PER_LOOP (4) -#define COL_PER_LOOP (8) - - const q7_t *ip_a0_s8 = input_a; - q7_t *out_1 = out_0 + output_ch; - - const int32_t *bias = output_bias; - - int32_t row_count = output_ch / ROW_PER_LOOP; - - while (row_count) - { - const q15_t *ip_b0_s16 = input_b; - const q15_t *ip_b1_s16 = input_b + num_col_a; - - const q7_t *ip_a1_s8 = ip_a0_s8 + num_col_a; - const q7_t *ip_a2_s8 = ip_a0_s8 + num_col_a * 2; - const q7_t *ip_a3_s8 = ip_a0_s8 + num_col_a * 3; - - q31_t ch_0_out_n = bias[0]; - q31_t ch_1_out_n = bias[1]; - q31_t ch_2_out_n = bias[2]; - q31_t ch_3_out_n = bias[3]; - - q31_t ch_0_out_n1 = ch_0_out_n; - q31_t ch_1_out_n1 = ch_1_out_n; - q31_t ch_2_out_n1 = ch_2_out_n; - q31_t ch_3_out_n1 = ch_3_out_n; - bias += 4; - - int32_t col_count = num_col_a / COL_PER_LOOP; - - while (col_count) - { - // Load inputs - const int16x8_t ip_b0 = vld1q_s16(ip_b0_s16); - ip_b0_s16 += COL_PER_LOOP; - const int16x8_t ip_b1 = vld1q_s16(ip_b1_s16); - ip_b1_s16 += COL_PER_LOOP; - - // Load filters - const int16x8_t ip_a0 = vldrbq_s16(ip_a0_s8); - ip_a0_s8 += COL_PER_LOOP; - const int16x8_t ip_a1 = vldrbq_s16(ip_a1_s8); - ip_a1_s8 += COL_PER_LOOP; - const int16x8_t ip_a2 = vldrbq_s16(ip_a2_s8); - ip_a2_s8 += COL_PER_LOOP; - const int16x8_t ip_a3 = vldrbq_s16(ip_a3_s8); - ip_a3_s8 += COL_PER_LOOP; - - // MAC - ch_0_out_n += vmladavq_s16(ip_b0, ip_a0); - ch_1_out_n += vmladavq_s16(ip_b0, ip_a1); - ch_2_out_n += vmladavq_s16(ip_b0, ip_a2); - ch_3_out_n += vmladavq_s16(ip_b0, ip_a3); - ch_0_out_n1 += vmladavq_s16(ip_b1, ip_a0); - ch_1_out_n1 += 
vmladavq_s16(ip_b1, ip_a1); - ch_2_out_n1 += vmladavq_s16(ip_b1, ip_a2); - ch_3_out_n1 += vmladavq_s16(ip_b1, ip_a3); - - col_count--; - } - - /* Handle tail */ - col_count = (num_col_a & (COL_PER_LOOP - 1)) - 1; - while (col_count >= 0) - { - const int32_t b0 = ip_b0_s16[col_count]; - const int32_t b1 = ip_b1_s16[col_count]; - - ch_0_out_n += b0 * ip_a0_s8[col_count]; - ch_1_out_n += b0 * ip_a1_s8[col_count]; - ch_2_out_n += b0 * ip_a2_s8[col_count]; - ch_3_out_n += b0 * ip_a3_s8[col_count]; - - ch_0_out_n1 += b1 * ip_a0_s8[col_count]; - ch_1_out_n1 += b1 * ip_a1_s8[col_count]; - ch_2_out_n1 += b1 * ip_a2_s8[col_count]; - ch_3_out_n1 += b1 * ip_a3_s8[col_count]; - col_count--; - } - ip_a0_s8 += (num_col_a & (COL_PER_LOOP - 1)); - - int32x4_t out_vec_0; - int32x4_t out_vec_1; - out_vec_0[0] = ch_0_out_n; - out_vec_0[1] = ch_1_out_n; - out_vec_0[2] = ch_2_out_n; - out_vec_0[3] = ch_3_out_n; - - out_vec_1[0] = ch_0_out_n1; - out_vec_1[1] = ch_1_out_n1; - out_vec_1[2] = ch_2_out_n1; - out_vec_1[3] = ch_3_out_n1; - - int32x4_t mult = vldrwq_s32(out_mult); - int32x4_t shift = vldrwq_s32(out_shift); - out_mult += ROW_PER_LOOP; - out_shift += ROW_PER_LOOP; - - out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult, shift); - out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult, shift); - - out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); - out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); - out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); - vstrbq_s32(out_0, out_vec_0); - out_0 += ROW_PER_LOOP; - - out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); - out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); - out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); - vstrbq_s32(out_1, out_vec_1); - out_1 += ROW_PER_LOOP; - row_count--; - ip_a0_s8 += (num_col_a * 3); - } - - row_count = output_ch & (ROW_PER_LOOP - 1); - - if (row_count) - { - ip_a0_s8 = input_a + num_col_a * (output_ch & ~3); - const mve_pred16_t p = 
vctp32q((uint32_t)row_count); - int32x4_t out_vec_0 = vdupq_n_s32(0); - int32x4_t out_vec_1 = vdupq_n_s32(0); - int32x4_t mult_tail; - int32x4_t shift_tail; - - for (int i_ch = 0; i_ch < row_count; i_ch++) - { - int32_t output_0 = bias[i_ch]; - int32_t output_1 = bias[i_ch]; - const q15_t *ip_b0_s16 = input_b; - const q15_t *ip_b1_s16 = input_b + num_col_a; - - for (int i_idx = 0; i_idx < num_col_a; i_idx++) - { - output_0 += ip_b0_s16[i_idx] * ip_a0_s8[i_idx]; - output_1 += ip_b1_s16[i_idx] * ip_a0_s8[i_idx]; - } - - ip_a0_s8 += num_col_a; - out_vec_0[i_ch] = output_0; - out_vec_1[i_ch] = output_1; - mult_tail[i_ch] = out_mult[i_ch]; - shift_tail[i_ch] = out_shift[i_ch]; - } - out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult_tail, shift_tail); - out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult_tail, shift_tail); - - out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); - out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); - out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out_0, out_vec_0, p); - - out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); - out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); - out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); - - vstrbq_p_s32(out_1, out_vec_1, p); - out_1 += row_count; - } - - return out_1; - -#elif defined(ARM_MATH_DSP) - /* set up the second output pointers */ - q7_t *out_1 = out_0 + output_ch; - const int32_t *bias = output_bias; - - uint16_t row_count = output_ch / 2; - const q7_t *ip_a0 = input_a; - /* this loop over rows in A */ - while (row_count) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* align the second pointer for A */ - const q7_t *ip_a1 = ip_a0 + num_col_a; - - /* Init accumulator with bias for channel N and N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; - q31_t ch_1_out_0 = *bias; - q31_t ch_1_out_1 = *bias++; - - uint16_t col_count = num_col_a / 4; - /* 
accumulate over the vector */ - while (col_count) - { - q31_t a01, a02, a11, a12; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad(ip_a0, &a01, &a02); - ip_a1 = read_and_pad(ip_a1, &a11, &a12); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); - - col_count--; - } /* while over col_count */ - col_count = num_col_a & 0x3; - while (col_count) - { - q7_t a0 = *ip_a0++; - q15_t b0 = *ip_b0++; - q7_t a1 = *ip_a1++; - q15_t b1 = *ip_b1++; - - ch_0_out_0 += a0 * b0; - ch_0_out_1 += a0 * b1; - ch_1_out_0 += a1 * b0; - ch_1_out_1 += a1 * b1; - col_count--; - } /* while over col_count */ - - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - *out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - out_mult++; - out_shift++; - - ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); - ch_1_out_0 += out_offset; - ch_1_out_0 = MAX(ch_1_out_0, activation_min); - ch_1_out_0 = MIN(ch_1_out_0, activation_max); - *out_0++ = (q7_t)ch_1_out_0; - - ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); - ch_1_out_1 += out_offset; - ch_1_out_1 = MAX(ch_1_out_1, activation_min); - ch_1_out_1 = MIN(ch_1_out_1, activation_max); - *out_1++ = (q7_t)ch_1_out_1; - out_mult++; - out_shift++; 
- - /* skip row */ - ip_a0 += num_col_a; - row_count--; - } - - /* compute the last odd numbered row if any */ - if (output_ch & 0x1) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* load the bias */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; - - uint16_t col_count = num_col_a >> 2; - while (col_count) - { - q31_t a01, a02; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad(ip_a0, &a01, &a02); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - - col_count--; - } - col_count = num_col_a & 0x3; - while (col_count) - { - q7_t a0 = *ip_a0++; - q15_t b0 = *ip_b0++; - q15_t b1 = *ip_b1++; - - ch_0_out_0 += a0 * b0; - ch_0_out_1 += a0 * b1; - col_count--; - } - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - *out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - out_mult++; - out_shift++; - } - - out_0 += output_ch; - - /* return the new output pointer with offset */ - return out_0; -#else - (void)input_a; - (void)input_b; - (void)output_ch; - (void)out_shift; - (void)out_mult; - (void)out_offset; - (void)activation_min; - (void)activation_max; - (void)num_col_a; - (void)output_bias; - (void)out_0; - /* To be completed */ - return NULL; -#endif -} diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c 
b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c deleted file mode 100644 index a25b1ff..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_kernel_s8_s16_reordered.c - * Description: Matrix-multiplication function for convolution with reordered columns - * - * $Date: February 27, 2020 - * $Revision: V.1.0.2 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" -#include "arm_math.h" - -/* - * Matrix-multiplication with re-ordered input and bias inputs for convolution with per-channel - * requantization. The re-ordering is a consequence of sign extension is done by the SXTB16 command. - * - * Refer header file for details. This function differs from arm_nn_mat_mult_kernel_s8_s16(), in that it uses - * read_and_pad_reordered() instead of arm_nn_mat_mult_kernel_s8_s16(). Investigating the cycles impact and - * unifying these two functions is a potential future improvement. 
- * - */ - -q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0) -{ -#if defined(ARM_MATH_DSP) - /* set up the second output pointers */ - q7_t *out_1 = out_0 + output_ch; - const int32_t *bias = output_bias; - - uint16_t row_count = output_ch / 2; - const q7_t *ip_a0 = input_a; - /* this loop over rows in A */ - while (row_count) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* align the second pointer for A */ - const q7_t *ip_a1 = ip_a0 + num_col_a; - - /* Init accumulator with bias for channel N and N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; - q31_t ch_1_out_0 = *bias; - q31_t ch_1_out_1 = *bias++; - - uint16_t col_count = num_col_a / 4; - /* accumulate over the vector */ - while (col_count) - { - q31_t a01, a02, a11, a12; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); - ip_a1 = read_and_pad_reordered(ip_a1, &a11, &a12); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); - - col_count--; - } /* while over col_count */ - - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - 
*out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - out_mult++; - out_shift++; - - ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); - ch_1_out_0 += out_offset; - ch_1_out_0 = MAX(ch_1_out_0, activation_min); - ch_1_out_0 = MIN(ch_1_out_0, activation_max); - *out_0++ = (q7_t)ch_1_out_0; - - ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); - ch_1_out_1 += out_offset; - ch_1_out_1 = MAX(ch_1_out_1, activation_min); - ch_1_out_1 = MIN(ch_1_out_1, activation_max); - *out_1++ = (q7_t)ch_1_out_1; - out_mult++; - out_shift++; - - /* skip row */ - ip_a0 += num_col_a; - row_count--; - } - - if (output_ch & 1) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* Init accumulator with bias for channel N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = ch_0_out_0; - - int32_t col_count = num_col_a / 4; - while (col_count) - { - q31_t a01, a02; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - - col_count--; - } /* while over col_count */ - - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - *out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = 
MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - } - - out_0 += output_ch; - - /* return the new output pointer with offset */ - return out_0; -#else - (void)input_a; - (void)input_b; - (void)output_ch; - (void)out_shift; - (void)out_mult; - (void)out_offset; - (void)activation_min; - (void)activation_max; - (void)num_col_a; - (void)output_bias; - (void)out_0; - /* To be completed */ - return NULL; -#endif -} diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c deleted file mode 100644 index 6af509f..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_s8.c - * Description: General Matrix-multiplication function - * - * $Date: July 27, 2020 - * $Revision: V.2.0.4 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/* - * s8 General matrix multiplication function with per-channel requantization for upto 4 column batches. 
- * - * Refer header file for details. - * - */ - -q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, - const q7_t *input_col, - const uint16_t output_ch, - const uint16_t col_batches, - const int32_t *output_shift, - const int32_t *output_mult, - const int32_t out_offset, - const int32_t col_offset, - const int32_t row_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t row_len, - const int32_t *const bias, - q7_t *out) -{ -#if defined(ARM_MATH_MVEI) - (void)row_offset; - if (col_batches == 4) - { - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t row_len_tmp = row_len; - const int8_t *ip_r0 = input_row + (i_out_ch * row_len); - const int8_t *ip_c0 = input_col; - const int8_t *ip_c1 = input_col + row_len; - const int8_t *ip_c2 = input_col + (2 * row_len); - const int8_t *ip_c3 = input_col + (3 * row_len); - - int32_t acc_0 = 0; - int32_t acc_1 = 0; - int32_t acc_2 = 0; - int32_t acc_3 = 0; - const int32_t row_loop_cnt = (row_len + 7) / 8; - - for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) - { - mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); - const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); - row_len_tmp -= 8; - - int16x8_t r0 = vldrbq_z_s16(ip_r0, p); - ip_r0 += 8; - - int16x8_t c0 = vldrbq_z_s16(ip_c0, p); - ip_c0 += 8; - c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); - - int16x8_t c1 = vldrbq_z_s16(ip_c1, p); - ip_c1 += 8; - c1 = vaddq_m_s16(vuninitializedq_s16(), c1, offset, p); - - int16x8_t c2 = vldrbq_z_s16(ip_c2, p); - ip_c2 += 8; - c2 = vaddq_m_s16(vuninitializedq_s16(), c2, offset, p); - - int16x8_t c3 = vldrbq_z_s16(ip_c3, p); - ip_c3 += 8; - c3 = vaddq_m_s16(vuninitializedq_s16(), c3, offset, p); - - acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); - acc_1 = vmladavaq_p_s16(acc_1, r0, c1, p); - acc_2 = vmladavaq_p_s16(acc_2, r0, c2, p); - acc_3 = vmladavaq_p_s16(acc_3, r0, c3, p); - } - - int32x4_t res = {acc_0, acc_1, acc_2, acc_3}; - if (bias) 
- { - res = vaddq_n_s32(res, bias[i_out_ch]); - } - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(activation_min)); - res = vminq_s32(res, vdupq_n_s32(activation_max)); - - const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; - vstrbq_scatter_offset_s32(&out[i_out_ch], scatter_offset, res); - } - out += 4 * output_ch; - } - else - { - for (int i_col_batch = (col_batches & ~0x3); i_col_batch < (col_batches & 0x3); i_col_batch++) - { - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t row_len_tmp = row_len; - - const int8_t *ip_r0 = input_row + (i_out_ch * row_len); - const int8_t *ip_c0 = input_col + (i_col_batch * row_len); - int32_t acc_0 = 0; - const int32_t row_loop_cnt = (row_len + 7) / 8; - - for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) - { - const mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); - const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); - row_len_tmp -= 8; - - int16x8_t r0 = vldrbq_z_s16(ip_r0, p); - ip_r0 += 8; - int16x8_t c0 = vldrbq_z_s16(ip_c0, p); - ip_c0 += 8; - - c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); - acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); - } - - if (bias) - { - acc_0 += bias[i_out_ch]; - } - acc_0 = arm_nn_requantize(acc_0, output_mult[i_out_ch], output_shift[i_out_ch]); - acc_0 += out_offset; - acc_0 = MAX(acc_0, activation_min); - acc_0 = MIN(acc_0, activation_max); - out[i_out_ch] = (q7_t)acc_0; - } - out += output_ch; - } - } - return out; - -#else - (void)input_row; - (void)input_col; - (void)output_ch; - (void)col_batches; - (void)output_shift; - (void)output_mult; - (void)out_offset; - (void)col_offset; - (void)row_offset; - (void)activation_min; - (void)activation_max; - (void)row_len; - (void)bias; - (void)out; - return NULL; -#endif -} diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c deleted file mode 100644 index ae2287c..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_mat_q7_vec_q15.c - * Description: Mixed Q15-Q7 fully-connected layer function - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Mixed Q15-Q7 fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Q7_Q15 version of the fully connected layer - * - * Weights are in q7_t and Activations are in q15_t - * - */ - -arm_status -arm_fully_connected_mat_q7_vec_q15(const q15_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q15_t * pOut, - q15_t * vec_buffer) -{ - (void)vec_buffer; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q7_t *pB = pM; - const q7_t *pB2; - q15_t *pO = pOut; - const q7_t *pBias = bias; - const q15_t *pA = pV; - - uint16_t rowCnt = num_of_rows >> 1; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - pB2 = pB + dim_vec; - - while (colCnt) - { - q31_t inV, inM11, inM12, inM21, inM22; - pB = read_and_pad(pB, &inM11, &inM12); - pB2 = read_and_pad(pB2, &inM21, &inM22); - - inV = arm_nn_read_q15x2_ia(&pA); - - sum = __SMLAD(inV, 
inM11, sum); - sum2 = __SMLAD(inV, inM21, sum2); - - inV = arm_nn_read_q15x2_ia(&pA); - - sum = __SMLAD(inV, inM12, sum); - sum2 = __SMLAD(inV, inM22, sum2); - - colCnt--; - } - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - q7_t inM2 = *pB2++; - - sum += inV * inM; - sum2 += inV * inM2; - colCnt--; - } /* while over colCnt */ - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); - - /*adjust the pointers and counters */ - pB += dim_vec; - rowCnt--; - } - - /* left-over part of the rows */ - rowCnt = num_of_rows & 0x1; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - - while (colCnt) - { - q31_t inV1, inV2, inM11, inM12; - - pB = read_and_pad(pB, &inM11, &inM12); - - inV1 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV1, inM11, sum); - - inV2 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV2, inM12, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - sum += inV * inM; - colCnt--; - } - - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - - rowCnt--; - } - -#else - int i, j; - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - for (i = 0; i < num_of_rows; i++) - { - int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - for (j = 0; j < dim_vec; j++) - { - ip_out += pV[j] * pM[i * dim_vec + j]; - } - pOut[i] = (q15_t) __SSAT((ip_out >> out_shift), 16); - } - -#endif /* ARM_MATH_DSP */ - - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c deleted file mode 100644 index 
b01f9e3..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_mat_q7_vec_q15_opt.c - * Description: Mixed Q15-Q7 opt fully-connected layer function - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Mixed Q15-Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Q7_Q15 version of the fully connected layer - * - * Weights are in q7_t and Activations are in q15_t - * - * Limitation: x4 version requires weight reordering to work - * - * Here we use only one pointer to read 4 rows in the weight - * matrix. So if the original q7_t matrix looks like this: - * - * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | - * - * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | - * - * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | - * - * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | - * - * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | - * - * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a21 | a12 | a22 | a31 | a41 | a32 | a42 | - * - * | a13 | a23 | a14 | a24 | a33 | a43 | a34 | a44 | - * - * | a15 | a25 | a16 | a26 | a35 | a45 | a36 | a46 | - * - * The column left over will be in-order. - * which is: - * | a17 | a27 | a37 | a47 | - * - * For the left-over rows, we do 1x1 computation, so the data remains - * as its original order. 
- * - * So the stored weight matrix looks like this: - * - * | a11 | a21 | a12 | a22 | a31 | a41 | - * - * | a32 | a42 | a13 | a23 | a14 | a24 | - * - * | a33 | a43 | a34 | a44 | a15 | a25 | - * - * | a16 | a26 | a35 | a45 | a36 | a46 | - * - * | a17 | a27 | a37 | a47 | a51 | a52 | - * - * | a53 | a54 | a55 | a56 | a57 | a61 | - * - * | a62 | a63 | a64 | a65 | a66 | a67 | - * - */ - -arm_status -arm_fully_connected_mat_q7_vec_q15_opt(const q15_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, const q7_t * bias, q15_t * pOut, q15_t * vec_buffer) -{ - - (void)vec_buffer; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q7_t *pB = pM; - q15_t *pO = pOut; - const q7_t *pBias = bias; - const q15_t *pA = pV; - - uint16_t rowCnt = num_of_rows >> 2; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 1; - - pA = pV; - -#ifdef USE_INTRINSIC - -#ifndef ARM_MATH_BIG_ENDIAN - - while (colCnt) - { - q31_t inM11, inM12, inM13, inM14; - q31_t inV; - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = __SMLAD(inM11, inV, sum); - sum2 = __SMLAD(inM12, inV, sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM13, inV, sum3); - sum4 = __SMLAD(inM14, inV, sum4); - colCnt--; - } - -#else - - while (colCnt) - { - q31_t inM11, inM12, inM13, inM14; - q31_t inV; - - inV = *__SIMD32(pA)++; - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = 
__SMLAD(inM12, inV, sum); - sum2 = __SMLAD(inM11, inV, sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM14, inV, sum3); - sum4 = __SMLAD(inM13, inV, sum4); - colCnt--; - } - -#endif /* ARM_MATH_BIG_ENDIAN */ - -#else - - /* - * register needed: - * loop counter: colCnt - * accumulators: sum, sum2, sum3, sum4 - * pointers: pB, pA - * weight data: inM11, inM12, inM13, inM14 - * activation data: inV - */ - -#ifndef ARM_MATH_BIG_ENDIAN - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r1, [%[pB]], #8\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] "+r"(sum), - [sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); -#else - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r1, [%[pB]], #8\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] "+r"(sum), - [sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); -#endif /* ARM_MATH_BIG_ENDIAN */ - -#endif /* USE_INTRINSIC */ - - colCnt = dim_vec & 0x1; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - q7_t inM2 = *pB++; - q7_t inM3 = *pB++; - q7_t inM4 = *pB++; - - sum += inV * inM; - sum2 += inV * inM2; - sum3 += inV * 
inM3; - sum4 += inV * inM4; - colCnt--; - } /* while over colCnt */ - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum3 >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum4 >> out_shift), 16)); - - /* adjust the pointers and counters */ - rowCnt--; - } - - /* left-over part of the rows */ - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - - while (colCnt) - { - q31_t inV1, inV2, inM11, inM12; - - pB = read_and_pad(pB, &inM11, &inM12); - - inV1 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV1, inM11, sum); - - inV2 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV2, inM12, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - sum += inV * inM; - colCnt--; - } - - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - - rowCnt--; - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - uint16_t rowCnt = num_of_rows >> 2; - const q7_t *pB = pM; - const q15_t *pA; - q15_t *pO = pOut; - const q7_t *pBias = bias; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = dim_vec >> 1; - - pA = pV; - - while (colCnt) - { - q15_t inA1 = *pA++; - q15_t inA2 = *pA++; - - q7_t inB1 = *pB++; - q7_t inB3 = *pB++; - q7_t inB2 = *pB++; - q7_t inB4 = *pB++; - - sum += inA1 * inB1 + inA2 * inB2; - sum2 += inA1 * inB3 + inA2 * inB4; - - inB1 = *pB++; - inB3 = *pB++; - inB2 = *pB++; - inB4 = *pB++; - - sum3 += inA1 * inB1 + inA2 * inB2; - sum4 += inA1 * inB3 + inA2 * inB4; - - 
colCnt--; - } - - colCnt = dim_vec & 0x1; - while (colCnt) - { - q15_t inA = *pA++; - q7_t inB = *pB++; - sum += inA * inB; - inB = *pB++; - sum2 += inA * inB; - inB = *pB++; - sum3 += inA * inB; - inB = *pB++; - sum4 += inA * inB; - - colCnt--; - } - *pO++ = (q15_t) __SSAT((sum >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum2 >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum3 >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum4 >> out_shift), 16); - - rowCnt--; - } - - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - int j; - - pA = pV; - for (j = 0; j < dim_vec; j++) - { - q15_t inA = *pA++; - q7_t inB = *pB++; - ip_out += inA * inB; - } - *pO++ = (q15_t) __SSAT((ip_out >> out_shift), 16); - - rowCnt--; - } - -#endif /* ARM_MATH_DSP */ - - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c deleted file mode 100644 index 9a69a96..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_q15.c - * Description: Q15 basic fully-connected layer function - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - */ - -arm_status -arm_fully_connected_q15(const q15_t * pV, - const q15_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t * bias, - q15_t * pOut, - q15_t * vec_buffer) -{ - (void)vec_buffer; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q15_t *pB = pM; - const q15_t *pB2 = pB + dim_vec; - q15_t *pO = pOut; - const q15_t *pA; - const q15_t *pBias = bias; - uint16_t rowCnt = num_of_rows >> 1; - - /* this loop loops over different output */ - while (rowCnt) { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - pB2 = pB + dim_vec; - - while (colCnt) - { - q31_t inV1, inM1, 
inM2; - inV1 = arm_nn_read_q15x2_ia(&pA); - inM1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inV1, inM1, sum); - inM2 = arm_nn_read_q15x2_ia(&pB2); - sum2 = __SMLAD(inV1, inM2, sum2); - - inV1 = arm_nn_read_q15x2_ia(&pA); - inM1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inV1, inM1, sum); - inM2 = arm_nn_read_q15x2_ia(&pB2); - sum2 = __SMLAD(inV1, inM2, sum2); - - colCnt--; - } - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q15_t inM = *pB++; - q15_t inM2 = *pB2++; - - sum += inV * inM; - sum2 += inV * inM2; - colCnt--; - } /* while over colCnt */ - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum2>> out_shift), 16)); - - /* adjust the pointers and counters */ - pB = pB + dim_vec; - rowCnt --; - } - - rowCnt = num_of_rows & 0x1; - - while (rowCnt) { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - - while (colCnt) { - q31_t inV1, inM1; - inV1 = arm_nn_read_q15x2_ia(&pA); - inM1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inV1, inM1, sum); - - inV1 = arm_nn_read_q15x2_ia(&pA); - inM1 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inV1, inM1, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while(colCnt) { - q15_t inV = *pA++; - q15_t inM = *pB++; - - sum += inV * inM; - - colCnt--; - } - - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - - rowCnt --; - } - -#else - int i, j; - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - for (i = 0; i < num_of_rows; i++) - { - int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - for (j = 0; j < dim_vec; j++) - { - ip_out += pV[j] * pM[i * dim_vec + j]; - } - pOut[i] = (q15_t) __SSAT((ip_out >> out_shift), 16); - } - -#endif /* ARM_MATH_DSP */ - - /* Return to application */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c deleted file mode 100644 index 72a39dd..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_q15_opt.c - * Description: Q15 opt fully-connected layer function - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Here we use only one pointer to read 4 rows in the weight - * matrix. So if the original matrix looks like this: - * - * | a11 | a12 | a13 | - * - * | a21 | a22 | a23 | - * - * | a31 | a32 | a33 | - * - * | a41 | a42 | a43 | - * - * | a51 | a52 | a53 | - * - * | a61 | a62 | a63 | - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | - * - * | a13 | a23 | a33 | a43 | - * - * Remaining rows are kept the same original order. 
- * - * So the stored weight matrix looks like this: - * - * - * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | - * - * | a13 | a23 | a33 | a43 | a51 | a52 | a53 | a61 | - * - * | a62 | a63 | - */ - -arm_status -arm_fully_connected_q15_opt(const q15_t * pV, - const q15_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t * bias, - q15_t * pOut, - q15_t * vec_buffer) -{ - (void)vec_buffer; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q15_t *pB = pM; - q15_t *pO = pOut; - const q15_t *pBias = bias; - const q15_t *pA = pV; - - uint16_t rowCnt = num_of_rows >> 2; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 1; - - pA = pV; - -#ifdef USE_INTRINSIC - - while (colCnt) - { - q31_t inM11, inM12, inM13, inM14; - q31_t inV; - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q15x2_ia(&pB); - sum = __SMLAD(inV, inM11, sum); - inM12 = arm_nn_read_q15x2_ia(&pB); - sum2 = __SMLAD(inV, inM12, sum2); - inM13 = arm_nn_read_q15x2_ia(&pB); - sum3 = __SMLAD(inV, inM13, sum3); - inM14 = arm_nn_read_q15x2_ia(&pB); - sum4 = __SMLAD(inV, inM14, sum4); - colCnt--; - } - -#else - - /* - * register needed: - * loop counter: colCnt - * accumulators: sum, sum2, sum3, sum4 - * pointers: pB, pA - * weight data: inM11, inM12, inM13, inM14 - * activation data: inV - */ - - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r0, [%[pB]], #16\n" - "smlad %[sum], r4, r0, %[sum]\n" - "ldr.w r1, [%[pB] , #-12]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r2, [%[pB] , #-8]\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "ldr.w r3, [%[pB] , #-4]\n" - "smlad 
%[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] "+r"(sum), - [sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); - -#endif /* USE_INTRINSIC */ - - colCnt = dim_vec & 0x1; - while (colCnt) - { - - q15_t inV = *pA++; - q15_t inM = *pB++; - q15_t inM2 = *pB++; - q15_t inM3 = *pB++; - q15_t inM4 = *pB++; - - sum += inV * inM; - sum2 += inV * inM2; - sum3 += inV * inM3; - sum4 += inV * inM4; - colCnt--; - } /* while over colCnt */ - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum3 >> out_shift), 16)); - *pO++ = (q15_t) (__SSAT((sum4 >> out_shift), 16)); - - /* adjust the pointers and counters */ - rowCnt--; - } - - /* left-over part of the rows */ - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - - while (colCnt) - { - q31_t inV1, inV2, inM1, inM2; - - inM1 = arm_nn_read_q15x2_ia(&pB); - inV1 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV1, inM1, sum); - - inM2 = arm_nn_read_q15x2_ia(&pB); - inV2 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV2, inM2, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q15_t inM = *pB++; - sum += inV * inM; - colCnt--; - } - - *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); - - rowCnt--; - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - uint16_t rowCnt = num_of_rows >> 2; - const q15_t *pB = pM; - const q15_t *pA; - q15_t *pO = pOut; - const q15_t *pBias = bias; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + 
NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 1; - - pA = pV; - while (colCnt) - { - q15_t inA1 = *pA++; - q15_t inA2 = *pA++; - - q15_t inB1 = *pB++; - q15_t inB2 = *pB++; - sum += inA1 * inB1 + inA2 * inB2; - - inB1 = *pB++; - inB2 = *pB++; - sum2 += inA1 * inB1 + inA2 * inB2; - - inB1 = *pB++; - inB2 = *pB++; - sum3 += inA1 * inB1 + inA2 * inB2; - - inB1 = *pB++; - inB2 = *pB++; - sum4 += inA1 * inB1 + inA2 * inB2; - - colCnt--; - } - colCnt = dim_vec & 0x1; - while (colCnt) - { - q15_t inA = *pA++; - q15_t inB = *pB++; - sum += inA * inB; - inB = *pB++; - sum2 += inA * inB; - inB = *pB++; - sum3 += inA * inB; - inB = *pB++; - sum4 += inA * inB; - colCnt--; - } - *pO++ = (q15_t) __SSAT((sum >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum2 >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum3 >> out_shift), 16); - *pO++ = (q15_t) __SSAT((sum4 >> out_shift), 16); - - rowCnt--; - } - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - int j; - - pA = pV; - for (j = 0; j < dim_vec; j++) - { - q15_t inA = *pA++; - q15_t inB = *pB++; - ip_out += inA * inB; - } - *pO++ = (q15_t) __SSAT((ip_out >> out_shift), 16); - - rowCnt--; - } - -#endif /* ARM_MATH_DSP */ - - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c deleted file mode 100644 index 8e499bc..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_q7.c - * Description: Q7 basic fully-connected layer function - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Q7 basic fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: dim_vec - * - * This basic function is designed to work with regular weight - * matrix without interleaving. 
- * - */ - -arm_status -arm_fully_connected_q7(const q7_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, const q7_t * bias, q7_t * pOut, q15_t * vec_buffer) -{ - -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q7_t *pB = pM; - const q7_t *pB2; - q7_t *pO = pOut; - const q7_t *pBias = bias; - const q15_t *pA; - uint16_t rowCnt = num_of_rows >> 1; - - /* expand the vector into the buffer */ - arm_q7_to_q15_reordered_no_shift(pV, vec_buffer, dim_vec); - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = dim_vec >> 2; - - pA = vec_buffer; - pB2 = pB + dim_vec; - - while (colCnt) - { - q31_t inV, inM11, inM12, inM21, inM22; - pB = read_and_pad_reordered(pB, &inM11, &inM12); - pB2 = read_and_pad_reordered(pB2, &inM21, &inM22); - - inV = arm_nn_read_q15x2_ia(&pA); - - sum = __SMLAD(inV, inM11, sum); - sum2 = __SMLAD(inV, inM21, sum2); - - inV = arm_nn_read_q15x2_ia(&pA); - - sum = __SMLAD(inV, inM12, sum); - sum2 = __SMLAD(inV, inM22, sum2); - - colCnt--; - } - colCnt = dim_vec & 0x3; - while (colCnt) - { - q7_t inV = *pA++; - q15_t inM = *pB++; - q15_t inM2 = *pB2++; - - sum += inV * inM; - sum2 += inV * inM2; - colCnt--; - } /* while over colCnt */ - *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); - *pO++ = (q7_t) (__SSAT((sum2 >> out_shift), 8)); - - /* adjust the pointers and counters */ - pB += dim_vec; - rowCnt--; - } - - /* left-over part of the rows */ - rowCnt = num_of_rows & 0x1; - - while (rowCnt) - { - uint16_t colCnt = dim_vec >> 2; - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - pA = vec_buffer; - - while (colCnt) - { - q31_t inV1, inV2, inM11, inM12; - - pB = read_and_pad_reordered(pB, &inM11, &inM12); - - inV1 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV1, 
inM11, sum); - - inV2 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV2, inM12, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while (colCnt) - { - q7_t inV = *pA++; - q15_t inM = *pB++; - sum += inV * inM; - colCnt--; - } - - *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); - - rowCnt--; - } - -#else - int i, j; - - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - for (i = 0; i < num_of_rows; i++) - { - int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); - for (j = 0; j < dim_vec; j++) - { - ip_out += pV[j] * pM[i * dim_vec + j]; - } - pOut[i] = (q7_t) __SSAT((ip_out >> out_shift), 8); - } - -#endif /* ARM_MATH_DSP */ - - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c deleted file mode 100644 index 5ddf7ca..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_q7_opt.c - * Description: Q7 basic fully-connected layer function - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - - /** - * @brief Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: dim_vec - * - * This opt function is designed to work with interleaved weight - * matrix. The vector input is assumed in q7_t format, we call - * arm_q7_to_q15_no_shift_shuffle function to expand into - * q15_t format with certain weight re-ordering, refer to the function - * comments for more details. - * Here we use only one pointer to read 4 rows in the weight - * matrix. 
So if the original q7_t matrix looks like this: - * - * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | - * - * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | - * - * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | - * - * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | - * - * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | - * - * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | - * - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a21 | a13 | a23 | a31 | a41 | a33 | a43 | - * - * | a12 | a22 | a14 | a24 | a32 | a42 | a34 | a44 | - * - * | a15 | a25 | a35 | a45 | a16 | a26 | a36 | a46 | - * - * So within the kernel, we first read the re-ordered vector in as: - * - * | b1 | b3 | and | b2 | b4 | - * - * the four q31_t weights will look like - * - * | a11 | a13 |, | a21 | a23 |, | a31 | a33 |, | a41 | a43 | - * - * | a12 | a14 |, | a22 | a24 |, | a32 | a34 |, | a42 | a44 | - * - * The column left over will be in-order. - * which is: - * - * | a17 | a27 | a37 | a47 | - * - * For the left-over rows, we do 1x1 computation, so the data remains - * as its original order. 
- * - * So the stored weight matrix looks like this: - * - * | a11 | a21 | a13 | a23 | a31 | a41 | - * - * | a33 | a43 | a12 | a22 | a14 | a24 | - * - * | a32 | a42 | a34 | a44 | a15 | a25 | - * - * | a35 | a45 | a16 | a26 | a36 | a46 | - * - * | a17 | a27 | a37 | a47 | a51 | a52 | - * - * | a53 | a54 | a55 | a56 | a57 | a61 | - * - * | a62 | a63 | a64 | a65 | a66 | a67 | - * - * - */ - -arm_status -arm_fully_connected_q7_opt(const q7_t * pV, - const q7_t * pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t * bias, - q7_t * pOut, - q15_t * vec_buffer) -{ - -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - const q7_t *pB = pM; - q7_t *pO = pOut; - const q7_t *pBias = bias; - const q15_t *pA; - uint16_t rowCnt = num_of_rows >> 2; - - arm_q7_to_q15_reordered_no_shift(pV, vec_buffer, dim_vec); - - while (rowCnt) - { - - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = vec_buffer; - -#ifdef USE_INTRINSIC - -#ifndef ARM_MATH_BIG_ENDIAN - while (colCnt) - { - q31_t inM11, inM12, inM13, inM14; - q31_t inV; - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = __SMLAD(inM11, inV, sum); - sum2 = __SMLAD(inM12, inV, sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM13, inV, sum3); - sum4 = __SMLAD(inM14, inV, sum4); - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = __SMLAD(inM11, inV, sum); - sum2 = __SMLAD(inM12, inV, 
sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM13, inV, sum3); - sum4 = __SMLAD(inM14, inV, sum4); - colCnt--; - } -#else - while (colCnt) - { - q31_t inM11, inM12, inM13, inM14; - q31_t inV; - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = __SMLAD(inM12, inV, sum); - sum2 = __SMLAD(inM11, inV, sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM14, inV, sum3); - sum4 = __SMLAD(inM13, inV, sum4); - - inV = arm_nn_read_q15x2_ia(&pA); - inM11 = arm_nn_read_q7x4_ia(&pB); - inM12 = __SXTB16(__ROR(inM11, 8)); - inM11 = __SXTB16(inM11); - sum = __SMLAD(inM12, inV, sum); - sum2 = __SMLAD(inM11, inV, sum2); - inM13 = arm_nn_read_q7x4_ia(&pB); - inM14 = __SXTB16(__ROR(inM13, 8)); - inM13 = __SXTB16(inM13); - sum3 = __SMLAD(inM14, inV, sum3); - sum4 = __SMLAD(inM13, inV, sum4); - colCnt--; - } -#endif /* ARM_MATH_BIG_ENDIAN */ - -#else - - /* - * register needed: - * loop counter: colCnt - * accumulators: sum, sum2, sum3, sum4 - * pointers: pB, pA - * weight data: inM11, inM12, inM13, inM14 - * activation data: inV - */ - -#ifndef ARM_MATH_BIG_ENDIAN - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #8\n" - "ldr.w r1, [%[pB]], #16\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-12]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "ldr.w r4, [%[pA], #-4]\n" - "ldr.w r1, [%[pB], #-8]\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, 
r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] "+r"(sum), - [sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); -#else - asm volatile ("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #8\n" - "ldr.w r1, [%[pB]], #16\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-12]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "ldr.w r4, [%[pA], #-4]\n" - "ldr.w r1, [%[pB], #-8]\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n":[sum] "+r"(sum), - [sum2] "+r"(sum2),[sum3] "+r"(sum3), - [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); -#endif /* ARM_MATH_BIG_ENDIAN */ - -#endif /* USE_INTRINSIC */ - - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - q7_t inM2 = *pB++; - q7_t inM3 = *pB++; - q7_t inM4 = *pB++; - - sum += inV * inM; - sum2 += inV * inM2; - sum3 += inV * inM3; - sum4 += inV * inM4; - colCnt--; - } /* while over colCnt */ - *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); - *pO++ = (q7_t) (__SSAT((sum2 >> out_shift), 8)); - *pO++ = (q7_t) (__SSAT((sum3 >> out_shift), 8)); - *pO++ = (q7_t) (__SSAT((sum4 >> out_shift), 8)); - - /* adjust the pointers and counters */ - rowCnt--; - } - - /* left-over part of the rows */ - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << 
bias_shift) + NN_ROUND(out_shift); - uint16_t colCnt = dim_vec >> 2; - - pA = vec_buffer; - - while (colCnt) - { - q31_t inV1, inV2, inM11, inM12; - - pB = read_and_pad_reordered(pB, &inM11, &inM12); - - inV1 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV1, inM11, sum); - - inV2 = arm_nn_read_q15x2_ia(&pA); - sum = __SMLAD(inV2, inM12, sum); - - colCnt--; - } - - /* left-over of the vector */ - colCnt = dim_vec & 0x3; - while (colCnt) - { - q15_t inV = *pA++; - q7_t inM = *pB++; - sum += inV * inM; - colCnt--; - } - - *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); - - rowCnt--; - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - uint16_t rowCnt = num_of_rows >> 2; - const q7_t *pB = pM; - const q7_t *pA; - q7_t *pO = pOut; - const q7_t *pBias = bias; - - while (rowCnt) - { - q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - uint16_t colCnt = dim_vec >> 2; - - pA = pV; - - while (colCnt) - { - q7_t inA1 = *pA++; - q7_t inA3 = *pA++; - q7_t inA2 = *pA++; - q7_t inA4 = *pA++; - - q7_t inB1 = *pB++; - q7_t inB3 = *pB++; - q7_t inB2 = *pB++; - q7_t inB4 = *pB++; - - sum += inA1 * inB1 + inA2 * inB2; - sum2 += inA1 * inB3 + inA2 * inB4; - - inB1 = *pB++; - inB3 = *pB++; - inB2 = *pB++; - inB4 = *pB++; - - sum3 += inA1 * inB1 + inA2 * inB2; - sum4 += inA1 * inB3 + inA2 * inB4; - - inB1 = *pB++; - inB3 = *pB++; - inB2 = *pB++; - inB4 = *pB++; - - sum += inA3 * inB1 + inA4 * inB2; - sum2 += inA3 * inB3 + inA4 * inB4; - - inB1 = *pB++; - inB3 = *pB++; - inB2 = *pB++; - inB4 = *pB++; - - sum3 += inA3 * inB1 + inA4 * inB2; - sum4 += inA3 * inB3 + inA4 * inB4; - - colCnt--; - } - colCnt = dim_vec & 0x3; - while (colCnt) - { - q7_t inA = *pA++; - q7_t inB = *pB++; - sum += inA * inB; - inB = 
*pB++; - sum2 += inA * inB; - inB = *pB++; - sum3 += inA * inB; - inB = *pB++; - sum4 += inA * inB; - - colCnt--; - } - *pO++ = (q7_t) __SSAT((sum >> out_shift), 8); - *pO++ = (q7_t) __SSAT((sum2 >> out_shift), 8); - *pO++ = (q7_t) __SSAT((sum3 >> out_shift), 8); - *pO++ = (q7_t) __SSAT((sum4 >> out_shift), 8); - - rowCnt--; - } - - rowCnt = num_of_rows & 0x3; - - while (rowCnt) - { - int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); - - int j; - - pA = pV; - for (j = 0; j < dim_vec; j++) - { - q7_t inA = *pA++; - q7_t inB = *pB++; - ip_out += inA * inB; - } - *pO++ = (q7_t) __SSAT((ip_out >> out_shift), 8); - - rowCnt--; - } - -#endif /* ARM_MATH_DSP */ - - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); - -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c deleted file mode 100644 index eace95f..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_fully_connected_s8 - * Description: Fully connected function compatible with TF Lite. - * - * $Date: May 2, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M and Cortex-A cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup FC - * @{ - */ - -/* - * S8 basic fully-connected and matrix multiplication layer function for TensorFlow Lite - * - * Refer header file for details. - * - */ - -arm_status -arm_fully_connected_s8(const cmsis_nn_context *ctx, - const cmsis_nn_fc_params *fc_params, - const cmsis_nn_per_tensor_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) -{ - (void)bias_dims; - (void)ctx; - int32_t batch_cnt = input_dims->n; - - while (batch_cnt) - { - arm_nn_vec_mat_mult_t_s8(input, - kernel, - bias, - output, - fc_params->input_offset, - fc_params->filter_offset, - fc_params->output_offset, - quant_params->multiplier, - quant_params->shift, - filter_dims->n, /* col_dim or accum_depth */ - output_dims->c, /* row_dim or output_depth */ - fc_params->activation.min, - fc_params->activation.max); - input += filter_dims->n; - output += output_dims->c; - batch_cnt--; - } - return (ARM_MATH_SUCCESS); -} - -int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims) -{ - (void)filter_dims; - return 0; -} - -/** - * @} end of FC group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c deleted file mode 100755 index 
c302bd7..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_accumulate_q7_to_q15.c - * Description: Accumulate q7 vector into q15 one. 
- * - * $Date: May 29, 2020 - * $Revision: V.1.0.1 - * - * pSrc Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length) -{ - q15_t *pCnt = pDst; - const q7_t *pV = pSrc; - q31_t v1, v2, vo1, vo2; - int32_t cnt = length >> 2; - q31_t in; - - while (cnt > 0l) - { - q31_t value = arm_nn_read_q7x4_ia(&pV); - v1 = __SXTB16(__ROR((uint32_t)value, 8)); - v2 = __SXTB16(value); -#ifndef ARM_MATH_BIG_ENDIAN - vo2 = (q31_t)__PKHTB(v1, v2, 16); - vo1 = (q31_t)__PKHBT(v2, v1, 16); -#else - vo1 = (q31_t)__PKHTB(v1, v2, 16); - vo2 = (q31_t)__PKHBT(v2, v1, 16); -#endif - - in = arm_nn_read_q15x2(pCnt); - write_q15x2_ia(&pCnt, __QADD16(vo1, in)); - - in = arm_nn_read_q15x2(pCnt); - write_q15x2_ia(&pCnt, __QADD16(vo2, in)); - - cnt--; - } - cnt = length & 0x3; - while (cnt > 0l) - { - *pCnt++ += *pV++; - cnt--; - } -} - -/** - * @} end of NNBasicMath group - */ \ No newline at end of file diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c deleted file mode 100644 index 331a2d7..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_add_q7.c - * Description: Non saturating addition of elements of a q7 vector. - * - * $Date: July 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size) -{ - uint32_t block_count; - q31_t result = 0; -#if defined(ARM_MATH_DSP) - /* Loop unrolling: Compute 4 outputs at a time */ - block_count = block_size >> 2U; - - while (block_count > 0U) - { - const int32_t mult_q15x2 = (1UL << 16) | 1UL; - q31_t in_q7x4 = arm_nn_read_q7x4_ia(&input); - q31_t temp_q15x2 = __SXTAB16(__SXTB16(in_q7x4), - __ROR((uint32_t)in_q7x4, 8)); - - result = __SMLAD(temp_q15x2, mult_q15x2, result); - - /* Decrement loop counter */ - block_count--; - } - - /* Loop unrolling: Compute remaining outputs */ - block_count = block_size & 0x3; -#else - block_count = block_size; -#endif - while (block_count > 0U) - { - /* Add and store result in destination buffer. 
*/ - result += *input++; - - /* Decrement loop counter */ - block_count--; - } - - *output = result; -} - -/** - * @} end of NNBasicMath group - */ \ No newline at end of file diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c deleted file mode 100644 index 3ecd8e5..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_depthwise_conv_nt_t_padded_s8.c - * Description: Depthwise convolution with padded matrices. - * - * $Date: March 17, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M processors with MVE extension - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * Depthwise convolution of transposed rhs matrix with 4 lhs matrices. One or more of the rhs matrices are padded. - * Dimensions are the same for lhs and rhs. 
- * - * Refer header file for details. - * - */ - -q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t input_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out) -{ -#if defined(ARM_MATH_MVEI) - int32_t loop_count = (num_ch + 3) / 4; - const int32_t *bias = output_bias; - uint32_t num_ch_to_process = num_ch; - - for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; - num_ch_to_process -= 4, out += 4, offset += 4, i_loop_cnt++) - { - int32x4_t out_0 = vldrwq_s32(bias); - int32x4_t out_1 = out_0; - int32x4_t out_2 = out_0; - int32x4_t out_3 = out_0; - bias += 4; - - const int8_t *rhs_0 = rhs + offset; - const int8_t *lhs_0 = lhs + offset; - const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; - const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; - const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; - - for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) - { - const int32x4_t ker_0 = vldrbq_s32(rhs_0); - - int32x4_t ip_0 = vldrbq_s32(lhs_0); - ip_0 = vaddq_n_s32(ip_0, input_offset); - out_0 += vmulq_s32(ip_0, ker_0); - - int32x4_t ip_1 = vldrbq_s32(lhs_1); - ip_1 = vaddq_n_s32(ip_1, input_offset); - out_1 += vmulq_s32(ip_1, ker_0); - - int32x4_t ip_2 = vldrbq_s32(lhs_2); - ip_2 = vaddq_n_s32(ip_2, input_offset); - out_2 += vmulq_s32(ip_2, ker_0); - - int32x4_t ip_3 = vldrbq_s32(lhs_3); - ip_3 = vaddq_n_s32(ip_3, input_offset); - - out_3 += vmulq_s32(ip_3, ker_0); - - lhs_0 += num_ch; - lhs_1 += num_ch; - lhs_2 += num_ch; - lhs_3 += num_ch; - - rhs_0 += num_ch; - } - - const int32x4_t mult = vldrwq_s32(out_mult); - const int32x4_t shift = vldrwq_s32(out_shift); - out_mult += 4; - out_shift += 4; - - out_0 = arm_requantize_mve_32x4(out_0, mult, shift); - out_0 = vaddq_n_s32(out_0, 
out_offset); - out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); - out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); - mve_pred16_t p = vctp32q(num_ch_to_process); - vstrbq_p_s32(out, out_0, p); - - out_1 = arm_requantize_mve_32x4(out_1, mult, shift); - out_1 = vaddq_n_s32(out_1, out_offset); - out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); - out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + num_ch, out_1, p); - - out_2 = arm_requantize_mve_32x4(out_2, mult, shift); - out_2 = vaddq_n_s32(out_2, out_offset); - out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); - out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 2 * num_ch, out_2, p); - - out_3 = arm_requantize_mve_32x4(out_3, mult, shift); - out_3 = vaddq_n_s32(out_3, out_offset); - out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); - out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 3 * num_ch, out_3, p); - } - - const int tail_ch = num_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } - return out + (3 * num_ch); - -#else - (void)lhs; - (void)rhs; - (void)input_offset; - (void)num_ch; - (void)out_shift; - (void)out_mult; - (void)out_offset; - (void)activation_min; - (void)activation_max; - (void)row_x_col; - (void)output_bias; - (void)out; - return NULL; -#endif -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c deleted file mode 100644 index 1bff9c4..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_depthwise_conv_nt_t_s8.c - * Description: Depthwise convolution on matrices with no padding. - * - * $Date: March 17, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M processors with MVE extension. - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs. - * - * Refer header file for details. 
- * - */ - -q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t input_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out) -{ -#if defined(ARM_MATH_MVEI) - const int32_t *bias = output_bias; - int32_t loop_count = (num_ch + 3) / 4; - uint32_t num_ch_to_process = num_ch; - - for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; - num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++) - { - int32x4_t out_0 = vldrwq_s32(bias); - int32x4_t out_1 = out_0; - int32x4_t out_2 = out_0; - int32x4_t out_3 = out_0; - bias += 4; - - const int8_t *rhs_0 = rhs + offset; - const int8_t *lhs_0 = lhs + offset; - const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; - const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; - const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; - int32x4_t ker_sum = vdupq_n_s32(0); - - for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) - { - const int32x4_t ker_0 = vldrbq_s32(rhs_0); - ker_sum = vaddq_s32(ker_sum, ker_0); - - int32x4_t ip_0 = vldrbq_s32(lhs_0); - out_0 += vmulq_s32(ip_0, ker_0); - - int32x4_t ip_1 = vldrbq_s32(lhs_1); - out_1 += vmulq_s32(ip_1, ker_0); - - int32x4_t ip_2 = vldrbq_s32(lhs_2); - out_2 += vmulq_s32(ip_2, ker_0); - - int32x4_t ip_3 = vldrbq_s32(lhs_3); - out_3 += vmulq_s32(ip_3, ker_0); - - lhs_0 += num_ch; - lhs_1 += num_ch; - lhs_2 += num_ch; - lhs_3 += num_ch; - - rhs_0 += num_ch; - } - - ker_sum = vmulq_n_s32(ker_sum, input_offset); - out_0 = ker_sum + out_0; - out_1 = ker_sum + out_1; - out_2 = ker_sum + out_2; - out_3 = ker_sum + out_3; - - const int32x4_t mult = vldrwq_s32(out_mult); - const int32x4_t shift = vldrwq_s32(out_shift); - out_mult += 4; - out_shift += 4; - mve_pred16_t p = vctp32q(num_ch_to_process); - - out_0 = 
arm_requantize_mve_32x4(out_0, mult, shift); - out_0 = vaddq_n_s32(out_0, out_offset); - out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); - out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out, out_0, p); - - out_1 = arm_requantize_mve_32x4(out_1, mult, shift); - out_1 = vaddq_n_s32(out_1, out_offset); - out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); - out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + num_ch, out_1, p); - - out_2 = arm_requantize_mve_32x4(out_2, mult, shift); - out_2 = vaddq_n_s32(out_2, out_offset); - out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); - out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 2 * num_ch, out_2, p); - - out_3 = arm_requantize_mve_32x4(out_3, mult, shift); - out_3 = vaddq_n_s32(out_3, out_offset); - out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); - out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 3 * num_ch, out_3, p); - } - - const int tail_ch = num_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } - - return out + (3 * num_ch); -#else - (void)lhs; - (void)rhs; - (void)input_offset; - (void)num_ch; - (void)out_shift; - (void)out_mult; - (void)out_offset; - (void)activation_min; - (void)activation_max; - (void)row_x_col; - (void)output_bias; - (void)out; - return NULL; -#endif -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c deleted file mode 100644 index 6077658..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mul_core_1x_s8.c - * Description: General Matrix-multiplication function - * - * $Date: January 20, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * s8 matrix multiplication to process 1 row - * - * Refer header file for details. 
- * - */ - -arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output) -{ - int32_t acc_n0 = 0; - int32_t sum_tmp = 0; - -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) - - __asm volatile ( - " vldrb.8 q0, [%[col]], 16 \n" - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vaddva.s8 %[sum], q0 \n" - " vldrb.8 q1, [%[row0]], 16 \n" - " vmladava.s8 %[out0], q0, q1 \n" - " vldrb.8 q0, [%[col]], 16 \n" - " letp lr, 2b \n" - "1: \n" - :[col] "+r"(col_base) - ,[sum] "+Te"(sum_tmp) - ,[row0] "+r"(row_base) - ,[out0] "+Te"(acc_n0) - :[cnt] "r"(row_elements) - :"q0","q1", "memory", "r14"); -#else - for (int i = 0; i < row_elements; i++) - { - sum_tmp += col_base[i]; - acc_n0 += row_base[i] * col_base[i]; - } -#endif - - *sum_col = sum_tmp; - *output = acc_n0; - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c deleted file mode 100644 index 39ad529..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mul_core_4x_s8.c - * Description: General matrix multiplication function for MVE extension - * - * $Date: January 20, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * s8 matrix multiplication to process 4 rows and one column - * - * Refer header file for details. - * - */ -arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, - const int32_t offset, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output) -{ - int32_t acc_n0 = 0; - int32_t acc_n1 = 0; - int32_t acc_n2 = 0; - int32_t acc_n3 = 0; - - const int8_t *ip_row_0 = row_base; - const int8_t *ip_row_1 = row_base + offset; - const int8_t *ip_row_2 = row_base + (2 * offset); - const int8_t *ip_row_3 = row_base + (3 * offset); - int32_t sum_tmp = 0; - -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) - __asm volatile( - " vldrb.8 q0, [%[col]], 16 \n" - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vaddva.s8 %[sum], q0 \n" - " vldrb.8 q1, [%[row0]], 16 \n" - " vmladava.s8 %[out0], q0, q1 \n" - " vldrb.8 q2, [%[row1]], 16 \n" - " vmladava.s8 %[out1], q0, q2 \n" - " vldrb.8 q3, [%[row2]], 16 \n" - " vmladava.s8 %[out2], q0, q3 \n" - " vldrb.8 q4, [%[row3]], 16 \n" - " vmladava.s8 %[out3], q0, q4 \n" - " vldrb.8 q0, [%[col]], 16 \n" - " letp lr, 2b \n" - "1: \n" - :[col] "+r"(col_base) - ,[sum] "+Te"(sum_tmp) - ,[row0] "+r"(ip_row_0) - ,[row1] "+r"(ip_row_1) - ,[row2] "+r"(ip_row_2) - ,[row3] "+r"(ip_row_3) - ,[out0] "+Te"(acc_n0) - ,[out1] "+Te"(acc_n1) - ,[out2] "+Te"(acc_n2) - ,[out3] "+Te"(acc_n3) - : [cnt] "r"(row_elements) - : "q0", "q1", "q2", "q3", "q4", "memory", 
"r14"); -#else - for (int i = 0; i < row_elements; i++) - { - int32_t col = col_base[i]; - sum_tmp += col; - acc_n0 += ip_row_0[i] * col; - acc_n1 += ip_row_1[i] * col; - acc_n2 += ip_row_2[i] * col; - acc_n3 += ip_row_3[i] * col; - } -#endif - output[0] = acc_n0; - output[1] = acc_n1; - output[2] = acc_n2; - output[3] = acc_n3; - - *sum_col = sum_tmp; - - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c deleted file mode 100644 index 6ad8999..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c +++ /dev/null @@ -1,584 +0,0 @@ -/* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_s8_nt_t_s8 - * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed - * - * $Date: July 27 2020 - * $Revision: V.1.0.2 - * - * Target Processor: Cortex-M - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * s8 matrix multiplication with the right-hand-side matrix transposed - * - * Refer header file for details. - * - */ -arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t *dst_multipliers, - const int32_t *dst_shifts, - const int32_t lhs_rows, - const int32_t rhs_rows, - const int32_t rhs_cols, - const int32_t lhs_offset, - const int32_t dst_offset, - const int32_t activation_min, - const int32_t activation_max) -{ -#if defined(ARM_MATH_DSP) - const int32_t off0 = rhs_cols - 4; - - for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) - { - const q7_t *lhs_ptr = &lhs[0]; - q7_t *dst_ptr = &dst[0]; - - q31_t lhs_offset_contribution0 = 0; - q31_t lhs_offset_contribution1 = 0; - - for (int32_t x = 0; x < rhs_cols; ++x) - { - lhs_offset_contribution0 += rhs[x]; - lhs_offset_contribution1 += rhs[x + rhs_cols]; - } - - lhs_offset_contribution0 *= lhs_offset; - lhs_offset_contribution1 *= lhs_offset; - if (bias) - { - lhs_offset_contribution0 += bias[rhs_rows_idx]; - lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; - } - - int32_t lhs_rows_idx = lhs_rows >> 1; - - while (lhs_rows_idx) - { - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = lhs_offset_contribution0; - q31_t res01 = lhs_offset_contribution1; - q31_t res10 = lhs_offset_contribution0; - q31_t res11 = 
lhs_offset_contribution1; - - int32_t rhs_cols_idx = 0; - - q31_t val0, val1, val2, val3, val4, val5; - - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) - { - val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val2 = __SXTB16(val1); - val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val1 = __SXTB16_RORn(val1, 8); - val0 = __SXTB16_RORn(val0, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTB16(val4); - res00 = __SMLAD(val0, val1, res00); - val4 = __SXTB16_RORn(val4, 8); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val0, val4, res01); - - // 4 x MAC res10, res11 - val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); - val3 = __SXTB16(val0); - val0 = __SXTB16_RORn(val0, 8); - res10 = __SMLAD(val3, val2, res10); - res11 = __SMLAD(val3, val5, res11); - res10 = __SMLAD(val0, val1, res10); - val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res11 = __SMLAD(val0, val4, res11); - - val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = __SXTB16(val1); - val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val1 = __SXTB16_RORn(val1, 8); - val0 = __SXTB16_RORn(val0, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTB16(val4); - res00 = __SMLAD(val0, val1, res00); - val4 = __SXTB16_RORn(val4, 8); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val0, val4, res01); - - // 4 x MAC res10, res11 - val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); - val3 = __SXTB16(val0); - val0 = __SXTB16_RORn(val0, 8); - res10 = __SMLAD(val3, val2, res10); - res11 = __SMLAD(val3, val5, res11); - res10 = __SMLAD(val0, val1, res10); - val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res11 = __SMLAD(val0, val4, res11); - - val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = __SXTB16(val1); - val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - 
val3 = __SXTB16(val0); - val1 = __SXTB16_RORn(val1, 8); - val0 = __SXTB16_RORn(val0, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTB16(val4); - res00 = __SMLAD(val0, val1, res00); - val4 = __SXTB16_RORn(val4, 8); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val0, val4, res01); - - // 4 x MAC res10, res11 - val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); - val3 = __SXTB16(val0); - val0 = __SXTB16_RORn(val0, 8); - res10 = __SMLAD(val3, val2, res10); - res11 = __SMLAD(val3, val5, res11); - res10 = __SMLAD(val0, val1, res10); - val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res11 = __SMLAD(val0, val4, res11); - - val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = __SXTB16(val1); - val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val1 = __SXTB16_RORn(val1, 8); - val0 = __SXTB16_RORn(val0, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTB16(val4); - res00 = __SMLAD(val0, val1, res00); - val4 = __SXTB16_RORn(val4, 8); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val0, val4, res01); - - // 4 x MAC res10, res11 - val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); - val3 = __SXTB16(val0); - val0 = __SXTB16_RORn(val0, 8); - res10 = __SMLAD(val3, val2, res10); - res11 = __SMLAD(val3, val5, res11); - res10 = __SMLAD(val0, val1, res10); - res11 = __SMLAD(val0, val4, res11); - } - - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q7_t rhs_value0 = rhs_ptr[0]; - q7_t rhs_value1 = rhs_ptr[rhs_cols]; - q7_t lhs_value = lhs_ptr[0]; - - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - lhs_value = lhs_ptr[rhs_cols]; - res10 += lhs_value * rhs_value0; - res11 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], 
dst_shifts[rhs_rows_idx + 1]); - res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - res10 += dst_offset; - res11 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); - res10 = MAX(res10, activation_min); - res10 = MIN(res10, activation_max); - res11 = MAX(res11, activation_min); - res11 = MIN(res11, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr[1] = (q7_t)res01; - dst_ptr += rhs_rows; - dst_ptr[0] = (q7_t)res10; - dst_ptr[1] = (q7_t)res11; - dst_ptr += rhs_rows; - - lhs_ptr += rhs_cols; - - lhs_rows_idx--; - } - - // Left-over rows - if (lhs_rows % 2) - { - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = lhs_offset_contribution0; - q31_t res01 = lhs_offset_contribution1; - - int32_t rhs_cols_idx = 0; - - q31_t val0, val1, val2, val3, val4, val5; - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) - { - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val5 = __SXTB16(val2); - val4 = __SXTB16(val1); - val0 = __SXTB16_RORn(val0, 8); - val2 = __SXTB16_RORn(val2, 8); - val1 = __SXTB16_RORn(val1, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val5, val3, res00); - res00 = __SMLAD(val2, val0, res00); - res01 = __SMLAD(val5, val4, res01); - res01 = __SMLAD(val2, val1, res01); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val5 = __SXTB16(val2); - val4 = __SXTB16(val1); - val0 = __SXTB16_RORn(val0, 8); - val2 = 
__SXTB16_RORn(val2, 8); - val1 = __SXTB16_RORn(val1, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val5, val3, res00); - res00 = __SMLAD(val2, val0, res00); - res01 = __SMLAD(val5, val4, res01); - res01 = __SMLAD(val2, val1, res01); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val5 = __SXTB16(val2); - val4 = __SXTB16(val1); - val0 = __SXTB16_RORn(val0, 8); - val2 = __SXTB16_RORn(val2, 8); - val1 = __SXTB16_RORn(val1, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val5, val3, res00); - res00 = __SMLAD(val2, val0, res00); - res01 = __SMLAD(val5, val4, res01); - res01 = __SMLAD(val2, val1, res01); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val3 = __SXTB16(val0); - val5 = __SXTB16(val2); - val4 = __SXTB16(val1); - val0 = __SXTB16_RORn(val0, 8); - val2 = __SXTB16_RORn(val2, 8); - val1 = __SXTB16_RORn(val1, 8); - - // 4 x MAC res00, res01 - res00 = __SMLAD(val5, val3, res00); - res00 = __SMLAD(val2, val0, res00); - res01 = __SMLAD(val5, val4, res01); - res01 = __SMLAD(val2, val1, res01); - } - - // Left-over accumulations - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q7_t rhs_value0 = rhs_ptr[0]; - q7_t rhs_value1 = rhs_ptr[rhs_cols]; - q7_t lhs_value = lhs_ptr[0]; - - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - 
res01 = MIN(res01, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr[1] = (q7_t)res01; - } - - rhs += 2 * rhs_cols; - dst += 2; - } - - if (rhs_rows % 2) - { - const q7_t *lhs_ptr = &lhs[0]; - q7_t *dst_ptr = &dst[0]; - - for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) - { - const q7_t *rhs_ptr = &rhs[0]; - q31_t res00 = 0; - if (bias) - { - res00 = bias[rhs_rows - 1]; - } - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value = rhs_ptr[0]; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]); - - // Add offset - res00 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr += rhs_rows; - } - } -#else - for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) - { - const q7_t *lhs_ptr = &lhs[0]; - q7_t *dst_ptr = &dst[0]; - - q31_t lhs_offset_contribution0 = 0; - q31_t lhs_offset_contribution1 = 0; - - for (int32_t x = 0; x < rhs_cols; ++x) - { - lhs_offset_contribution0 += rhs[x]; - lhs_offset_contribution1 += rhs[x + rhs_cols]; - } - - lhs_offset_contribution0 *= lhs_offset; - lhs_offset_contribution1 *= lhs_offset; - if (bias) - { - lhs_offset_contribution0 += bias[rhs_rows_idx]; - lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; - } - - int32_t lhs_rows_idx = lhs_rows >> 1; - - while (lhs_rows_idx) - { - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = lhs_offset_contribution0; - q31_t res01 = lhs_offset_contribution1; - q31_t res10 = lhs_offset_contribution0; - q31_t res11 = lhs_offset_contribution1; - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q7_t rhs_value0 = rhs_ptr[0]; - q7_t rhs_value1 = rhs_ptr[rhs_cols]; - q7_t lhs_value = lhs_ptr[0]; - - res00 += 
lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - lhs_value = lhs_ptr[rhs_cols]; - res10 += lhs_value * rhs_value0; - res11 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); - res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - res10 += dst_offset; - res11 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); - res10 = MAX(res10, activation_min); - res10 = MIN(res10, activation_max); - res11 = MAX(res11, activation_min); - res11 = MIN(res11, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr[1] = (q7_t)res01; - dst_ptr += rhs_rows; - dst_ptr[0] = (q7_t)res10; - dst_ptr[1] = (q7_t)res11; - dst_ptr += rhs_rows; - - lhs_ptr += rhs_cols; - - lhs_rows_idx--; - } - - // Left-over rows - if (lhs_rows % 2) - { - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = lhs_offset_contribution0; - q31_t res01 = lhs_offset_contribution1; - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q7_t rhs_value0 = rhs_ptr[0]; - q7_t rhs_value1 = rhs_ptr[rhs_cols]; - q7_t lhs_value = lhs_ptr[0]; - - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); - res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - - // Clamp the 
result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr[1] = (q7_t)res01; - } - - rhs += 2 * rhs_cols; - dst += 2; - } - - if (rhs_rows % 2) - { - const q7_t *lhs_ptr = &lhs[0]; - q7_t *dst_ptr = &dst[0]; - - for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) - { - const q7_t *rhs_ptr = &rhs[0]; - q31_t res00 = 0; - if (bias) - { - res00 = bias[rhs_rows - 1]; - } - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value = rhs_ptr[0]; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]); - - // Add offset - res00 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - - dst_ptr[0] = (q7_t)res00; - dst_ptr += rhs_rows; - } - } -#endif - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c deleted file mode 100644 index 8373660..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mult_q15.c - * Description: Q15 vector multiplication with variable output shifts - * - * $Date: 29. April 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - - -/** - * @brief Q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector - * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable Q15 range [0x8000 0x7FFF] will be saturated. 
- */ - -void arm_nn_mult_q15( - q15_t * pSrcA, - q15_t * pSrcB, - q15_t * pDst, - const uint16_t out_shift, - uint32_t blockSize) -{ - uint32_t blkCnt; /* loop counters */ - -#if defined (ARM_MATH_DSP) - -/* Run the below code for Cortex-M4 and Cortex-M3 */ - q31_t inA1, inA2, inB1, inB2; /* temporary input variables */ - q15_t out1, out2, out3, out4; /* temporary output variables */ - q31_t mul1, mul2, mul3, mul4; /* temporary variables */ - - /* loop Unrolling */ - blkCnt = blockSize >> 2U; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. */ - while (blkCnt > 0U) - { - /* read two samples at a time from sourceA */ - inA1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); - /* read two samples at a time from sourceB */ - inB1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); - /* read two samples at a time from sourceA */ - inA2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); - /* read two samples at a time from sourceB */ - inB2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); - - /* multiply mul = sourceA * sourceB */ - mul1 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1 >> 16)); - mul2 = (q31_t) ((q15_t) inA1 * (q15_t) inB1); - mul3 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) (inB2 >> 16)); - mul4 = (q31_t) ((q15_t) inA2 * (q15_t) inB2); - - /* saturate result to 16 bit */ - out1 = (q15_t) __SSAT((q31_t) (mul1 + NN_ROUND(out_shift)) >> out_shift, 16); - out2 = (q15_t) __SSAT((q31_t) (mul2 + NN_ROUND(out_shift)) >> out_shift, 16); - out3 = (q15_t) __SSAT((q31_t) (mul3 + NN_ROUND(out_shift)) >> out_shift, 16); - out4 = (q15_t) __SSAT((q31_t) (mul4 + NN_ROUND(out_shift)) >> out_shift, 16); - - /* store the result */ -#ifndef ARM_MATH_BIG_ENDIAN - - *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); - *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); - -#else - - *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); - *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); - -#endif /* #ifndef 
ARM_MATH_BIG_ENDIAN */ - - /* Decrement the blockSize loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. */ - blkCnt = blockSize % 0x4U; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Initialize blkCnt with number of samples */ - blkCnt = blockSize; - -#endif /* #if defined (ARM_MATH_DSP) */ - - - while (blkCnt > 0U) - { - /* C = A * B */ - /* Multiply the inputs and store the result in the destination buffer */ - *pDst++ = (q15_t) __SSAT(((q31_t) ((q31_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 16); - - /* Decrement the blockSize loop counter */ - blkCnt--; - } -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c deleted file mode 100644 index cdb5385..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mult_q7.c - * Description: Q7 vector multiplication with variable output shifts - * - * $Date: 29. 
April 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/** - * @brief Q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector - * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable Q7 range [0x80 0x7F] will be saturated. - */ - -void arm_nn_mult_q7( - q7_t * pSrcA, - q7_t * pSrcB, - q7_t * pDst, - const uint16_t out_shift, - uint32_t blockSize) -{ - uint32_t blkCnt; /* loop counters */ - -#if defined (ARM_MATH_DSP) - -/* Run the below code for Cortex-M4 and Cortex-M3 */ - q7_t out1, out2, out3, out4; /* Temporary variables to store the product */ - - /* loop Unrolling */ - blkCnt = blockSize >> 2U; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. 
*/ - while (blkCnt > 0U) - { - /* C = A * B */ - /* Multiply the inputs and store the results in temporary variables */ - out1 = (q7_t) __SSAT(((q15_t) ((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out2 = (q7_t) __SSAT(((q15_t) ((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out3 = (q7_t) __SSAT(((q15_t) ((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out4 = (q7_t) __SSAT(((q15_t) ((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - - /* Store the results of 4 inputs in the destination buffer in single cycle by packing */ - *__SIMD32(pDst)++ = __PACKq7(out1, out2, out3, out4); - - /* Decrement the blockSize loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. */ - blkCnt = blockSize % 0x4U; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Initialize blkCnt with number of samples */ - blkCnt = blockSize; - -#endif /* #if defined (ARM_MATH_DSP) */ - - - while (blkCnt > 0U) - { - /* C = A * B */ - /* Multiply the inputs and store the result in the destination buffer */ - *pDst++ = (q7_t) __SSAT(((q15_t) ((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - - /* Decrement the blockSize loop counter */ - blkCnt--; - } -} - -/** - * @} end of NNBasicMath group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c deleted file mode 100644 index b235787..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_vec_mat_mult_t_s8 - * Description: s8 vector by matrix (transposed) multiplication - * - * $Date: April 2, 2020 - * $Revision: V.1.5.0 - * - * Target Processor: Cortex-M - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup NNBasicMath - * @{ - */ - -/* - * s8 vector(lhs) by matrix (transposed) multiplication - * - * Refer header file for details. 
- * - */ -arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t lhs_offset, - const int32_t rhs_offset, - const int32_t dst_offset, - const int32_t dst_multiplier, - const int32_t dst_shift, - const int32_t rhs_cols, - const int32_t rhs_rows, - const int32_t activation_min, - const int32_t activation_max) -{ -#if defined(ARM_MATH_MVEI) - - const int16x8_t rhs_offset_vec = vdupq_n_s16((int16_t)rhs_offset); - const int16x8_t lhs_offset_vec = vdupq_n_s16((int16_t)lhs_offset); - - int32_t row_loop_cnt = rhs_rows / 4; - - for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) - { - int32_t acc1 = bias[0]; - int32_t acc2 = bias[1]; - int32_t acc3 = bias[2]; - int32_t acc4 = bias[3]; - bias += 4; - - int32x4_t acc; - const int32_t col_loop_cnt = (rhs_cols + 7) / 8; - - const int8_t *vec = lhs; - const int8_t *rhs_0 = rhs; - const int8_t *rhs_1 = rhs + rhs_cols; - const int8_t *rhs_2 = rhs + 2 * rhs_cols; - const int8_t *rhs_3 = rhs + 3 * rhs_cols; - - uint32_t col_cnt = (uint32_t)rhs_cols; - - for (int i = 0; i < col_loop_cnt; i++) - { - mve_pred16_t p = vctp16q(col_cnt); - col_cnt -= 8; - const int16x8_t tmp_b = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(vec, p), lhs_offset_vec, p); - - const int16x8_t tmp_a0 = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(rhs_0, p), rhs_offset_vec, p); - acc1 = vmladavaq_p_s16(acc1, tmp_a0, tmp_b, p); - - const int16x8_t tmp_a1 = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(rhs_1, p), rhs_offset_vec, p); - acc2 = vmladavaq_p_s16(acc2, tmp_a1, tmp_b, p); - - const int16x8_t tmp_a2 = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(rhs_2, p), rhs_offset_vec, p); - acc3 = vmladavaq_p_s16(acc3, tmp_a2, tmp_b, p); - - const int16x8_t tmp_a3 = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(rhs_3, p), rhs_offset_vec, p); - acc4 = vmladavaq_p_s16(acc4, tmp_a3, tmp_b, p); - - vec += 8; - rhs_0 += 8; - rhs_1 += 8; - rhs_2 += 8; - rhs_3 
+= 8; - } - rhs += 4 * rhs_cols; - - acc[0] = acc1; - acc[1] = acc2; - acc[2] = acc3; - acc[3] = acc4; - - acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); - acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); - acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); - acc = vminq_s32(acc, vdupq_n_s32(activation_max)); - - vstrbq_s32(dst, acc); - dst += 4; - } - - row_loop_cnt = rhs_rows & 3; - - for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; - i_row_loop_cnt++) - { - int32_t acc = *bias++; - const int32_t col_loop_cnt = (rhs_cols + 7) / 8; - const int8_t *vec = lhs; - const int8_t *kernel_cur = rhs; - - uint32_t col_cnt = (uint32_t)rhs_cols; - - for (int i = 0; i < col_loop_cnt; i++) - { - mve_pred16_t p = vctp16q(col_cnt); - col_cnt -= 8; - const int16x8_t tmp_b = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(vec, p), lhs_offset_vec, p); - - const int16x8_t tmp_a = vaddq_m_s16(vuninitializedq_s16(), - vldrbq_z_s16(kernel_cur, p), rhs_offset_vec, p); - acc = vmladavaq_p_s16(acc, tmp_a, tmp_b, p); - vec += 8; - kernel_cur += 8; - } - rhs += rhs_cols; - - acc = arm_nn_requantize(acc, dst_multiplier, dst_shift); - acc += dst_offset; - - acc = MAX(acc, activation_min); - acc = MIN(acc, activation_max); - *dst++ = (int8_t)(acc); - } - -#elif defined(ARM_MATH_DSP) - const int32_t off0 = rhs_cols - 4; - const int16_t lhs_offset_s16 = lhs_offset; - const int16_t rhs_offset_s16 = rhs_offset; - - const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16); - const uint32_t rhs_offset_s16x2 = __PKHBT(rhs_offset_s16, rhs_offset_s16, 16); - - for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) - { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = *bias++; - q31_t res01 = *bias++; - - int32_t rhs_cols_idx = 0; - - q31_t val0, val1, val2, val3, val4, val5; - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) - { - // Read 4 x int8 values from the RHS matrix - val0 = 
arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = 
__SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - } - - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); - res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); - - *dst++ = (q7_t)res00; - *dst++ = (q7_t)res01; - - rhs += 2 * rhs_cols; - } - - if (rhs_rows % 2) - { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = 
*bias++; - - int32_t rhs_cols_idx = 0; - - q31_t val0, val1, val2, val3; - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) - { - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - } - - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value0; - - ++rhs_ptr; - 
++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); - - // Add offset - res00 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - - *dst = (q7_t)res00; - } - -#else - - for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) - { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = *bias++; - q31_t res01 = *bias++; - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); - res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); - - // Add offset - res00 += dst_offset; - res01 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); - - *dst++ = (q7_t)res00; - *dst++ = (q7_t)res01; - - rhs += 2 * rhs_cols; - } - - if (rhs_rows % 2) - { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = *bias++; - - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; - - res00 += lhs_value * rhs_value0; - - ++rhs_ptr; - ++lhs_ptr; - } - - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); - - // Add offset - res00 += dst_offset; - - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - - *dst = (q7_t)res00; - } -#endif - - return ARM_MATH_SUCCESS; -} - -/** - * @} end of NNBasicMath group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c deleted file mode 100644 index c28f1a6..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nntables.c - * Description: Converts the elements of the Q7 vector to Q15 vector without left-shift - * - * $Date: 17. January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -/** - * @brief tables for various activation functions - * - * This file include the declaration of common tables. 
- * Most of them are used for activation functions - * - * Assumption: - * Unified table: input is 3.x format, i.e, range of [-8, 8) - * sigmoid(8) = 0.9996646498695336 - * tanh(8) = 0.9999997749296758 - * The accuracy here should be good enough - * - * 2-stage HL table: - * - * The entire input range is divided into two parts: - * - * Low range table: 0x000x xxxx or 0x111x xxxx - * table entry will be the binary number excluding the first - * two digits, i.e., 0x0x xxxx or 0x1x xxxx - * - * - * - * High range table 0x0010 0000 -- 0x0111 1111 - * 0x1000 0000 -- 0x1101 1111 - * - * For positive numbers, table entry will be - * 0x0010 0000 -- 0x0111 1111 minus 0x0010 0000 - * i.e., 0x0000 0000 - 0x0101 11111 - * - * same thing for the negative numbers, table entry will be - * 0x1000 0000 -- 0x1101 1111 minux 0x0010 0000 - * i.e., 0x0110 0000 - 0x1011 1111 - */ - -const q7_t sigmoidTable_q7[256] = { - 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, - 0x50, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5a, 0x5c, - 0x5e, 0x5f, 0x61, 0x62, 0x63, 0x65, 0x66, 0x67, - 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, - 0x71, 0x72, 0x72, 0x73, 0x74, 0x74, 0x75, 0x76, - 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, - 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, - 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7d, 0x7e, - 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, - 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x06, - 0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09, - 0x0a, 0x0a, 0x0b, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, - 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, - 0x17, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x21, - 0x22, 0x24, 0x26, 0x27, 0x29, 0x2b, 0x2d, 0x2e, - 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, -}; - -const q15_t sigmoidTable_q15[256] = { - 0x4000, 0x4200, 0x43ff, 0x45fc, 0x47f5, 0x49eb, 0x4bdc, 0x4dc8, - 0x4fad, 0x518a, 0x5360, 0x552c, 0x56ef, 0x58a8, 0x5a57, 0x5bfb, - 0x5d93, 0x5f20, 0x60a1, 0x6216, 0x637f, 0x64db, 0x662b, 0x676f, - 0x68a6, 0x69d2, 0x6af1, 0x6c05, 0x6d0d, 0x6e09, 0x6efb, 0x6fe2, - 0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, - 0x764a, 0x76d6, 0x775b, 0x77d8, 0x784f, 0x78c0, 0x792a, 0x798f, - 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 0x7b80, 0x7bc4, 0x7c03, - 0x7c3f, 0x7c78, 0x7cad, 0x7ce0, 0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, - 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f, 0x7e69, 0x7e81, - 0x7e98, 0x7eae, 0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, - 0x7f25, 0x7f32, 0x7f3e, 0x7f4a, 0x7f55, 0x7f5f, 0x7f69, 0x7f72, - 0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, - 0x7faf, 0x7fb4, 0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5, 0x7fc8, 0x7fcc, - 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0, - 0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7, 0x7fe9, 0x7fea, 0x7feb, 0x7fed, - 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff4, - 0x000b, 0x000c, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, - 0x0012, 0x0013, 0x0015, 0x0016, 0x0017, 0x0019, 0x001a, 0x001c, - 0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, - 0x0031, 0x0034, 0x0038, 0x003b, 0x003f, 0x0043, 0x0048, 0x004c, - 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d, - 0x0085, 0x008e, 0x0097, 0x00a1, 0x00ab, 0x00b6, 
0x00c2, 0x00ce, - 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b, 0x013e, 0x0152, - 0x0168, 0x017f, 0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, - 0x024d, 0x0273, 0x029a, 0x02c4, 0x02f1, 0x0320, 0x0353, 0x0388, - 0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, - 0x0612, 0x0671, 0x06d6, 0x0740, 0x07b1, 0x0828, 0x08a5, 0x092a, - 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70, - 0x0f42, 0x101e, 0x1105, 0x11f7, 0x12f3, 0x13fb, 0x150f, 0x162e, - 0x175a, 0x1891, 0x19d5, 0x1b25, 0x1c81, 0x1dea, 0x1f5f, 0x20e0, - 0x226d, 0x2405, 0x25a9, 0x2758, 0x2911, 0x2ad4, 0x2ca0, 0x2e76, - 0x3053, 0x3238, 0x3424, 0x3615, 0x380b, 0x3a04, 0x3c01, 0x3e00, -}; - -const q15_t sigmoidLTable_q15[128] = { - 0x4000, 0x4100, 0x4200, 0x42ff, 0x43ff, 0x44fd, 0x45fc, 0x46f9, - 0x47f5, 0x48f1, 0x49eb, 0x4ae5, 0x4bdc, 0x4cd3, 0x4dc8, 0x4ebb, - 0x4fad, 0x509c, 0x518a, 0x5276, 0x5360, 0x5447, 0x552c, 0x560f, - 0x56ef, 0x57cd, 0x58a8, 0x5981, 0x5a57, 0x5b2a, 0x5bfb, 0x5cc9, - 0x5d93, 0x5e5b, 0x5f20, 0x5fe2, 0x60a1, 0x615d, 0x6216, 0x62cc, - 0x637f, 0x642e, 0x64db, 0x6584, 0x662b, 0x66ce, 0x676f, 0x680c, - 0x68a6, 0x693d, 0x69d2, 0x6a63, 0x6af1, 0x6b7c, 0x6c05, 0x6c8a, - 0x6d0d, 0x6d8d, 0x6e09, 0x6e84, 0x6efb, 0x6f70, 0x6fe2, 0x7051, - 0x0f42, 0x0faf, 0x101e, 0x1090, 0x1105, 0x117c, 0x11f7, 0x1273, - 0x12f3, 0x1376, 0x13fb, 0x1484, 0x150f, 0x159d, 0x162e, 0x16c3, - 0x175a, 0x17f4, 0x1891, 0x1932, 0x19d5, 0x1a7c, 0x1b25, 0x1bd2, - 0x1c81, 0x1d34, 0x1dea, 0x1ea3, 0x1f5f, 0x201e, 0x20e0, 0x21a5, - 0x226d, 0x2337, 0x2405, 0x24d6, 0x25a9, 0x267f, 0x2758, 0x2833, - 0x2911, 0x29f1, 0x2ad4, 0x2bb9, 0x2ca0, 0x2d8a, 0x2e76, 0x2f64, - 0x3053, 0x3145, 0x3238, 0x332d, 0x3424, 0x351b, 0x3615, 0x370f, - 0x380b, 0x3907, 0x3a04, 0x3b03, 0x3c01, 0x3d01, 0x3e00, 0x3f00, -}; - -const q15_t sigmoidHTable_q15[192] = { - 0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, - 0x764a, 0x76d6, 0x775b, 0x77d8, 0x784f, 0x78c0, 0x792a, 0x798f, - 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 
0x7b80, 0x7bc4, 0x7c03, - 0x7c3f, 0x7c78, 0x7cad, 0x7ce0, 0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, - 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f, 0x7e69, 0x7e81, - 0x7e98, 0x7eae, 0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, - 0x7f25, 0x7f32, 0x7f3e, 0x7f4a, 0x7f55, 0x7f5f, 0x7f69, 0x7f72, - 0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, - 0x7faf, 0x7fb4, 0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5, 0x7fc8, 0x7fcc, - 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0, - 0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7, 0x7fe9, 0x7fea, 0x7feb, 0x7fed, - 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff4, - 0x000b, 0x000c, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, - 0x0012, 0x0013, 0x0015, 0x0016, 0x0017, 0x0019, 0x001a, 0x001c, - 0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, - 0x0031, 0x0034, 0x0038, 0x003b, 0x003f, 0x0043, 0x0048, 0x004c, - 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d, - 0x0085, 0x008e, 0x0097, 0x00a1, 0x00ab, 0x00b6, 0x00c2, 0x00ce, - 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b, 0x013e, 0x0152, - 0x0168, 0x017f, 0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, - 0x024d, 0x0273, 0x029a, 0x02c4, 0x02f1, 0x0320, 0x0353, 0x0388, - 0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, - 0x0612, 0x0671, 0x06d6, 0x0740, 0x07b1, 0x0828, 0x08a5, 0x092a, - 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70, -}; - -const q7_t tanhTable_q7[256] = { - 0x00, 0x08, 0x10, 0x18, 0x1f, 0x27, 0x2e, 0x35, - 0x3b, 0x41, 0x47, 0x4c, 0x51, 0x56, 0x5a, 0x5e, - 0x61, 0x65, 0x68, 0x6a, 0x6d, 0x6f, 0x71, 0x72, - 0x74, 0x75, 0x76, 0x78, 0x78, 0x79, 0x7a, 0x7b, - 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, - 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 
- 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, - 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x82, - 0x82, 0x82, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, - 0x85, 0x85, 0x86, 0x87, 0x88, 0x88, 0x8a, 0x8b, - 0x8c, 0x8e, 0x8f, 0x91, 0x93, 0x96, 0x98, 0x9b, - 0x9f, 0xa2, 0xa6, 0xaa, 0xaf, 0xb4, 0xb9, 0xbf, - 0xc5, 0xcb, 0xd2, 0xd9, 0xe1, 0xe8, 0xf0, 0xf8, -}; - -const q15_t tanhTable_q15[256] = { - 0x0000, 0x07fd, 0x0feb, 0x17b9, 0x1f59, 0x26bf, 0x2ddf, 0x34ae, - 0x3b27, 0x4142, 0x46fd, 0x4c56, 0x514d, 0x55e2, 0x5a1a, 0x5df6, - 0x617c, 0x64b0, 0x6797, 0x6a37, 0x6c95, 0x6eb5, 0x709e, 0x7254, - 0x73dc, 0x753a, 0x7672, 0x7788, 0x787f, 0x795b, 0x7a1e, 0x7acb, - 0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, - 0x7e49, 0x7e7d, 0x7eaa, 0x7ed2, 0x7ef5, 0x7f14, 0x7f30, 0x7f48, - 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc, - 0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7, 0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, - 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4, 0x7ff6, 0x7ff7, - 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, - 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe, 0x7ffe, 0x7ffe, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 
0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, - 0x8001, 0x8001, 0x8001, 0x8002, 0x8002, 0x8002, 0x8002, 0x8003, - 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006, 0x8006, 0x8007, - 0x8008, 0x8009, 0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, - 0x8016, 0x8019, 0x801c, 0x8020, 0x8024, 0x8029, 0x802f, 0x8035, - 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, - 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, - 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412, - 0x849b, 0x8535, 0x85e2, 0x86a5, 0x8781, 0x8878, 0x898e, 0x8ac6, - 0x8c24, 0x8dac, 0x8f62, 0x914b, 0x936b, 0x95c9, 0x9869, 0x9b50, - 0x9e84, 0xa20a, 0xa5e6, 0xaa1e, 0xaeb3, 0xb3aa, 0xb903, 0xbebe, - 0xc4d9, 0xcb52, 0xd221, 0xd941, 0xe0a7, 0xe847, 0xf015, 0xf803, -}; - -const q15_t tanhLTable_q15[128] = { - 0x0000, 0x0400, 0x07fd, 0x0bf7, 0x0feb, 0x13d7, 0x17b9, 0x1b90, - 0x1f59, 0x2314, 0x26bf, 0x2a58, 0x2ddf, 0x3151, 0x34ae, 0x37f6, - 0x3b27, 0x3e40, 0x4142, 0x442c, 0x46fd, 0x49b6, 0x4c56, 0x4edd, - 0x514d, 0x53a3, 0x55e2, 0x580a, 0x5a1a, 0x5c13, 0x5df6, 0x5fc4, - 0x617c, 0x6320, 0x64b0, 0x662d, 0x6797, 0x68f0, 0x6a37, 0x6b6e, - 0x6c95, 0x6dac, 0x6eb5, 0x6fb0, 0x709e, 0x717f, 0x7254, 0x731e, - 0x73dc, 0x7490, 0x753a, 0x75da, 0x7672, 0x7701, 0x7788, 0x7807, - 0x787f, 0x78f0, 0x795b, 0x79bf, 0x7a1e, 0x7a77, 0x7acb, 0x7b1b, - 0x849b, 0x84e5, 0x8535, 0x8589, 0x85e2, 0x8641, 0x86a5, 0x8710, - 0x8781, 0x87f9, 0x8878, 0x88ff, 0x898e, 0x8a26, 0x8ac6, 0x8b70, - 0x8c24, 0x8ce2, 0x8dac, 0x8e81, 0x8f62, 0x9050, 0x914b, 0x9254, - 0x936b, 0x9492, 0x95c9, 
0x9710, 0x9869, 0x99d3, 0x9b50, 0x9ce0, - 0x9e84, 0xa03c, 0xa20a, 0xa3ed, 0xa5e6, 0xa7f6, 0xaa1e, 0xac5d, - 0xaeb3, 0xb123, 0xb3aa, 0xb64a, 0xb903, 0xbbd4, 0xbebe, 0xc1c0, - 0xc4d9, 0xc80a, 0xcb52, 0xceaf, 0xd221, 0xd5a8, 0xd941, 0xdcec, - 0xe0a7, 0xe470, 0xe847, 0xec29, 0xf015, 0xf409, 0xf803, 0xfc00, -}; - -const q15_t tanhHTable_q15[192] = { - 0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, - 0x7e49, 0x7e7d, 0x7eaa, 0x7ed2, 0x7ef5, 0x7f14, 0x7f30, 0x7f48, - 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc, - 0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7, 0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, - 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4, 0x7ff6, 0x7ff7, - 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, - 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe, 0x7ffe, 0x7ffe, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, - 0x8001, 0x8001, 0x8001, 0x8002, 0x8002, 0x8002, 0x8002, 0x8003, - 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006, 0x8006, 0x8007, - 0x8008, 0x8009, 0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, - 0x8016, 0x8019, 0x801c, 0x8020, 0x8024, 0x8029, 0x802f, 0x8035, - 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, - 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, - 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412, -}; diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c deleted file mode 100644 index 56e89e8..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_q7_to_q15_no_shift.c - * Description: Converts the elements of the Q7 vector to Q15 vector without left-shift - * - * $Date: May 29, 2020 - * $Revision: V.1.0.2 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup nndata_convert - * @{ - */ - -/** - * @brief Converts the elements of the Q7 vector to Q15 vector without left-shift - * @param[in] *pSrc points to the Q7 input vector - * @param[out] *pDst points to the Q15 output vector - * @param[in] blockSize length of the input vector - * - * \par Description: - * - * The equation used for the conversion process is: - * - *
- * 	pDst[n] = (q15_t) pSrc[n];   0 <= n < blockSize.
- * 
- * - */ - -void arm_q7_to_q15_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize) -{ - const q7_t *pIn = pSrc; - uint32_t blkCnt; - -#if defined(ARM_MATH_DSP) - q31_t in; - q31_t in1, in2; - q31_t out1, out2; - - /*loop Unrolling */ - blkCnt = blockSize >> 2u; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ - while (blkCnt > 0u) - { - in = arm_nn_read_q7x4_ia(&pIn); - - /* rotatate in by 8 and extend two q7_t values to q15_t values */ - in1 = __SXTB16(__ROR((uint32_t)in, 8)); - - /* extend remaining two q7_t values to q15_t values */ - in2 = __SXTB16(in); - -#ifndef ARM_MATH_BIG_ENDIAN - out2 = (int32_t)__PKHTB(in1, in2, 16); - out1 = (int32_t)__PKHBT(in2, in1, 16); -#else - out1 = (int32_t)__PKHTB(in1, in2, 16); - out2 = (int32_t)__PKHBT(in2, in1, 16); -#endif - write_q15x2_ia(&pDst, out1); - write_q15x2_ia(&pDst, out2); - - /* Decrement the loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. */ - blkCnt = blockSize % 0x4u; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Loop over blockSize number of values */ - blkCnt = blockSize; - -#endif /* #ifndef ARM_MATH_CM0_FAMILY */ - - while (blkCnt > 0u) - { - /* convert from q7 to q15 and then store the results in the destination buffer */ - *pDst++ = (q15_t)*pIn++; - - /* Decrement the loop counter */ - blkCnt--; - } - -} - -/** - * @} end of nndata_convert group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c deleted file mode 100644 index 41f2cd4..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_q7_to_q15_reordered_no_shift.c - * Description: Converts the elements of the Q7 vector to reordered Q15 vector without left-shift - * - * $Date: May 29, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup nndata_convert - * @{ - */ - -/** - * @brief Converts the elements of the Q7 vector to reordered Q15 vector without left-shift - * @param[in] *pSrc points to the Q7 input vector - * @param[out] *pDst points to the Q15 output vector - * @param[in] blockSize length of the input vector - * - * @details - * - * This function does the q7 to q15 expansion with re-ordering - * - *
- *                          |   A1   |   A2   |   A3   |   A4   |
- *
- *                           0      7 8     15 16    23 24    31
- * 
- * - * is converted into: - * - *
- *  |       A1       |       A3       |   and  |       A2       |       A4       |
- *
- *   0             15 16            31          0             15 16            31
- * 
- * - * - * This looks strange but is natural considering how sign-extension is done at - * assembly level. - * - * The expansion of other other oprand will follow the same rule so that the end - * results are the same. - * - * The tail (i.e., last (N % 4) elements) will still be in original order. - * - */ - -void arm_q7_to_q15_reordered_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize) -{ - const q7_t *pIn = pSrc; /* Src pointer */ - uint32_t blkCnt; /* loop counter */ - -#ifndef ARM_MATH_CM0_FAMILY - q31_t in; - q31_t in1, in2; - - /* Run the below code for Cortex-M4 and Cortex-M3 */ - - /*loop Unrolling */ - blkCnt = blockSize >> 2u; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. */ - while (blkCnt > 0u) - { - /* C = (q15_t) A << 8 */ - /* convert from q7 to q15 and then store the results in the destination buffer */ - in = arm_nn_read_q7x4_ia(&pIn); - - /* rotatate in by 8 and extend two q7_t values to q15_t values */ - in1 = __SXTB16(__ROR((uint32_t)in, 8)); - - /* extend remainig two q7_t values to q15_t values */ - in2 = __SXTB16(in); - -#ifndef ARM_MATH_BIG_ENDIAN - *__SIMD32(pDst)++ = in2; - *__SIMD32(pDst)++ = in1; -#else - *__SIMD32(pDst)++ = in1; - *__SIMD32(pDst)++ = in2; -#endif - - /* Decrement the loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. 
*/ - blkCnt = blockSize % 0x4u; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Loop over blockSize number of values */ - blkCnt = blockSize; - -#endif /* #ifndef ARM_MATH_CM0_FAMILY */ - - while (blkCnt > 0u) - { - /* C = (q15_t) A << 8 */ - /* convert from q7 to q15 and then store the results in the destination buffer */ - *pDst++ = (q15_t) * pIn++; - - /* Decrement the loop counter */ - blkCnt--; - } - -} - -/** - * @} end of q7_to_x group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c deleted file mode 100644 index d2c8dfc..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_q7_to_q15_reordered_with_offset.c - * Description: Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. The re-ordering - * is a signature of sign extension intrinsic(DSP extension). 
- * - * $Date: May 29, 2020 - * $Revision: V.2.0.3 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup nndata_convert - * @{ - */ - -/** - * @brief Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. - * - * @note Refer header file for details. - * - */ - -void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset) -{ - -#if defined(ARM_MATH_DSP) - uint32_t block_cnt; - /* Run the below code for cores that support SIMD instructions */ - q31_t in_q7x4; - q31_t out_q15x2_1; - q31_t out_q15x2_2; - - /*loop unrolling */ - block_cnt = block_size >> 2u; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ - const q31_t offset_q15x2 = (q31_t)__PKHBT(offset, offset, 16); - while (block_cnt > 0u) - { - /* convert from q7 to q15 and then store the results in the destination buffer */ - in_q7x4 = arm_nn_read_q7x4_ia(&src); - - /* Extract and sign extend each of the four q7 values to q15 */ - out_q15x2_1 = __SXTAB16(offset_q15x2, __ROR((uint32_t)in_q7x4, 8)); - out_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4); - - write_q15x2_ia(&dst, out_q15x2_2); - write_q15x2_ia(&dst, out_q15x2_1); - - block_cnt--; - } - /* Handle left over samples */ - block_cnt = block_size % 0x4u; - - while (block_cnt > 0u) - { - *dst++ = (q15_t)*src++ + offset; - - /* Decrement the loop counter */ - block_cnt--; - } -#else - (void)src; - (void)dst; - (void)block_size; - (void)offset; - /* Not available */ -#endif -} - -/** - * @} end of nndata_convert group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c b/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c deleted file mode 100644 index 69c70e3..0000000 --- 
a/src/third_party/cmsis/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in_q7x4 compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in_q7x4 writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_q7_to_q15_with_offset.c - * Description: Converts the elements of the Q7 vector to Q15 vector with an added offset - * - * $Date: March 3, 2020 - * $Revision: V.2.0.2 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupSupport - */ - -/** - * @addtogroup nndata_convert - * @{ - */ - -void arm_q7_to_q15_with_offset(const q7_t *src, - q15_t *dst, - uint32_t block_size, - q15_t offset) -{ - int block_cnt; - -#if defined(ARM_MATH_MVEI) - - int16x8_t source; - const int16x8_t source_offset = vdupq_n_s16(offset); - block_cnt = block_size / 8; - - while (block_cnt > 0) - { - source = vldrbq_s16(src); - source = vaddq_s16(source, source_offset); - vstrhq_s16(dst, source); - dst += 8; - src += 8; - block_cnt--; - } - - block_cnt = block_size & 0x7; - -#elif defined(ARM_MATH_DSP) - /* Run the below code for cores that support SIMD instructions */ - q31_t in_q7x4; - q31_t in_q15x2_1; - q31_t in_q15x2_2; - q31_t out_q15x2_1; 
- q31_t out_q15x2_2; - - /*loop unrolling */ - block_cnt = block_size >> 2; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ - const q31_t offset_q15x2 = __PKHBT(offset, offset, 16); - while (block_cnt > 0) - { - /* convert from q7 to q15 and then store the results in the destination buffer */ - in_q7x4 = arm_nn_read_q7x4_ia(&src); - - /* Extract and sign extend each of the four q7 values to q15 */ - in_q15x2_1 = __SXTAB16(offset_q15x2, __ROR(in_q7x4, 8)); - in_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4); - - out_q15x2_2 = __PKHTB(in_q15x2_1, in_q15x2_2, 16); - out_q15x2_1 = __PKHBT(in_q15x2_2, in_q15x2_1, 16); - - write_q15x2_ia(&dst, out_q15x2_1); - write_q15x2_ia(&dst, out_q15x2_2); - - block_cnt--; - } - /* Handle left over samples */ - block_cnt = block_size % 0x4; - -#else - /* Run the below code for Cortex-M0 */ - /* Loop over block_size number of values */ - block_cnt = block_size; -#endif - - while (block_cnt > 0) - { - *dst++ = (q15_t)*src++ + offset; - - /* Decrement the loop counter */ - block_cnt--; - } -} - -/** - * @} end of nndata_convert group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c deleted file mode 100755 index 048d32b..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_avgpool_s8.c - * Description: Pooling function implementations - * - * $Date: September 3,2020 - * $Revision: V.2.0.2 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) - -static void scale_q31_to_q7_and_clamp(const q31_t *buffer, - q7_t *target, - int32_t length, - const int32_t count, - const int act_min, - const int act_max) -{ - const int half_count = count / 2; - for (int i = 0; i < length; i++) - { - int32_t sum = buffer[i] > 0 ? (buffer[i] + half_count) : (buffer[i] - half_count); - sum = sum / count; - sum = MAX(sum, act_min); - sum = MIN(sum, act_max); - - target[i] = (q7_t)sum; - } -} -#endif - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Pooling - * @{ - */ - -/* - * s8 average pooling function - * - * Refer to header file for details. 
- * - */ - -#if defined(ARM_MATH_MVEI) - -arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) -{ - (void)ctx; - const int32_t input_y = input_dims->h; - const int32_t input_x = input_dims->w; - const int32_t output_y = output_dims->h; - const int32_t output_x = output_dims->w; - const int32_t stride_y = pool_params->stride.h; - const int32_t stride_x = pool_params->stride.w; - const int32_t kernel_y = filter_dims->h; - const int32_t kernel_x = filter_dims->w; - const int32_t pad_y = pool_params->padding.h; - const int32_t pad_x = pool_params->padding.w; - const int32_t act_min = pool_params->activation.min; - const int32_t act_max = pool_params->activation.max; - const int32_t ch_src = input_dims->c; - - int32_t i_x, i_y; - int32_t k_x, k_y; - - for (i_y = 0; i_y < output_y; i_y++) - { - for (i_x = 0; i_x < output_x; i_x++) - { - - int32_t k_y_start, k_y_end; - int32_t k_x_start, k_x_end; - int32_t chCnt; - const int8_t *pTmp, *pTmpInner; - int8_t *pDst; - - k_y_start = MAX(0, i_y * stride_y - pad_y); - k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); - - k_x_start = MAX(0, i_x * stride_x - pad_x); - k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); - - pTmp = src; - pDst = &dst[ch_src * (i_x + i_y * output_x)]; - - chCnt = ch_src >> 4; - while (chCnt > 0) - { - int32x4_t sumV1, sumV2, sumV3, sumV4; - - int8x16_t tempV; - int16x8_t tempVLO, tempVHI; - int32x4_t tempVLOLO, tempVLOHI, tempVHILO, tempVHIHI; - int32_t count = 0; - - sumV1 = vdupq_n_s32(0); - sumV2 = vdupq_n_s32(0); - sumV3 = vdupq_n_s32(0); - sumV4 = vdupq_n_s32(0); - - for (k_y = k_y_start; k_y < k_y_end; k_y++) - { - for (k_x = k_x_start; k_x < k_x_end; k_x++) - { - pTmpInner = pTmp + (ch_src * (k_x + k_y * input_x)); - tempV = vldrbq_s8(pTmpInner); - - tempVLO = vmovlbq_s8(tempV); - tempVHI = 
vmovltq_s8(tempV); - - tempVLOLO = vmovlbq_s16(tempVLO); - tempVLOHI = vmovltq_s16(tempVLO); - - tempVHILO = vmovlbq_s16(tempVHI); - tempVHIHI = vmovltq_s16(tempVHI); - - sumV1 = vaddq_s32(sumV1, tempVLOLO); - sumV2 = vaddq_s32(sumV2, tempVLOHI); - sumV3 = vaddq_s32(sumV3, tempVHILO); - sumV4 = vaddq_s32(sumV4, tempVHIHI); - - count++; - } - } - - sumV1[0] = sumV1[0] > 0 ? (sumV1[0] + count / 2) / count : (sumV1[0] - count / 2) / count; - sumV1[1] = sumV1[1] > 0 ? (sumV1[1] + count / 2) / count : (sumV1[1] - count / 2) / count; - sumV1[2] = sumV1[2] > 0 ? (sumV1[2] + count / 2) / count : (sumV1[2] - count / 2) / count; - sumV1[3] = sumV1[3] > 0 ? (sumV1[3] + count / 2) / count : (sumV1[3] - count / 2) / count; - - sumV2[0] = sumV2[0] > 0 ? (sumV2[0] + count / 2) / count : (sumV2[0] - count / 2) / count; - sumV2[1] = sumV2[1] > 0 ? (sumV2[1] + count / 2) / count : (sumV2[1] - count / 2) / count; - sumV2[2] = sumV2[2] > 0 ? (sumV2[2] + count / 2) / count : (sumV2[2] - count / 2) / count; - sumV2[3] = sumV2[3] > 0 ? (sumV2[3] + count / 2) / count : (sumV2[3] - count / 2) / count; - - sumV3[0] = sumV3[0] > 0 ? (sumV3[0] + count / 2) / count : (sumV3[0] - count / 2) / count; - sumV3[1] = sumV3[1] > 0 ? (sumV3[1] + count / 2) / count : (sumV3[1] - count / 2) / count; - sumV3[2] = sumV3[2] > 0 ? (sumV3[2] + count / 2) / count : (sumV3[2] - count / 2) / count; - sumV3[3] = sumV3[3] > 0 ? (sumV3[3] + count / 2) / count : (sumV3[3] - count / 2) / count; - - sumV4[0] = sumV4[0] > 0 ? (sumV4[0] + count / 2) / count : (sumV4[0] - count / 2) / count; - sumV4[1] = sumV4[1] > 0 ? (sumV4[1] + count / 2) / count : (sumV4[1] - count / 2) / count; - sumV4[2] = sumV4[2] > 0 ? (sumV4[2] + count / 2) / count : (sumV4[2] - count / 2) / count; - sumV4[3] = sumV4[3] > 0 ? 
(sumV4[3] + count / 2) / count : (sumV4[3] - count / 2) / count; - - sumV1 = vmaxq_s32(sumV1, vdupq_n_s32(act_min)); - sumV1 = vminq_s32(sumV1, vdupq_n_s32(act_max)); - - sumV2 = vmaxq_s32(sumV2, vdupq_n_s32(act_min)); - sumV2 = vminq_s32(sumV2, vdupq_n_s32(act_max)); - - sumV3 = vmaxq_s32(sumV3, vdupq_n_s32(act_min)); - sumV3 = vminq_s32(sumV3, vdupq_n_s32(act_max)); - - sumV4 = vmaxq_s32(sumV4, vdupq_n_s32(act_min)); - sumV4 = vminq_s32(sumV4, vdupq_n_s32(act_max)); - - tempVLO = vmovnbq_s32(tempVLO, sumV1); - tempVLO = vmovntq_s32(tempVLO, sumV2); - - tempVHI = vmovnbq_s32(tempVHI, sumV3); - tempVHI = vmovntq_s32(tempVHI, sumV4); - - tempV = vmovnbq_s16(tempV, tempVLO); - tempV = vmovntq_s16(tempV, tempVHI); - - vstrbq_s8(pDst, tempV); - pDst += 16; - - chCnt--; - pTmp += 16; - } - - chCnt = ch_src & 0xF; - while (chCnt > 0) - { - int32_t sum = 0; - int32_t count = 0; - - for (k_y = k_y_start; k_y < k_y_end; k_y++) - { - for (k_x = k_x_start; k_x < k_x_end; k_x++) - { - sum += pTmp[ch_src * (k_x + k_y * input_x)]; - count++; - } - } - sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; - sum = MAX(sum, act_min); - sum = MIN(sum, act_max); - - *pDst++ = sum; - - chCnt--; - pTmp++; - } - } - } - return ARM_MATH_SUCCESS; -} - -#else -arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) -{ - const int32_t input_y = input_dims->h; - const int32_t input_x = input_dims->w; - const int32_t output_y = output_dims->h; - const int32_t output_x = output_dims->w; - const int32_t stride_y = pool_params->stride.h; - const int32_t stride_x = pool_params->stride.w; - const int32_t kernel_y = filter_dims->h; - const int32_t kernel_x = filter_dims->w; - const int32_t pad_y = pool_params->padding.h; - const int32_t pad_x = pool_params->padding.w; - const int32_t act_min = pool_params->activation.min; - const int32_t act_max = pool_params->activation.max; - const int32_t ch_src = input_dims->c; - q31_t *buffer = (q31_t *)ctx->buf; - -#if defined(ARM_MATH_DSP) - - /* Run the following code for CPU's with DSP extension - */ - for (int i_y = 0, idx_y = -pad_y; i_y < output_y; idx_y += stride_y, i_y++) - { - for (int i_x = 0, idx_x = -pad_x; i_x < output_x; idx_x += stride_x, i_x++) - { - /* Condition for kernel start dimension: - (base_idx_ + kernel__start) >= 0 */ - const int32_t kernel_y_start = MAX(0, -idx_y); - const int32_t kernel_x_start = MAX(0, -idx_x); - - /* Condition for kernel end dimension: - (base_idx_ + kernel__end) < dim_src_ */ - const int32_t kernel_y_end = MIN(kernel_y, input_y - idx_y); - const int32_t kernel_x_end = MIN(kernel_x, input_x - idx_x); - - int count = 0; - - for (int k_y = kernel_y_start; k_y < kernel_y_end; k_y++) - { - for (int k_x = kernel_x_start; k_x < kernel_x_end; k_x++) - { - const q7_t *start = src + ch_src * (k_x + idx_x + (k_y + idx_y) * input_x); - - if (count == 0) - { - for (int i = 0; i < 
ch_src; i++) - { - buffer[i] = start[i]; - } - } - else - { - for (int i = 0; i < ch_src; i++) - { - buffer[i] = __QADD(start[i], buffer[i]); - } - } - count++; - } - } - scale_q31_to_q7_and_clamp(buffer, dst, ch_src, count, act_min, act_max); - dst += ch_src; - } - } -#else - - /* Reference C code adapted from CMSIS-NN arm_avepool_q7_HWC. - */ - (void)buffer; - int16_t i_ch_in, i_x, i_y; - int16_t k_x, k_y; - - for (i_y = 0; i_y < output_y; i_y++) - { - for (i_x = 0; i_x < output_x; i_x++) - { - for (i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) - { - int sum = 0; - int count = 0; - for (k_y = i_y * stride_y - pad_y; k_y < i_y * stride_y - pad_y + kernel_y; k_y++) - { - for (k_x = i_x * stride_x - pad_x; k_x < i_x * stride_x - pad_x + kernel_x; k_x++) - { - if (k_y >= 0 && k_x >= 0 && k_y < input_y && k_x < input_x) - { - sum += src[i_ch_in + ch_src * (k_x + k_y * input_x)]; - count++; - } - } - } - sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; - sum = MAX(sum, act_min); - sum = MIN(sum, act_max); - - dst[i_ch_in + ch_src * (i_x + i_y * output_x)] = sum; - } - } - } - -#endif - return ARM_MATH_SUCCESS; -} - -#endif /* ARM_MATH_MVEI */ - -int32_t arm_avgpool_s8_get_buffer_size(const int output_x, - const int ch_src) -{ - (void)output_x; - -#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) - return (ch_src * sizeof(int32_t)); -#else - (void)ch_src; - return 0; -#endif -} -/** - * @} end of Pooling group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c deleted file mode 100755 index 33a4436..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_max_pool_s8.c - * Description: Pooling function implementations - * - * $Date: June 11, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -static void compare_and_replace_if_larger_q7(q7_t *base, - const q7_t *target, - int32_t length) -{ -#if defined(ARM_MATH_MVEI) - int32_t loop_count = (length + 15) / 16; - for (int i = 0; i < loop_count; i++) - { - mve_pred16_t p = vctp16q((uint32_t)length); - const int8x16_t op_1 = vldrbq_z_s8(base, p); - const int8x16_t op_2 = vldrbq_z_s8(target, p); - const int8x16_t max = vmaxq_m_s8(vuninitializedq_s8(), op_1, op_2, p); - vstrbq_p_s8(base, max, p); - base += 16; - target += 16; - length -= 16; - } -#else - q7_t *dst = base; - const q7_t *src = target; - union arm_nnword ref_max; - union arm_nnword comp_max; - int32_t cnt = length >> 2; - - while (cnt > 0l) - { - ref_max.word = arm_nn_read_q7x4(dst); - comp_max.word = arm_nn_read_q7x4_ia(&src); - - if (comp_max.bytes[0] > ref_max.bytes[0]) - { - ref_max.bytes[0] = comp_max.bytes[0]; - } - if (comp_max.bytes[1] > ref_max.bytes[1]) - { - ref_max.bytes[1] = comp_max.bytes[1]; - } - if (comp_max.bytes[2] > 
ref_max.bytes[2]) - { - ref_max.bytes[2] = comp_max.bytes[2]; - } - if (comp_max.bytes[3] > ref_max.bytes[3]) - { - ref_max.bytes[3] = comp_max.bytes[3]; - } - - write_q7x4_ia(&dst, ref_max.word); - - cnt--; - } - - cnt = length & 0x3; - while (cnt > 0l) - { - if (*src > *dst) - { - *dst = *src; - } - dst++; - src++; - cnt--; - } -#endif -} - -static void -clamp_output(q7_t *source, int32_t length, const int32_t act_min, const int32_t act_max) -{ -#if defined(ARM_MATH_MVEI) - int32_t - loop_count = (length + 15) / 16; - for (int i = 0; i < loop_count; i++) - { - mve_pred16_t p = vctp16q((uint32_t)length); - length -= 16; - const int8x16_t src = vldrbq_z_s8(source, p); - const int8x16_t predicated_min = vdupq_m_n_s8(vuninitializedq_s8(), (int8_t)act_min, p); - const int8x16_t predicated_max = vdupq_m_n_s8(vuninitializedq_s8(), (int8_t)act_max, p); - int8x16_t - res = vmaxq_m_s8(vuninitializedq_s8(), src, predicated_min, p); - res = vminq_m_s8(vuninitializedq_s8(), src, predicated_max, p); - vstrbq_p_s8(source, res, p); - source += 16; - } -#else - union arm_nnword in; - int32_t cnt = length >> 2; - - while (cnt > 0l) - { - in.word = arm_nn_read_q7x4(source); - - in.bytes[0] = MAX(in.bytes[0], act_min); - in.bytes[0] = MIN(in.bytes[0], act_max); - in.bytes[1] = MAX(in.bytes[1], act_min); - in.bytes[1] = MIN(in.bytes[1], act_max); - in.bytes[2] = MAX(in.bytes[2], act_min); - in.bytes[2] = MIN(in.bytes[2], act_max); - in.bytes[3] = MAX(in.bytes[3], act_min); - in.bytes[3] = MIN(in.bytes[3], act_max); - - write_q7x4_ia(&source, in.word); - cnt--; - } - - cnt = length & 0x3; - while (cnt > 0l) - { - int32_t comp = *source; - comp = MAX(comp, act_min); - comp = MIN(comp, act_max); - *source++ = (int8_t)comp; - cnt--; - } -#endif -} - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Pooling - * @{ - */ - -/* - * Optimized s8 max pooling function - * - * Refer to header file for details. 
- * - */ - -arm_status -arm_max_pool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) -{ - const int32_t input_y = input_dims->h; - const int32_t input_x = input_dims->w; - const int32_t output_y = output_dims->h; - const int32_t output_x = output_dims->w; - const int32_t stride_y = pool_params->stride.h; - const int32_t stride_x = pool_params->stride.w; - const int32_t kernel_y = filter_dims->h; - const int32_t kernel_x = filter_dims->w; - const int32_t pad_y = pool_params->padding.h; - const int32_t pad_x = pool_params->padding.w; - const int32_t act_min = pool_params->activation.min; - const int32_t act_max = pool_params->activation.max; - const int32_t channel_in = input_dims->c; - (void)ctx; - q7_t *dst_base = dst; - - for (int i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++) - { - for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++) - { - /* Condition for kernel start dimension: (base_idx_ + kernel__start) >= 0 */ - const int32_t ker_y_start = MAX(0, -base_idx_y); - const int32_t ker_x_start = MAX(0, -base_idx_x); - - /* Condition for kernel end dimension: (base_idx_ + kernel__end) < dim_src_ */ - const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y); - const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x); - - int count = 0; - - for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++) - { - for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++) - { - const q7_t *start = src + channel_in * (k_x + base_idx_x + (k_y + base_idx_y) * input_x); - - if (count == 0) - { - memcpy(dst, start, channel_in); - count++; - } - else - { - compare_and_replace_if_larger_q7(dst, start, channel_in); - } - } - } - /* 'count' is expected to be non-zero here. 
*/ - dst += channel_in; - } - } - - clamp_output(dst_base, output_x * output_y * channel_in, act_min, act_max); - - return ARM_MATH_SUCCESS; -} - -/** - * @} end of Pooling group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c b/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c deleted file mode 100644 index 14e0ef7..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_pool_q7_HWC.c - * Description: Pooling function implementations - * - * $Date: 17. 
January 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -#if defined (ARM_MATH_DSP) - -/** - * @brief A few utility functions used by pooling functions - * - * - */ - -static void buffer_scale_back_q15_to_q7(q15_t * buffer, q7_t * target, uint16_t length, uint16_t scale) -{ - int i; - - for (i = 0; i < length; i++) - { - target[i] = (q7_t) (buffer[i] / scale); - } -} - -static void compare_and_replace_if_larger_q7(q7_t * base, // base data - const q7_t * target, // compare target - const uint16_t length // data length - ) -{ - q7_t *pIn = base; - const q7_t *pCom = target; - union arm_nnword in; - union arm_nnword com; - uint16_t cnt = length >> 2; - - while (cnt > 0u) - { - in.word = arm_nn_read_q7x4((const q7_t*)pIn); - com.word = arm_nn_read_q7x4_ia((const q7_t**)&pCom); - - // if version - if (com.bytes[0] > in.bytes[0]) - in.bytes[0] = com.bytes[0]; - if (com.bytes[1] > in.bytes[1]) - in.bytes[1] = com.bytes[1]; - if (com.bytes[2] > in.bytes[2]) - in.bytes[2] = com.bytes[2]; - if (com.bytes[3] > in.bytes[3]) - in.bytes[3] = com.bytes[3]; - - *__SIMD32(pIn)++ = in.word; - - cnt--; - } - - cnt = length & 0x3; - while (cnt > 0u) - { - if (*pCom > *pIn) - { - *pIn = *pCom; - } - pIn++; - pCom++; - cnt--; - } -} - -static void accumulate_q7_to_q15(q15_t * base, q7_t * target, const uint16_t length) -{ - q15_t *pCnt = base; - q7_t *pV = target; - q31_t v1, v2, vo1, vo2; - uint16_t cnt = length >> 2; - q31_t in; - - while (cnt > 0u) - { - q31_t value = arm_nn_read_q7x4_ia((const q7_t**)&pV); - v1 = __SXTB16(__ROR(value, 8)); - v2 = __SXTB16(value); -#ifndef ARM_MATH_BIG_ENDIAN - - vo2 = __PKHTB(v1, v2, 16); - vo1 = __PKHBT(v2, v1, 16); - -#else - - vo1 = __PKHTB(v1, v2, 16); - vo2 = __PKHBT(v2, v1, 16); - -#endif - - in = arm_nn_read_q15x2(pCnt); - *__SIMD32(pCnt)++ = __QADD16(vo1, in); - - in = 
arm_nn_read_q15x2(pCnt); - *__SIMD32(pCnt)++ = __QADD16(vo2, in); - - cnt--; - } - cnt = length & 0x3; - while (cnt > 0u) - { - *pCnt++ += *pV++; - cnt--; - } -} - -#endif // ARM_MATH_DSP - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Pooling - * @{ - */ - - /** - * @brief Q7 max pooling function - * @param[in, out] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA Not used - * @param[in,out] Im_out pointer to output tensor - * - * @details - * - * The pooling function is implemented as split x-pooling then - * y-pooling. - * - * This pooling function is input-destructive. Input data is undefined - * after calling this function. - * - */ - -void -arm_maxpool_q7_HWC(q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out) -{ - (void)bufferA; -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - int16_t i_x, i_y; - - /* first does the pooling along x axis */ - for (i_y = 0; i_y < dim_im_in; i_y++) - { - - for (i_x = 0; i_x < dim_im_out; i_x++) - { - /* for each output pixel */ - q7_t *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in; - q7_t *win_start; - q7_t *win_stop; - if (i_x * stride - padding < 0) - { - win_start = target; - } else - { - win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in; - } - - if (i_x * stride - padding + dim_kernel >= dim_im_in) - { - win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in; - } else - { - win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in; - } - - /* first step is to copy over initial data 
*/ - /* arm_copy_q7(win_start, target, ch_im_in); */ - memmove(target, win_start, ch_im_in); - - /* start the max operation from the second part */ - win_start += ch_im_in; - for (; win_start < win_stop; win_start += ch_im_in) - { - compare_and_replace_if_larger_q7(target, win_start, ch_im_in); - } - } - } - - /* then does the pooling along y axis */ - for (i_y = 0; i_y < dim_im_out; i_y++) - { - - /* for each output row */ - q7_t *target = Im_out + i_y * dim_im_out * ch_im_in; - q7_t *row_start; - q7_t *row_end; - /* setting the starting row */ - if (i_y * stride - padding < 0) - { - row_start = Im_in; - } else - { - row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in; - } - /* setting the stopping row */ - if (i_y * stride - padding + dim_kernel >= dim_im_in) - { - row_end = Im_in + dim_im_in * dim_im_in * ch_im_in; - } else - { - row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in; - } - - /* copy over the first row */ - /* arm_copy_q7(row_start, target, dim_im_out * ch_im_in); */ - memmove(target, row_start, dim_im_out * ch_im_in); - - /* move over to next row */ - row_start += ch_im_in * dim_im_in; - - for (; row_start < row_end; row_start += dim_im_in * ch_im_in) - { - compare_and_replace_if_larger_q7(target, row_start, dim_im_out * ch_im_in); - } - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - int16_t i_ch_in, i_x, i_y; - int16_t k_x, k_y; - - for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++) - { - for (i_y = 0; i_y < dim_im_out; i_y++) - { - for (i_x = 0; i_x < dim_im_out; i_x++) - { - int max = -129; - for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++) - { - for (k_x = i_x * stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++) - { - if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < dim_im_in) - { - if (Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)] > max) - { - max = Im_in[i_ch_in + ch_im_in * (k_x + k_y * 
dim_im_in)]; - } - } - } - } - Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = max; - } - } - } - -#endif /* ARM_MATH_DSP */ - -} - - /** - * @brief Q7 average pooling function - * @param[in,out] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] Im_out pointer to output tensor - * - * @details - * - * Buffer size: - * - * bufferA size: 2*dim_im_out*ch_im_in - * - * The pooling function is implemented as split x-pooling then - * y-pooling. - * - * This pooling function is input-destructive. Input data is undefined - * after calling this function. - * - */ - -void -arm_avepool_q7_HWC(q7_t * Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out) -{ - -#if defined (ARM_MATH_DSP) - /* Run the following code for Cortex-M4 and Cortex-M7 */ - - q15_t *buffer = (q15_t *) bufferA; - int16_t i_x, i_y; - int16_t count = 0; - - /* first does the pooling along x axis */ - for (i_y = 0; i_y < dim_im_in; i_y++) - { - - for (i_x = 0; i_x < dim_im_out; i_x++) - { - /* for each output pixel */ - q7_t *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in; - q7_t *win_start; - q7_t *win_stop; - if (i_x * stride - padding < 0) - { - win_start = target; - } else - { - win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in; - } - - if (i_x * stride - padding + dim_kernel >= dim_im_in) - { - win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in; - } else - { - win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in; - } - - /* first step is to copy over 
initial data */ - arm_q7_to_q15_no_shift(win_start, buffer, ch_im_in); - count = 1; - - /* start the max operation from the second part */ - win_start += ch_im_in; - for (; win_start < win_stop; win_start += ch_im_in) - { - accumulate_q7_to_q15(buffer, win_start, ch_im_in); - count++; - } - buffer_scale_back_q15_to_q7(buffer, target, ch_im_in, count); - } - } - - /* then does the pooling along y axis */ - for (i_y = 0; i_y < dim_im_out; i_y++) - { - /* for each output row */ - q7_t *target = Im_out + i_y * dim_im_out * ch_im_in; - q7_t *row_start; - q7_t *row_end; - /* setting the starting row */ - if (i_y * stride - padding < 0) - { - row_start = Im_in; - } else - { - row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in; - } - /* setting the stopping row */ - if (i_y * stride - padding + dim_kernel >= dim_im_in) - { - row_end = Im_in + dim_im_in * dim_im_in * ch_im_in; - } else - { - row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in; - } - - /* copy over the first row */ - arm_q7_to_q15_no_shift(row_start, buffer, dim_im_out * ch_im_in); - count = 1; - - /* move over to next row */ - row_start += ch_im_in * dim_im_in; - - for (; row_start < row_end; row_start += dim_im_in * ch_im_in) - { - accumulate_q7_to_q15(buffer, row_start, dim_im_out * ch_im_in); - count++; - } - buffer_scale_back_q15_to_q7(buffer, target, dim_im_out * ch_im_in, count); - } - -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - - (void)bufferA; - int16_t i_ch_in, i_x, i_y; - int16_t k_x, k_y; - - for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++) - { - for (i_y = 0; i_y < dim_im_out; i_y++) - { - for (i_x = 0; i_x < dim_im_out; i_x++) - { - int sum = 0; - int count = 0; - for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++) - { - for (k_x = i_x * stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++) - { - if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < 
dim_im_in) - { - sum += Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)]; - count++; - } - } - } - Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = sum / count; - } - } - } - -#endif /* ARM_MATH_DSP */ - -} - -/** - * @} end of Pooling group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c deleted file mode 100644 index 066e0ff..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_reshape_s8.c - * Description: Reshape a s8 vector - * - * $Date: September 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Reshape - * @{ - */ - -/** - * Basic s8 reshape function. - * - * Refer header file for details. 
- * - */ - -void arm_reshape_s8(const int8_t *input, - int8_t *output, - const uint32_t total_size) -{ - memcpy(output, input, total_size); -} - -/** - * @} end of Reshape group - */ \ No newline at end of file diff --git a/src/third_party/cmsis/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c deleted file mode 100644 index a03ed9b..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_svdf_s8.c - * Description: S8 basic SVDF layer function - * - * $Date: 17. August 2020 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M processors - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nn_types.h" -#include "arm_nnsupportfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup SVDF - * @{ - */ - -/* - * S8 SVDF layer function for TensorFlow Lite - * - * Refer to header file for details. 
- * - */ - -arm_status -arm_svdf_s8(const cmsis_nn_context *input_ctx, - const cmsis_nn_context *output_ctx, - const cmsis_nn_svdf_params *svdf_params, - const cmsis_nn_per_tensor_quant_params *input_quant_params, - const cmsis_nn_per_tensor_quant_params *output_quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *state_dims, - q15_t *state_data, - const cmsis_nn_dims *weights_feature_dims, - const q7_t *weights_feature_data, - const cmsis_nn_dims *weights_time_dims, - const q15_t *weights_time_data, - const cmsis_nn_dims *bias_dims, - const q31_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) -{ - (void)bias_dims; - (void)state_dims; - (void)output_dims; - - const q31_t multiplier_in = input_quant_params->multiplier; - const q31_t shift_in = input_quant_params->shift; - const q31_t multiplier_out = output_quant_params->multiplier; - const q31_t shift_2 = output_quant_params->shift; - const int32_t zp_in = svdf_params->input_offset; - const int32_t zp_out = svdf_params->output_offset; - const int32_t in_activation_min = svdf_params->input_activation.min; - const int32_t in_activation_max = svdf_params->input_activation.max; - const int32_t out_activation_min = svdf_params->output_activation.min; - const int32_t out_activation_max = svdf_params->output_activation.max; - const int16_t rank = svdf_params->rank; - - int32_t zp_32 = (-zp_in & 0xffff) | - ((-zp_in & 0xffff) << 16); - - const int32_t input_batches = input_dims->n; - const int32_t input_height = input_dims->h; - const int32_t feature_batches = weights_feature_dims->n; - const int32_t time_batches = weights_time_dims->h; - const int32_t unit_count = feature_batches / rank; - - q31_t *buffer_a = (q31_t *)input_ctx->buf; - q31_t *buffer_b = (q31_t *)output_ctx->buf; - - memmove((q15_t *)state_data, (q15_t *)state_data + 1, - (size_t)(input_batches * feature_batches * time_batches * (int32_t)sizeof(int16_t))); - - q15_t *res_ptr = state_data + 
(time_batches - 1); - for (int i_batch = 0; i_batch < input_batches; i_batch++) - { - const q7_t *buffer_1 = weights_feature_data; - for (int r = 0; r < feature_batches; r++) - { - q31_t dot_prod = 0; - - const q7_t *buffer_2 = input_data + i_batch * input_height; - -#if defined(ARM_MATH_DSP) - int c = 0; - int32_t block_count = input_height >> 2; - for (int i = 0; i < block_count; i++) - { - c += 4; - - q31_t r1 = arm_nn_read_q7x4_ia(&buffer_1); - q31_t r1_a = __SXTB16(r1); - q31_t r1_b = __SXTB16(__ROR((uint32_t)r1, 8)); - - q31_t r2 = arm_nn_read_q7x4_ia(&buffer_2); - q31_t r2_a = __SXTAB16(zp_32, r2); - q31_t r2_b = __SXTAB16(zp_32, __ROR((uint32_t)r2, 8)); - - dot_prod = __SMLAD(r1_a, r2_a, dot_prod); - dot_prod = __SMLAD(r1_b, r2_b, dot_prod); - } - - for (; c < input_height; c++) - { - dot_prod += *buffer_1 * (*buffer_2 - zp_in); - buffer_1++; - buffer_2++; - } -#else - for (int c = 0; c < input_height; c++) - { - dot_prod += *buffer_1 * (*buffer_2 - zp_in); - buffer_1++; - buffer_2++; - } -#endif - - dot_prod = arm_nn_requantize(dot_prod, - multiplier_in, - shift_in); - dot_prod = CLAMP(dot_prod, in_activation_max, in_activation_min); - *res_ptr = dot_prod; - res_ptr += time_batches; - } - } - - for (int i_batch = 0; i_batch < input_batches; i_batch++) - { - q31_t *ptr_a = buffer_a + i_batch * feature_batches; - - const q15_t *v1 = weights_time_data; - const q15_t *v2 = state_data + i_batch * time_batches * feature_batches; - for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++) - { - *ptr_a = 0; - - int32_t sum = 0; -#if defined(ARM_MATH_DSP) - int j = 0; - int32_t block_count = time_batches >> 1; - for (int i = 0; i < block_count; i++) - { - j += 2; - q31_t r1 = arm_nn_read_q15x2_ia(&v1); - q31_t r2 = arm_nn_read_q15x2_ia(&v2); - - sum = __SMLAD(r1, r2, sum); - } - - // Process the remaining data - for (; j < time_batches; j++) - { - sum += *v1 * *v2; - v1++; - v2++; - } -#else - for (int j = 0; j < time_batches; j++) - { - 
sum += *v1 * *v2; - v1++; - v2++; - } -#endif - - *ptr_a = sum; - ptr_a++; - } - } - - for (int i_batch = 0; i_batch < input_batches; i_batch++) - { - q31_t *output_data_temp = buffer_b + i_batch * unit_count; - q31_t *ptr_a = buffer_a + i_batch * feature_batches; - - for (int i = 0; i < unit_count; i++) - { - output_data_temp[i] = bias_data[i]; - for (int j = 0; j < rank; j++) - { - output_data_temp[i] += *ptr_a; - ptr_a++; - } - } - } - - for (int i = 0; i < input_batches * unit_count; i++) - { - output_data[i] = (q7_t)CLAMP(arm_nn_requantize(buffer_b[i], multiplier_out, shift_2) + zp_out, - out_activation_max, out_activation_min); - } - - return (ARM_MATH_SUCCESS); -} - -/** - * @} end of SVDF group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c b/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c deleted file mode 100644 index e852eec..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_softmax_q15.c - * Description: Q15 softmax function - * - * $Date: 20. 
February 2018 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Softmax - * @{ - */ - - /** - * @brief Q15 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector - * - * @details - * - * Here, instead of typical e based softmax, we use - * 2-based softmax, i.e.,: - * - * y_i = 2^(x_i) / sum(2^x_j) - * - * The relative output will be different here. - * But mathematically, the gradient will be the same - * with a log(2) scaling factor. - * - */ - -void arm_softmax_q15(const q15_t * vec_in, const uint16_t dim_vec, q15_t * p_out) -{ - q31_t sum; - int16_t i; - uint8_t shift; - q31_t base; - base = -1 * 0x100000; - for (i = 0; i < dim_vec; i++) - { - if (vec_in[i] > base) - { - base = vec_in[i]; - } - } - - /* we ignore really small values - * anyway, they will be 0 after shrinking - * to q15_t - */ - base = base - 16; - - sum = 0; - - for (i = 0; i < dim_vec; i++) - { - if (vec_in[i] > base) - { - shift = (uint8_t)__USAT(vec_in[i] - base, 5); - sum += 0x1 << shift; - } - } - - /* This is effectively (0x1 << 32) / sum */ - int64_t div_base = 0x100000000LL; - int output_base = (int32_t)(div_base / sum); - - /* Final confidence will be output_base >> ( 17 - (vec_in[i] - base) ) - * so 32768 (0x1<<15) -> 100% confidence when sum = 0x1 << 16, output_base = 0x1 << 16 - * and vec_in[i]-base = 16 - */ - for (i = 0; i < dim_vec; i++) - { - if (vec_in[i] > base) - { - /* Here minimum value of 17+base-vec[i] will be 1 */ - shift = (uint8_t)__USAT(17+base-vec_in[i], 5); - p_out[i] = (q15_t) __SSAT((output_base >> shift), 16); - } else - { - p_out[i] = 0; - } - } - -} - -/** - * @} end of Softmax group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c deleted file mode 100644 index a0ef85e..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_softmax_q7.c - * Description: Q7 softmax function - * - * $Date: June 8, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Softmax - * @{ - */ - - /** - * @brief Q7 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector - * - * @details - * - * Here, instead of typical natural logarithm e based softmax, we use - * 2-based softmax here, i.e.,: - * - * y_i = 2^(x_i) / sum(2^x_j) - * - * The relative output will be different here. - * But mathematically, the gradient will be the same - * with a log(2) scaling factor. 
- * - */ - -void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out ) -{ - q31_t sum; - int16_t i; - uint8_t shift; - q15_t base; - base = -128; - - /* We first search for the maximum */ - for (i = 0; i < dim_vec; i++) - { - if (vec_in[i] > base) - { - base = vec_in[i]; - } - } - - /* - * So the base is set to max-8, meaning - * that we ignore really small values. - * anyway, they will be 0 after shrinking to q7_t. - */ - base = base - (1 << 3); - - sum = 0; - - for (i = 0; i < dim_vec; i++) - { - shift = (uint8_t)__USAT(vec_in[i] - base, 3); - sum += 0x1 << shift; - } - - /* This is effectively (0x1 << 20) / sum */ - int output_base = (1 << 20) / sum; - - for (i = 0; i < dim_vec; i++) - { - - /* Here minimum value of 13+base-vec_in[i] will be 5 */ - shift = (uint8_t)__USAT(13 + base - vec_in[i], 5); - p_out[i] = (q7_t)__SSAT((output_base >> shift), 8); - } -} - -/** - * @} end of Softmax group - */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c b/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c deleted file mode 100644 index 1d5dcf6..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_softmax_s8.c - * Description: S8 softmax function - * - * $Date: April 6, 2020 - * $Revision: V.2.0.0 - * - * Target Processor: Cortex-M cores - * - * -------------------------------------------------------------------- */ - -#include "arm_nnsupportfunctions.h" - -#define ACCUM_BITS 12 - -#ifdef ARM_MATH_MVEI -static int32x4_t arm_exp_on_negative_values_mve_32x4(int32x4_t val) -{ -#define SHIFT_START (24) - int32_t shift = SHIFT_START; - int32x4_t mask; - - const int32x4_t val_mod_minus_quarter = vandq_s32(val, vdupq_n_s32((1 << SHIFT_START) - 1)) - vdupq_n_s32(1 << SHIFT_START); - const int32x4_t remainder = vsubq_s32(val_mod_minus_quarter, val); - const int32x4_t x = vaddq_n_s32(val_mod_minus_quarter << 5, 1 << 28); - const int32x4_t x2 = MUL_SAT_MVE(x, x); - const int32x4_t op_1 = DIV_POW2_MVE(MUL_SAT_MVE(x2, x2), 2) + MUL_SAT_MVE(x2, x); - const int32x4_t op_2 = x + DIV_POW2_MVE(MUL_SAT_MVE(op_1, vdupq_n_s32(715827883)) + x2, 1); - int32x4_t result = vdupq_n_s32(1895147668) + MUL_SAT_MVE(vdupq_n_s32(1895147668), op_2); - -#define SELECT_IF_NON_ZERO(x) \ - { \ - mve_pred16_t p = vcmpneq_n_s32(remainder & vdupq_n_s32(1 << shift++), 0); \ - mask = vmvnq_m_s32(vdupq_n_s32(0), vdupq_n_s32(0), p); \ - result = SELECT_USING_MASK(mask, MUL_SAT_MVE(result, vdupq_n_s32(x)), result); \ - } - - SELECT_IF_NON_ZERO(1672461947) - SELECT_IF_NON_ZERO(1302514674) - SELECT_IF_NON_ZERO(790015084) - SELECT_IF_NON_ZERO(290630308) - SELECT_IF_NON_ZERO(39332535) - SELECT_IF_NON_ZERO(720401) - SELECT_IF_NON_ZERO(242) - -#undef SELECT_IF_NON_ZERO - - mve_pred16_t p = vcmpeqq_n_s32(val, 0); - mask = vmvnq_m_s32(vdupq_n_s32(0), vdupq_n_s32(0), p); - - result = SELECT_USING_MASK(mask, vdupq_n_s32(Q31_MAX), result); - return result; -} -#endif - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Softmax - * @{ - */ - -void arm_softmax_s8(const int8_t *input, - 
const int32_t num_rows, - const int32_t row_size, - const int32_t mult, - const int32_t shift, - const int32_t diff_min, - int8_t *output) -{ -#ifdef ARM_MATH_MVEI - -#define ACT_MIN ((int8_t)Q7_MIN) -#define ACT_MAX ((int8_t)Q7_MAX) - - const int32_t mask = (1 << shift); - - for (int i_num_rows = 0; i_num_rows < num_rows; ++i_num_rows) - { - int8_t max = ACT_MIN; - - int32_t vec_count = (row_size + 15) / 16; - uint32_t r_count = (uint32_t)row_size; - for (int i = 0; i < vec_count; i++) - { - mve_pred16_t p = vctp8q(r_count); - const int8x16_t ip = vldrbq_z_s8(&input[i * 16], p); - max = vmaxvq_p_s8(max, ip, p); - r_count -= 16; - } - - vec_count = row_size / 4; - int32_t idx = 0; - int32_t sum = 0; - - while (vec_count) - { - int32x4_t ip = vldrbq_s32(&input[idx * 4]); - ip = vsubq_n_s32(ip, max); - mve_pred16_t p = vcmpgeq_n_s32(ip, diff_min); - if (p != 0) - { - ip = vmulq_n_s32(ip, mask); - - int32x4_t res = MUL_SAT_MVE(ip, vdupq_n_s32(mult)); - - res = arm_exp_on_negative_values_mve_32x4(res); - res = DIV_POW2_MVE(res, ACCUM_BITS); - res = vpselq_s32(res, vdupq_n_s32(0), p); - sum += vaddvq_s32(res); - } - - vec_count--; - idx++; - } - - const int32_t tail_idx = row_size & ~3; - for (int i = 0; i < (row_size & 3); i++) - { - const int32_t diff = input[tail_idx + i] - max; - if (diff >= diff_min) - { - sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); - } - } - - const int32_t headroom = __CLZ((uint32_t)sum); - const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; - const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); - - vec_count = row_size / 4; - idx = 0; - - while (vec_count) - { - int32x4_t ip = vldrbq_s32(&input[idx]); - ip = vsubq_n_s32(ip, max); - - mve_pred16_t p = vcmpgeq_n_s32(ip, diff_min); - - int32x4_t tmp_res; - - if (p != 0) - { - ip = vmulq_n_s32(ip, mask); - - tmp_res = MUL_SAT_MVE(ip, vdupq_n_s32(mult)); - tmp_res = arm_exp_on_negative_values_mve_32x4(tmp_res); - tmp_res = 
MUL_SAT_MVE(vdupq_n_s32(shifted_scale), tmp_res); - tmp_res = DIV_POW2_MVE(tmp_res, bits_over_unit); - tmp_res += vdupq_n_s32(ACT_MIN); - - tmp_res = vmaxq_s32(tmp_res, vdupq_n_s32(ACT_MIN)); - tmp_res = vminq_s32(tmp_res, vdupq_n_s32(ACT_MAX)); - tmp_res = vpselq_s32(tmp_res, vdupq_n_s32(ACT_MIN), p); - } - else - { - tmp_res = vdupq_n_s32(ACT_MIN); - } - vstrbq_s32(&output[idx], tmp_res); - vec_count--; - idx += 4; - } - - for (int i = 0; i < (row_size & 3); i++) - { - int32_t diff = input[tail_idx + i] - max; - if (diff >= diff_min) - { - const int32_t res = DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) - 128; - output[tail_idx + i] = (int8_t)CLAMP(res, (int32_t)ACT_MAX, (int32_t)ACT_MIN); - } - else - { - output[tail_idx + i] = ACT_MIN; - } - } - - input += row_size; - output += row_size; - } -#else - const int32_t mask = (1 << shift); - - int32_t col = 0; - int32_t row_idx; - - for (row_idx = 0; row_idx < num_rows; ++row_idx) - { - // Find the maximum value in order to ensure numerical stability - int8_t max = *input; - - for (col = 1; col < row_size; ++col) - { - max = MAX(max, input[col]); - } - - int32_t diff = 0; - int32_t sum = 0; - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if (diff >= diff_min) - { - sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); - } - } - - const int32_t headroom = __CLZ(sum); - const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; - const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if (diff >= diff_min) - { - const int32_t res = DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) - 128; - output[col] = (int8_t)CLAMP(res, (int32_t)127, (int32_t)-128); - } - else - { - output[col] = -128; - } - } - input += row_size; - output += row_size; - } - -#endif -} -/** - * @} end of Softmax group - */ diff --git 
a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c b/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c deleted file mode 100644 index b2eebd2..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_softmax_u8.c - * Description: U8 softmax function - * - * $Date: May 29, 2020 - * $Revision: V.1.0.1 - * - * Target Processor: Cortex-M CPUs - * - * -------------------------------------------------------------------- */ - -#include "arm_nnfunctions.h" - -#define ACCUM_BITS 12 - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Softmax - * @{ - */ -void arm_softmax_u8(const uint8_t *input, - const int32_t num_rows, - const int32_t row_size, - const int32_t mult, - const int32_t shift, - const int32_t diff_min, - uint8_t *output) -{ - const int32_t mask = (1 << shift); - - int32_t col = 0; - int32_t row_idx; - - for(row_idx = 0; row_idx < num_rows; ++row_idx) - { - // Find the maximum value in order to ensure numerical stability - uint8_t max = *input; - - for (col = 1; col < row_size; ++col) - { - max = MAX(max, input[col]); - } - - int32_t diff = 0; - int32_t sum = 
0; - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if(diff >= diff_min) - { - sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); - } - } - - const int32_t headroom = __CLZ((uint32_t)sum); - const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; - const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if (diff >= diff_min) - { - const int32_t res = DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit); - output[col] = (uint8_t) CLAMP(res, (int32_t)255, (int32_t)0); - } - else - { - output[col] = 0; - } - } - input += row_size; - output += row_size; - } -} -/** - * @} end of Softmax group - */ \ No newline at end of file diff --git a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c b/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c deleted file mode 100755 index 4c27e33..0000000 --- a/src/third_party/cmsis/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_softmax_with_batch_q7.c - * Description: Q7 softmax function - * - * $Date: 05. August 2019 - * $Revision: V.1.0.0 - * - * Target Processor: Cortex-M and Cortex-A cores - * - * -------------------------------------------------------------------- */ - -#include "arm_math.h" -#include "arm_nnfunctions.h" - -/** - * @ingroup groupNN - */ - -/** - * @addtogroup Softmax - * @{ - */ - - /** - * @brief Q7 softmax function with batch parameter - * @param[in] vec_in pointer to input vector - * @param[in] nb_batches number of batches - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector - * - * @details - * - * Here, instead of typical natural logarithm e based softmax, we use - * 2-based softmax here, i.e.,: - * - * y_i = 2^(x_i) / sum(2^x_j) - * - * The relative output will be different here. - * But mathematically, the gradient will be the same - * with a log(2) scaling factor. - * - */ - -void arm_softmax_with_batch_q7(const q7_t * vec_in, const uint16_t nb_batches,const uint16_t dim_vec, q7_t * p_out ) -{ - for(int i=0; i + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_compiler.h + * Description: Generic compiler header + * + * $Date: 20 June 2024 + * $Revision: V.1.3.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#ifndef ARM_NN_COMPILER_H +#define ARM_NN_COMPILER_H + +/** + * + * @brief Arm C-Language Extension(ACLE) Includes + * + */ + +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE __inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline + #endif + #ifndef __RESTRICT + #define __RESTRICT __restrict + #endif + +#elif defined(__ICCARM__) + + #warning IAR support is not tested + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE + #endif + #ifndef __RESTRICT + #define __RESTRICT __restrict + #endif + +#elif defined(_MSC_VER) + + // Build for non Arm Cortex-M processors is not tested or supported. 
+ // Use this section to stub any macros or intrinsics + #warning Unsupported compiler + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE static __forceinline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __declspec(align(x)) + #endif + +#elif defined(__GNUC__) + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline + #endif + #ifndef __RESTRICT + #define __RESTRICT __restrict + #endif + +#else + + #error Unsupported compiler. Add support as needed + +#endif + +/** + * + * @brief Compiler specific diagnostic adjustment / fixes if applicable + * + */ + +// Note: __ARM_ARCH is used with M-profile architecture as the target here. +#if defined(__GNUC__) + #if (__GNUC__ == 12 && (__GNUC_MINOR__ <= 2)) && defined(__ARM_ARCH) + // Workaround for 'Internal Compiler Error' on Arm GNU Toolchain rel 12.2.x + // https://gcc.gnu.org/pipermail/gcc-patches/2022-December/607963.html + #define ARM_GCC_12_2_ICE + #endif +#endif + +#if defined(__ARM_FEATURE_MVE) && ((__ARM_FEATURE_MVE & 3) == 3) || (__ARM_FEATURE_MVE & 1) + #include +#endif + +#if defined(__ARM_ARCH) || defined(__ARM_ACLE) + #include +#endif + +#if defined(__GNUC__) + #include +#endif + +/** + * + * @brief ACLE and Intrinsics + * + */ + +// Note: Have __GNUC__, that is used to check for GCC , checks at the end +// as __GNUC__ is defined by non-GCC compilers as well + +/* Common intrinsics for all architectures */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) || defined(__ICCARM__) + #define CLZ __clz +#elif defined(__GNUC__) +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. 
+ \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees Arm-compatible results if compiling on a non-Arm + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} +#endif + +// ACLE intrinsics under groups __ARM_FEATURE_QBIT, __ARM_FEATURE_DSP , __ARM_FEATURE_SAT, __ARM_FEATURE_SIMD32 + +// Note: Just __ARM_FEATURE_DSP is checked to collect all intrinsics from the above mentioned groups + +#if (defined(__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + + // Common intrinsics + #define SMLABB __smlabb + #define SMLATT __smlatt + #define SMLALD __smlald + #define QADD __qadd + #define QSUB8 __qsub8 + #define QSUB16 __qsub16 + #define SADD16 __sadd16 + + // Compiler specifc variants of intrinsics. 
Create a new section or file for IAR if needed + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) || defined(__ICCARM__) + + #define SMULBB __smulbb + #define SMULTT __smultt + #define ROR __ror + #define SXTB16 __sxtb16 + #define SXTAB16 __sxtab16 + #define SXTB16_RORn(ARG1, ARG2) SXTB16(ROR(ARG1, ARG2)) + #define SXTAB16_RORn(ARG1, ARG2, ARG3) SXTAB16(ARG1, ROR(ARG2, ARG3)) + #define SMLAD __smlad + // PKH translates into pkh on AC6 + #define PKHBT(ARG1, ARG2, ARG3) \ + (((((uint32_t)(ARG1))) & 0x0000FFFFUL) | ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)) + #define PKHTB(ARG1, ARG2, ARG3) \ + (((((uint32_t)(ARG1))) & 0xFFFF0000UL) | ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)) + + #elif defined(__GNUC__) + + #define PKHBT(ARG1, ARG2, ARG3) \ + __extension__({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM("pkhbt %0, %1, %2, lsl %3" : "=r"(__RES) : "r"(__ARG1), "r"(__ARG2), "I"(ARG3)); \ + __RES; \ + }) + #define PKHTB(ARG1, ARG2, ARG3) \ + __extension__({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM("pkhtb %0, %1, %2" : "=r"(__RES) : "r"(__ARG1), "r"(__ARG2)); \ + else \ + __ASM("pkhtb %0, %1, %2, asr %3" : "=r"(__RES) : "r"(__ARG1), "r"(__ARG2), "I"(ARG3)); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM("sxtab16 %0, %1, %2" : "=r"(result) : "r"(op1), "r"(op2)); + return (result); +} + +__STATIC_FORCEINLINE uint32_t SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM("sxtb16 %0, %1" : "=r"(result) : "r"(op1)); + return (result); +} + +// __smlad is defined by GCC, but results in a performance drop(Tested on Arm GNU Toolchain version 11.x and 12.x) +__STATIC_FORCEINLINE uint32_t SMLAD(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile("smlad %0, %1, %2, %3" : "=r"(result) : "r"(op1), "r"(op2), "r"(op3)); + return (result); +} + +__STATIC_FORCEINLINE uint32_t ROR(uint32_t op1, uint32_t 
op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + +__STATIC_FORCEINLINE uint32_t SXTB16_RORn(uint32_t op1, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); + } + else + { + result = SXTB16(ROR(op1, rotate)); + } + return result; +} + +__STATIC_FORCEINLINE uint32_t SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); + } + else + { + result = SXTAB16(op1, ROR(op2, rotate)); + } + return result; +} + +// Inline assembly routines for ACLE intrinsics that are not defined by GCC toolchain +__STATIC_FORCEINLINE uint32_t SMULBB(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile("smulbb %0, %1, %2" : "=r"(result) : "r"(op1), "r"(op2)); + return (result); +} + +__STATIC_FORCEINLINE uint32_t SMULTT(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile("smultt %0, %1, %2" : "=r"(result) : "r"(op1), "r"(op2)); + return (result); +} + #endif + +#endif + +#endif /* #ifndef ARM_NN_COMPILER_H */ diff --git a/src/third_party/cmsis_nn/Include/arm_nn_math_types.h b/src/third_party/cmsis_nn/Include/arm_nn_math_types.h new file mode 100644 index 0000000..5134fe1 --- /dev/null +++ b/src/third_party/cmsis_nn/Include/arm_nn_math_types.h @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_math_types.h + * Description: Compiler include and basic types + * + * $Date: 15 August 2023 + * $Revision: V.1.3.3 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#ifndef ARM_NN_MATH_TYPES_H + +#define ARM_NN_MATH_TYPES_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * + * @brief Translate architecture feature flags to CMSIS-NN defines + * + */ + +// CMSIS-NN uses the same macro names as CMSIS-DSP +#if (defined(__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + #ifndef ARM_MATH_DSP + #define ARM_MATH_DSP 1 + #endif +#endif + +#if (defined(__ARM_FEATURE_MVE) && (__ARM_FEATURE_MVE & 1)) + #ifndef ARM_MATH_MVEI + #define ARM_MATH_MVEI 1 + #endif +#endif + +/** + * + * @brief Limits macros + * + */ + +#define NN_Q31_MAX ((int32_t)(0x7FFFFFFFL)) +#define NN_Q15_MAX ((int16_t)(0x7FFF)) +#define NN_Q7_MAX ((int8_t)(0x7F)) +#define NN_Q31_MIN ((int32_t)(0x80000000L)) +#define NN_Q15_MIN ((int16_t)(0x8000)) +#define NN_Q7_MIN ((int8_t)(0x80)) + +#ifdef __cplusplus +} +#endif + +#endif /*ifndef ARM_NN_MATH_TYPES_H */ diff --git a/src/third_party/cmsis_nn/Include/arm_nn_tables.h b/src/third_party/cmsis_nn/Include/arm_nn_tables.h new file mode 100644 index 0000000..57c90cb --- /dev/null +++ b/src/third_party/cmsis_nn/Include/arm_nn_tables.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or 
its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_tables.h + * Description: Extern declaration for NN tables + * + * $Date: 8 December 2022 + * $Revision: V.2.1.1 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#ifndef ARM_NN_TABLES_H +#define ARM_NN_TABLES_H + +#include "third_party/cmsis_nn/Include/arm_nn_math_types.h" + +/** + * @brief tables for various activation functions + * + */ + +extern const uint16_t sigmoid_table_uint16[256]; + +#endif /* ARM_NN_TABLES_H */ diff --git a/src/third_party/cmsis_nn/Include/arm_nn_types.h b/src/third_party/cmsis_nn/Include/arm_nn_types.h new file mode 100644 index 0000000..2846e62 --- /dev/null +++ b/src/third_party/cmsis_nn/Include/arm_nn_types.h @@ -0,0 +1,281 @@ +/* + * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_types.h + * Description: Public header file to contain the CMSIS-NN structs for the + * TensorFlowLite micro compliant functions + * + * $Date: 21 Oct 2024 + * $Revision: V.3.5.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#ifndef ARM_NN_TYPES_H +#define ARM_NN_TYPES_H + +#include +#include + +/** + * @defgroup genPubTypes Structure Types + * @ingroup Public + * @brief Enums and Data Structures used in public API + * @{ + */ + +/** Enum for specifying activation function types */ +typedef enum +{ + ARM_SIGMOID = 0, /**< Sigmoid activation function */ + ARM_TANH = 1, /**< Tanh activation function */ +} arm_nn_activation_type; + +/** Function return codes */ +typedef enum +{ + ARM_CMSIS_NN_SUCCESS = 0, /**< No error */ + ARM_CMSIS_NN_ARG_ERROR = -1, /**< One or more arguments are incorrect */ + ARM_CMSIS_NN_NO_IMPL_ERROR = -2, /**< No implementation available */ + ARM_CMSIS_NN_FAILURE = -3, /**< Logical error */ +} arm_cmsis_nn_status; + +/** CMSIS-NN object to contain the width and height of a tile */ +typedef struct +{ + int32_t w; /**< Width */ + int32_t h; /**< Height */ +} cmsis_nn_tile; + +/** CMSIS-NN object used for the function context. 
*/ +typedef struct +{ + void *buf; /**< Pointer to a buffer needed for the optimization */ + int32_t size; /**< Buffer size */ +} cmsis_nn_context; + +/** CMSIS-NN object used to hold bias data for int16 variants. */ +typedef struct +{ + const void *data; /**< Pointer to bias data */ + const bool is_int32_bias; /**< Indicate type of bias data. True means int32 else int64 */ +} cmsis_nn_bias_data; + +/** CMSIS-NN object to contain the dimensions of the tensors */ +typedef struct +{ + int32_t n; /**< Generic dimension to contain either the batch size or output channels. + Please refer to the function documentation for more information */ + int32_t h; /**< Height */ + int32_t w; /**< Width */ + int32_t c; /**< Input channels */ +} cmsis_nn_dims; + +/** CMSIS-NN object to contain LSTM specific input parameters related to dimensions */ +typedef struct +{ + int32_t max_time; + int32_t num_inputs; + int32_t num_batches; + int32_t num_outputs; +} cmsis_nn_lstm_dims; + +/** CMSIS-NN object for the per-channel quantization parameters */ +typedef struct +{ + int32_t *multiplier; /**< Multiplier values */ + int32_t *shift; /**< Shift values */ +} cmsis_nn_per_channel_quant_params; + +/** CMSIS-NN object for the per-tensor quantization parameters */ +typedef struct +{ + int32_t multiplier; /**< Multiplier value */ + int32_t shift; /**< Shift value */ +} cmsis_nn_per_tensor_quant_params; + +/** CMSIS-NN object for quantization parameters. + * This struct supports both per-tensor and per-channels requantization + * and is recommended for new operators. 
+ */ +typedef struct +{ + int32_t *multiplier; /**< Multiplier values */ + int32_t *shift; /**< Shift values */ + int32_t is_per_channel; /** Indicating if per channel or per tensor quantization */ +} cmsis_nn_quant_params; + +/** CMSIS-NN object for the quantized Relu activation */ +typedef struct +{ + int32_t min; /**< Min value used to clamp the result */ + int32_t max; /**< Max value used to clamp the result */ +} cmsis_nn_activation; + +/** CMSIS-NN object for the convolution layer parameters */ +typedef struct +{ + int32_t input_offset; /**< The negative of the zero value for the input tensor */ + int32_t output_offset; /**< The negative of the zero value for the output tensor */ + cmsis_nn_tile stride; + cmsis_nn_tile padding; + cmsis_nn_tile dilation; + cmsis_nn_activation activation; +} cmsis_nn_conv_params; + +/** CMSIS-NN object for the transpose convolution layer parameters */ +typedef struct +{ + int32_t input_offset; /**< The negative of the zero value for the input tensor */ + int32_t output_offset; /**< The negative of the zero value for the output tensor */ + cmsis_nn_tile stride; + cmsis_nn_tile padding; + cmsis_nn_tile padding_offsets; + cmsis_nn_tile dilation; + cmsis_nn_activation activation; +} cmsis_nn_transpose_conv_params; + +/** CMSIS-NN object for the depthwise convolution layer parameters */ +typedef struct +{ + int32_t input_offset; /**< The negative of the zero value for the input tensor */ + int32_t output_offset; /**< The negative of the zero value for the output tensor */ + int32_t ch_mult; /**< Channel Multiplier. 
ch_mult * in_ch = out_ch */ + cmsis_nn_tile stride; + cmsis_nn_tile padding; + cmsis_nn_tile dilation; + cmsis_nn_activation activation; +} cmsis_nn_dw_conv_params; + +/** CMSIS-NN object for pooling layer parameters */ +typedef struct +{ + cmsis_nn_tile stride; + cmsis_nn_tile padding; + cmsis_nn_activation activation; +} cmsis_nn_pool_params; + +/** CMSIS-NN object for Fully Connected layer parameters */ +typedef struct +{ + int32_t input_offset; /**< The negative of the zero value for the input tensor */ + int32_t filter_offset; /**< The negative of the zero value for the filter tensor */ + int32_t output_offset; /**< The negative of the zero value for the output tensor */ + cmsis_nn_activation activation; +} cmsis_nn_fc_params; + +/** CMSIS-NN object for Batch Matmul layer parameters */ +typedef struct +{ + const bool adj_x; + const bool adj_y; + cmsis_nn_fc_params fc_params; +} cmsis_nn_bmm_params; + +/** CMSIS-NN object for Transpose layer parameters */ +typedef struct +{ + const int32_t num_dims; + const uint32_t *permutations; /**< The dimensions applied to the input dimensions */ +} cmsis_nn_transpose_params; + +/** CMSIS-NN object for SVDF layer parameters */ +typedef struct +{ + int32_t rank; + int32_t input_offset; /**< The negative of the zero value for the input tensor */ + int32_t output_offset; /**< The negative of the zero value for the output tensor */ + cmsis_nn_activation input_activation; + cmsis_nn_activation output_activation; +} cmsis_nn_svdf_params; + +/** CMSIS-NN object for Softmax s16 layer parameters */ +typedef struct +{ + const int16_t *exp_lut; + const int16_t *one_by_one_lut; +} cmsis_nn_softmax_lut_s16; + +/** CMSIS-NN object for quantization parameters */ +typedef struct +{ + int32_t multiplier; /**< Multiplier value */ + int32_t shift; /**< Shift value */ +} cmsis_nn_scaling; + +/** CMSIS-NN object for LSTM gate parameters*/ +typedef struct +{ + int32_t input_multiplier; + int32_t input_shift; + const void *input_weights; + const 
void *input_effective_bias; /**< Bias added with precomputed kernel_sum * lhs_offset*/ + + int32_t hidden_multiplier; + int32_t hidden_shift; + const void *hidden_weights; + const void *hidden_effective_bias; /**< Precomputed kernel_sum * lhs_offset*/ + + const void *bias; + arm_nn_activation_type activation_type; +} cmsis_nn_lstm_gate; + +/** CMSIS-NN object for LSTM parameters*/ +typedef struct +{ + int32_t time_major; /**< 0 if first dimension is batch, else first dimension is time */ + int32_t batch_size; + int32_t time_steps; + int32_t input_size; /**< Size of new data input into the LSTM cell*/ + int32_t + hidden_size; /**< Size of output from the LSTM cell, used as output and recursively into the next time step*/ + + int32_t input_offset; + + int32_t forget_to_cell_multiplier; + int32_t forget_to_cell_shift; + int32_t input_to_cell_multiplier; + int32_t input_to_cell_shift; + int32_t cell_clip; /**< Min/max value of cell output*/ + int32_t cell_scale_power; + + int32_t output_multiplier; + int32_t output_shift; + int32_t output_offset; + + cmsis_nn_lstm_gate forget_gate; + cmsis_nn_lstm_gate input_gate; + cmsis_nn_lstm_gate cell_gate; + cmsis_nn_lstm_gate output_gate; +} cmsis_nn_lstm_params; + +/** CMSIS-NN object for LSTM scratch buffers*/ +typedef struct +{ + void *temp1; + void *temp2; + void *cell_state; +} cmsis_nn_lstm_context; + +/** + * @} // end group genPubTypes + */ + +#endif /* ARM_NN_TYPES_H */ diff --git a/src/third_party/cmsis_nn/Include/arm_nnfunctions.h b/src/third_party/cmsis_nn/Include/arm_nnfunctions.h new file mode 100644 index 0000000..7d733d2 --- /dev/null +++ b/src/third_party/cmsis_nn/Include/arm_nnfunctions.h @@ -0,0 +1,2959 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nnfunctions.h + * Description: Public header file for CMSIS NN Library + * + * $Date: 04 November 2024 + * $Revision: V.18.0.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +/** + * @defgroup Public Public + * A collection of functions to perform basic operations for neural network layers. Functions with a _s8 suffix support + * TensorFlow Lite framework. + */ + +#ifndef ARM_NNFUNCTIONS_H +#define ARM_NNFUNCTIONS_H + +#include "third_party/cmsis_nn/Include/arm_nn_math_types.h" +#include "third_party/cmsis_nn/Include/arm_nn_types.h" + +#define USE_INTRINSIC + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NNConv Convolution Functions + * + * Collection of convolution, depthwise convolution functions and their variants. + * + * The convolution is implemented in 2 steps: im2col and General Matrix Multiplication(GEMM) + * + * im2col is a process of converting each patch of image data into + * a column. After im2col, the convolution is computed as matrix-matrix + * multiplication. + * + * To reduce the memory footprint, the im2col is performed partially. + * Each iteration, only a few column (i.e., patches) are generated followed + * by GEMM. + * + */ + +/** + * @brief s4 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. 
+ * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_wrapper_s4_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 packed with 2x int4 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + */ +arm_cmsis_nn_status arm_convolve_wrapper_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s4 + * + * @param[in] conv_params Convolution parameters (e.g. 
strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial + * filter dimensions + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_wrapper_s4_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s4 for Arm(R) Helium Architecture case. + * Refer to arm_convolve_wrapper_s4_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s4_get_buffer_size(). Currently this operator does not have an + * mve implementation, so dsp will be used. + * + */ +int32_t arm_convolve_wrapper_s4_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s4 for processors with DSP extension. + * Refer to arm_convolve_wrapper_s4_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s4_get_buffer_size(). 
+ * + */ +int32_t arm_convolve_wrapper_s4_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. 
+ * + */ +arm_cmsis_nn_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s8 + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial + * filter dimensions + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s8 for Arm(R) Helium Architecture case. + * Refer to arm_convolve_wrapper_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s8_get_buffer_size(). + * + */ +int32_t arm_convolve_wrapper_s8_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s8 for processors with DSP extension. 
+ * Refer to arm_convolve_wrapper_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s8_get_buffer_size(). + * + */ +int32_t arm_convolve_wrapper_s8_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Struct with optional bias data pointer. Bias data type can be int64 or int32 depending + * flag in struct. + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int16 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. 
or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + */ +arm_cmsis_nn_status arm_convolve_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const cmsis_nn_bias_data *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s16. + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial + * filter dimensions + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_wrapper_s16_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s16 for for processors with DSP extension. + * Refer to arm_convolve_wrapper_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s16_get_buffer_size(). + * + */ +int32_t arm_convolve_wrapper_s16_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s16 for Arm(R) Helium Architecture case. 
+ * Refer to arm_convolve_wrapper_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_convolve_wrapper_s16_get_buffer_size(). + * + */ +int32_t arm_convolve_wrapper_s16_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Basic s4 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s4_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Packed Filter data pointer. Data type: int8 packed with 2x int4 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s4 convolution function with a requirement of even number of kernels. + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s4_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions. Note the product must be even. + * @param[in] filter_data Packed Filter data pointer. Data type: int8 packed with 2x int4 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. 
Data type: int8 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS if successful or + * ARM_CMSIS_NN_ARG_ERROR if incorrect arguments or + * ARM_CMSIS_NN_NO_IMPL_ERROR if not for MVE + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details. + * + */ +arm_cmsis_nn_status arm_convolve_even_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s8 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, CK] where HK, WK and CK are the + * spatial filter dimensions. CK != C_IN is used for grouped convolution, in which + * case the required conditions are C_IN = N * CK and C_OUT = N * M for N groups of + * size M. + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. 
Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] upscale_dims Inserts zeroes to upscale the input in h/w dimensions if set to 2. This is used for + * tranposed convolution. + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS if successful or + * ARM_CMSIS_NN_ARG_ERROR if incorrect arguments or + * ARM_CMSIS_NN_NO_IMPL_ERROR + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details. + * + */ +arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *upscale_dims, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for s4 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_s4_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Get the required buffer size for s8 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Wrapper to select optimal transposed convolution algorithm depending on parameters. + * @param[in, out] ctx Function context that contains the additional buffer if required by the + * function. + * arm_transpose_conv_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security + reasons. + * @param[in, out] output_ctx Temporary scratch buffer. + * The size required size is: output width * output height * output channel * 4 + * The caller is expected to clear the buffer, if applicable, for security + * reasons. + * @param[in] transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of transpose_conv_params->input_offset : [-127, 128] + * Range of transpose_conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each out channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. 
or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details. + * + */ +arm_cmsis_nn_status arm_transpose_conv_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s8 transpose convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the + * function. + * arm_transpose_conv_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security + reasons. + * @param[in, out] output_ctx Temporary scratch buffer. + * The size required size is: output width * output height * output channel * 4 + * The caller is expected to clear the buffer, if applicable, for security + * reasons. + * @param[in] transpose_conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of transpose_conv_params->input_offset : [-127, 128] + * Range of transpose_conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each out channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to arguments 'ctx' and 'output_ctx' for details. + * + */ +arm_cmsis_nn_status arm_transpose_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for ctx in s8 transpose conv function + * + * @param[in] transposed_conv_params Transposed convolution parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @param[in] out_dims Output tensor dimensions. 
Format: [N, H, W, C_OUT] + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_transpose_conv_s8_get_buffer_size(const cmsis_nn_transpose_conv_params *transposed_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *out_dims); + +/** + * @brief Get the required buffer size for output_ctx in s8 transpose conv function + * + * @param[in] transposed_conv_params Transposed convolution parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_transpose_conv_s8_get_reverse_conv_buffer_size(const cmsis_nn_transpose_conv_params *transposed_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_transpose_conv_s8() for processors with DSP extension. + * Refer to arm_transpose_conv_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_transpose_conv_s8_get_buffer_size(). + * + */ +int32_t arm_transpose_conv_s8_get_buffer_size_dsp(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *out_dims); + +/** + * @brief Get size of additional buffer required by arm_transpose_conv_s8() for Arm(R) Helium Architecture case. + * Refer to arm_transpose_conv_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_transpose_conv_s8_get_buffer_size(). 
+ * + */ +int32_t arm_transpose_conv_s8_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *out_dims); + +/** + * @brief Basic s16 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s16_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Struct with optional bias data pointer. Bias data type can be int64 or int32 depending + * flag in struct. + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS if successful or + * ARM_CMSIS_NN_ARG_ERROR if incorrect arguments or + * ARM_CMSIS_NN_NO_IMPL_ERROR + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. Additional memory is required for optimization. Refer to argument 'ctx' for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const cmsis_nn_bias_data *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get the required buffer size for s16 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Fast s4 version for 1x1 convolution (non-square shape) + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_1x1_s4_fast_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] + * @param[in] filter_data Filter data pointer. Data type: int8 packed with 2x int4 + * @param[in] bias_dims Bias tensor dimensions. 
Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# conv_params->padding.w = conv_params->padding.h = 0 + * -# conv_params->stride.w = conv_params->stride.h = 1 + * + */ +arm_cmsis_nn_status arm_convolve_1x1_s4_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief s4 version for 1x1 convolution with support for non-unity stride values + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * None is required by this function. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] + * @param[in] filter_data Filter data pointer. 
Data type: int8 packed with 2x int4 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# conv_params->padding.w = conv_params->padding.h = 0 + * + */ +arm_cmsis_nn_status arm_convolve_1x1_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Fast s8 version for 1x1 convolution (non-square shape) + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, 1, 1, C_IN] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# conv_params->padding.w = conv_params->padding.h = 0 + * -# conv_params->stride.w = conv_params->stride.h = 1 + * + */ +arm_cmsis_nn_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_1x1_s4_fast + * + * @param[in] input_dims Input (activation) dimensions + * @return The function returns the required buffer size in bytes + * + */ +int32_t arm_convolve_1x1_s4_fast_get_buffer_size(const cmsis_nn_dims *input_dims); + +/** + * @brief Get the required buffer size for arm_convolve_1x1_s8_fast + * + * @param[in] input_dims Input (activation) dimensions + * @return The function returns the required buffer size in bytes + * + */ +int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims); + +/** + * @brief s8 version for 1x1 convolution with support for non-unity stride values + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. 
+ * None is required by this function. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# conv_params->padding.w = conv_params->padding.h = 0 + * + */ +arm_cmsis_nn_status arm_convolve_1x1_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief 1xn convolution + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. 
+ * arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required
+ * The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] input_data Input (activation) data pointer. Data type: int8
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
+ * spatial filter dimension
+ * @param[in] filter_data Filter data pointer. Data type: int8
+ * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
+ * @param[in] bias_data Optional bias data pointer. Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out] output_data Output data pointer. Data type: int8
+ *
+ * @return The function returns either
+ * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or,
+ * ARM_CMSIS_NN_SUCCESS on successful completion.
+ *
+ * @details
+ * - Supported framework : TensorFlow Lite Micro
+ * - The following constraints on the arguments apply
+ * -# input_dims->n equals 1
+ * -# output_dims->w is a multiple of 4
+ * -# Explicit constraints(since it is for 1xN convolution)
+ * -## input_dims->h equals 1
+ * -## output_dims->h equals 1
+ * -## filter_dims->h equals 1
+ *@todo Remove constraint on output_dims->w to make the function generic. 
+ * + */ +arm_cmsis_nn_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief 1xn convolution for s4 weights + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_1_x_n_s4_get_buffer_size will return the buffer_size if required + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal + * spatial filter dimension + * @param[in] filter_data Filter data pointer. Data type: int8 as packed int4 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. 
+ * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# stride.w * input_dims->c is a multiple of 4 + * -# Explicit constraints(since it is for 1xN convolution) + * -## input_dims->h equals 1 + * -## output_dims->h equals 1 + * -## filter_dims->h equals 1 + *@todo Remove constraint on output_dims->w to make the function generic. + * + */ +arm_cmsis_nn_status arm_convolve_1_x_n_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required additional buffer size for 1xn convolution + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the + * horizontal spatial filter dimension + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get the required additional buffer size for 1xn convolution + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). 
+ * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the + * horizontal spatial filter dimension + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_1_x_n_s4_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Wrapper function to pick the right optimized s8 depthwise convolution function + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. 
Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in, out] output_data Output data pointer. Data type: int8
+ * @return The function returns
+ * ARM_CMSIS_NN_SUCCESS - Successful completion.
+ *
+ * @details
+ * - Supported framework: TensorFlow Lite
+ * - Picks one of the following functions
+ * -# arm_depthwise_conv_s8()
+ * -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only
+ * -# arm_depthwise_conv_s8_opt()
+ * - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the
+ * boundary.
+ */
+arm_cmsis_nn_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const int8_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const int8_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ int8_t *output_data);
+
+/**
+ * @brief Wrapper function to pick the right optimized s4 depthwise convolution function
+ *
+ * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
+ * definition file to see if an additional buffer is required.
+ * Optional function {API}_get_buffer_size() provides the buffer
+ * size if required.
+ * The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ * dw_conv_params->dilation is not used.
+ * Range of dw_conv_params->input_offset : [-127, 128]
+ * Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each
+ * output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. 
Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential + * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43]. + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s4(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() + * + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @return Size of additional memory required for optimizations in bytes. 
+ * + */ +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() for processors with DSP extension. + * Refer to arm_depthwise_conv_wrapper_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_wrapper_s8_get_buffer_size(). + * + */ +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() for Arm(R) Helium Architecture case. + * Refer to arm_depthwise_conv_wrapper_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_wrapper_s8_get_buffer_size(). + * + */ +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s4() + * + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] output_dims Output tensor dimensions. 
Format: [1, H, W, C_OUT]
+ * @return Size of additional memory required for optimizations in bytes.
+ *
+ */
+int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_dims *input_dims,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims);
+
+/**
+ * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s4() for processors with DSP extension.
+ * Refer to arm_depthwise_conv_wrapper_s4_get_buffer_size() for function argument details.
+ *
+ * @note Intended for compilation on Host. If compiling for an Arm target, use
+ * arm_depthwise_conv_wrapper_s4_get_buffer_size().
+ *
+ */
+int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_dims *input_dims,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims);
+
+/**
+ * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s4() for Arm(R) Helium Architecture case.
+ * Refer to arm_depthwise_conv_wrapper_s4_get_buffer_size() for function argument details.
+ *
+ * @note Intended for compilation on Host. If compiling for an Arm target, use
+ * arm_depthwise_conv_wrapper_s4_get_buffer_size().
+ *
+ */
+int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_dims *input_dims,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims);
+
+/**
+ * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
+ *
+ * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
+ * definition file to see if an additional buffer is required.
+ * Optional function {API}_get_buffer_size() provides the buffer
+ * size if an additional buffer is required.
+ * The caller is expected to clear the buffer, if applicable, for security reasons. 
+ * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Batch argument N is not used. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s4 depthwise convolution function that doesn't have any constraints on the input dimensions. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required exists if additional memory is. 
+ * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Batch argument N is not used. + * @param[in] input Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] kernel Filter data pointer. Data type: int8_t packed 4-bit weights, e.g four sequential + * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43]. + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[in, out] output Output data pointer. Data type: int8 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_depthwise_conv_s4(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output); + +/** + * @brief Basic s16 depthwise convolution function that doesn't have any constraints on the input dimensions. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. 
+ * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * exists if additional memory is. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Batch argument N is not used. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_depthwise_conv_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Wrapper function to pick the right optimized s16 depthwise convolution function + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. 
+ * Optional function {API}_get_buffer_size() provides the buffer + * size if required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : Not used + * Range of dw_conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful completion. 
+ * + * @details + * - Supported framework: TensorFlow Lite + * - Picks one of the following functions + * -# arm_depthwise_conv_s16() + * -# arm_depthwise_conv_fast_s16() - Cortex-M CPUs with DSP extension only + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s16() + * + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * Range of dw_conv_params->input_offset : Not used + * Range of dw_conv_params->output_offset : Not used + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @return Size of additional memory required for optimizations in bytes. + * + */ +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s16() for processors with DSP extension. + * Refer to arm_depthwise_conv_wrapper_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_wrapper_s16_get_buffer_size().
+ * + */ +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s16() for Arm(R) Helium Architecture + * case. Refer to arm_depthwise_conv_wrapper_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_wrapper_s16_get_buffer_size(). + * + */ +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel. + * Refer arm_depthwise_conv_s16() for function argument details. + * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - ctx->buf == NULL and + * arm_depthwise_conv_fast_s16_get_buffer_size() > 0 or + * input channel != output channel or + * ch_mult != 1 + * + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported framework: TensorFlow Lite + * - The following constraints on the arguments apply + * -# Number of input channel equals number of output channels or ch_mult equals 1 + * - Recommended when number of channels is 4 or greater.
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get the required buffer size for optimized s16 depthwise convolution + * function with constraint that in_channel equals out_channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] + * Batch argument N is not used. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_depthwise_conv_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on + * the input arguments(documented below). Refer arm_depthwise_conv_s8() for function + * argument details. + * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - Unsupported dimension of tensors + * - Unsupported pad size along the x axis + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constraints on the arguments apply + * -# Number of input channel equals number of output channels + * -# Filter height and width equals 3 + * -# Padding along x is either 0 or 1.
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel. + * Refer arm_depthwise_conv_s8() for function argument details. + * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - input channel != output channel or + * ch_mult != 1 + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @note If number of channels is not a multiple of 4, up to 3 elements outside the boundary will be read out + * for the following if MVE optimizations(Arm Helium Technology) are used. + * - Output shift + * - Output multiplier + * - Output bias + * - kernel + * @details + * - Supported framework: TensorFlow Lite + * - The following constraints on the arguments apply + * -# Number of input channel equals number of output channels or ch_mult equals 1 + * - Recommended when number of channels is 4 or greater. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Optimized s4 depthwise convolution function with constraint that in_channel equals out_channel. + * Refer arm_depthwise_conv_s4() for function argument details.
+ * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - input channel != output channel or + * ch_mult != 1 + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @note If number of channels is not a multiple of 4, up to 3 elements outside the boundary will be read out + * for the following if MVE optimizations(Arm Helium Technology) are used. + * - Output shift + * - Output multiplier + * - Output bias + * - kernel + * @details + * - Supported framework: TensorFlow Lite + * - The following constraints on the arguments apply + * -# Number of input channel equals number of output channels or ch_mult equals 1 + * - Recommended when number of channels is 4 or greater. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s4_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for optimized s8 depthwise convolution + * function with constraint that in_channel equals out_channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] + * Batch argument N is not used. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Get the required buffer size for optimized s4 depthwise convolution + * function with constraint that in_channel equals out_channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] + * Batch argument N is not used.
+ * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_depthwise_conv_s4_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @defgroup FC Fully-connected Layer Functions + * + * Collection of fully-connected and matrix multiplication functions. + * + * Fully-connected layer is basically a matrix-vector multiplication + * with bias. The matrix is the weights and the input/output vectors + * are the activation values. Supported {weight, activation} precisions + * include {8-bit, 8-bit} and {8-bit, 16-bit} + * + * + */ + +/** + * @brief Basic s4 Fully Connected function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * Range of fc_params->input_offset : [-127, 128] + * fc_params->filter_offset : 0 + * Range of fc_params->output_offset : [-128, 127] + * @param[in] quant_params Per-tensor quantization info. + * It contains the multiplier and shift value to be applied to the output tensor. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. 
Data type: int8_t packed 4-bit weights, e.g four sequential + * weights [0x1, 0x2, 0x3, 0x4] packed as [0x21, 0x43]. + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_fully_connected_s4(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s8 Fully Connected function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * Range of fc_params->input_offset : [-127, 128] + * fc_params->filter_offset : 0 + * Range of fc_params->output_offset : [-128, 127] + * @param[in] quant_params Per-tensor quantization info. + * It contains the multiplier and shift value to be applied to the output tensor. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. 
Data type: int8 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_fully_connected_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Basic s8 Fully Connected function using per channel quantization. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * Range of fc_params->input_offset : [-127, 128] + * fc_params->filter_offset : 0 + * Range of fc_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. 
+ * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_fully_connected_per_channel_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief s8 Fully Connected layer wrapper function + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. 
+ * @param[in] fc_params Fully Connected layer parameters. + * Range of fc_params->input_offset : [-127, 128] + * fc_params->filter_offset : 0 + * Range of fc_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel or per-tensor quantization info. Check struct definition for details. + * It contains the multiplier and shift value(s) to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_fully_connected_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s32 bias_data.
+ * @param[in, out] vector_sum_buf Buffer for vector sums + * @param[in] vector_cols Number of vector columns + * @param[in] vector_rows Number of vector rows + * @param[in] vector_data Vector of weights data + * @param[in] lhs_offset Constant multiplied with each sum + * @param[in] rhs_offset Constant added to each vector element before sum + * @param[in] bias_data Vector of bias data, added to each sum. + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + */ +arm_cmsis_nn_status arm_vector_sum_s8(int32_t *vector_sum_buf, + const int32_t vector_cols, + const int32_t vector_rows, + const int8_t *vector_data, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t *bias_data); + +/** + * @brief Calculate the sum of each row in vector_data, multiply by lhs_offset and optionally add s64 bias_data. + * @param[in, out] vector_sum_buf Buffer for vector sums + * @param[in] vector_cols Number of vector columns + * @param[in] vector_rows Number of vector rows + * @param[in] vector_data Vector of weights data + * @param[in] lhs_offset Constant multiplied with each sum + * @param[in] bias_data Vector of bias data, added to each sum. + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + */ +arm_cmsis_nn_status arm_vector_sum_s8_s64(int64_t *vector_sum_buf, + const int32_t vector_cols, + const int32_t vector_rows, + const int8_t *vector_data, + const int32_t lhs_offset, + const int64_t *bias_data); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s8(). + * See also arm_vector_sum_s8, which is required if buffer size is > 0. + * @param[in] filter_dims dimension of filter + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s8() for processors with DSP extension.
+ * Refer to arm_fully_connected_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_fully_connected_s8_get_buffer_size(). + * + */ +int32_t arm_fully_connected_s8_get_buffer_size_dsp(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s8() for Arm(R) Helium Architecture case. + * Refer to arm_fully_connected_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_fully_connected_s8_get_buffer_size(). + * + */ +int32_t arm_fully_connected_s8_get_buffer_size_mve(const cmsis_nn_dims *filter_dims); + +/** + * @brief Basic s16 Fully Connected function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * fc_params->input_offset : 0 + * fc_params->filter_offset : 0 + * fc_params->output_offset : 0 + * @param[in] quant_params Per-tensor quantization info. + * It contains the multiplier and shift value to be applied to the output tensor. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. 
Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + */ +arm_cmsis_nn_status arm_fully_connected_s16(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s16(). + * @param[in] filter_dims dimension of filter + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_fully_connected_s16_get_buffer_size(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s16() for processors with DSP extension. + * Refer to arm_fully_connected_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_fully_connected_s16_get_buffer_size(). + * + */ +int32_t arm_fully_connected_s16_get_buffer_size_dsp(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_fully_connected_s16() for Arm(R) Helium Architecture case. + * Refer to arm_fully_connected_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_fully_connected_s16_get_buffer_size(). 
+ * + */ +int32_t arm_fully_connected_s16_get_buffer_size_mve(const cmsis_nn_dims *filter_dims); + +/** + * @defgroup groupElementwise Elementwise Functions + * + * Elementwise add and multiplication functions. + * + */ + +/** + * @brief s8 elementwise add of two vectors + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: -127 to 128 + * @param[in] input_1_mult multiplier for input 1 + * @param[in] input_1_shift shift for input 1 + * @param[in] input_2_offset offset for input 2. Range: -127 to 128 + * @param[in] input_2_mult multiplier for input 2 + * @param[in] input_2_shift shift for input 2 + * @param[in] left_shift input left shift + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Range: -128 to 127 + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -128 + * @param[in] out_activation_max maximum value to clamp output to. Max: 127 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_elementwise_add_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s16 elementwise add of two vectors + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Not used. 
+ * @param[in] input_1_mult multiplier for input 1 + * @param[in] input_1_shift shift for input 1 + * @param[in] input_2_offset offset for input 2. Not used. + * @param[in] input_2_mult multiplier for input 2 + * @param[in] input_2_shift shift for input 2 + * @param[in] left_shift input left shift + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Not used. + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -32768 + * @param[in] out_activation_max maximum value to clamp output to. Max: 32767 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_elementwise_add_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s8 elementwise multiplication + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: -127 to 128 + * @param[in] input_2_offset offset for input 2. Range: -127 to 128 + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Range: -128 to 127 + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -128 + * @param[in] out_activation_max maximum value to clamp output to. 
Max: 127 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s16 elementwise multiplication + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Not used. + * @param[in] input_2_offset offset for input 2. Not used. + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Not used. + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -32768 + * @param[in] out_activation_max maximum value to clamp output to. 
Max: 32767 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @defgroup Acti Activation Functions + * + * Perform activation layers, including ReLU (Rectified Linear Unit), + * sigmoid and tanh + * + */ + +/** + * @brief Q7 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ +void arm_relu_q7(int8_t *data, uint16_t size); + +/** + * @brief s8 ReLU6 function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ +void arm_relu6_s8(int8_t *data, uint16_t size); + +/** + * @brief Q15 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ +void arm_relu_q15(int16_t *data, uint16_t size); + +/** + * @brief s16 neural network activation function using direct table look-up + * @param[in] input pointer to input data + * @param[out] output pointer to output + * @param[in] size number of elements + * @param[in] left_shift bit-width of the integer part, assumed to be smaller than 3. + * @param[in] type type of activation functions + * @return The function returns ARM_CMSIS_NN_SUCCESS + + * + * @details Supported framework: TensorFlow Lite for Microcontrollers. 
+ * This activation function must be bit precise congruent with the corresponding TFLM tanh and sigmoid activation + * functions + */ +arm_cmsis_nn_status arm_nn_activation_s16(const int16_t *input, + int16_t *output, + const int32_t size, + const int32_t left_shift, + const arm_nn_activation_type type); + +/** + * @defgroup Pooling Pooling Functions + * + * Perform max and average pooling operations + * + */ + +/** + * @brief s8 average pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. 
+ * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get the required buffer size for S8 average pooling function + * @param[in] dim_dst_width output tensor dimension + * @param[in] ch_src number of input tensor channels + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src); + +/** + * @brief Get the required buffer size for S8 average pooling function for processors with DSP extension. + * Refer to arm_avgpool_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_avgpool_s8_get_buffer_size(). + * + */ +int32_t arm_avgpool_s8_get_buffer_size_dsp(const int dim_dst_width, const int ch_src); + +/** + * @brief Get the required buffer size for S8 average pooling function for Arm(R) Helium Architecture case. + * Refer to arm_avgpool_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_avgpool_s8_get_buffer_size(). + * + */ +int32_t arm_avgpool_s8_get_buffer_size_mve(const int dim_dst_width, const int ch_src); + +/** + * @brief s16 average pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. 
+ * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int16 + * + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + * ARM_CMSIS_NN_ARG_ERROR - In case of invalid arguments + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_avgpool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get the required buffer size for S16 average pooling function + * @param[in] dim_dst_width output tensor dimension + * @param[in] ch_src number of input tensor channels + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_avgpool_s16_get_buffer_size(const int dim_dst_width, const int ch_src); + +/** + * @brief Get the required buffer size for S16 average pooling function for processors with DSP extension. + * Refer to arm_avgpool_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_avgpool_s16_get_buffer_size(). + * + */ +int32_t arm_avgpool_s16_get_buffer_size_dsp(const int dim_dst_width, const int ch_src); + +/** + * @brief Get the required buffer size for S16 average pooling function for Arm(R) Helium Architecture case. + * Refer to arm_avgpool_s16_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. 
If compiling for an Arm target, use + * arm_avgpool_s16_get_buffer_size(). + * + */ +int32_t arm_avgpool_s16_get_buffer_size_mve(const int dim_dst_width, const int ch_src); + +/** + * @brief s8 max pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. The input tensor must not + * overlap with the output tensor. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_max_pool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief s16 max pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. 
+ * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * @param[in] src Input (activation) data pointer. The input tensor must not + * overlap with the output tensor. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] dst Output data pointer. Data type: int16 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_max_pool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *dst); + +/** + * @defgroup Softmax Softmax Functions + * + * + */ + +/** + * @brief S8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. 
Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_softmax_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output); + +/** + * @brief S8 to s16 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_softmax_s8_s16(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int16_t *output); + +/** + * @brief S16 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] softmax_params Softmax s16 layer parameters with two pointers to LUTs specified below. + * For indexing the high 9 bits are used and 7 remaining for interpolation. + * That means 512 entries for the 9-bit indexing and 1 extra for interpolation, i.e. 513 + * values for each LUT.
+ * - Lookup table for exp(x), where x uniform distributed between [-10.0 , 0.0] + * - Lookup table for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0] + * @param[out] output Pointer to the output tensor + * @return The function returns + * ARM_CMSIS_NN_ARG_ERROR Argument error check failed + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +arm_cmsis_nn_status arm_softmax_s16(const int16_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const cmsis_nn_softmax_lut_s16 *softmax_params, + int16_t *output); + +/** + * @brief U8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. 
Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ + +void arm_softmax_u8(const uint8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + uint8_t *output); + +/** + * @defgroup Reshape Reshape Functions + * + */ + +/** + * @brief Reshape a s8 vector into another with different shape + * @param[in] input points to the s8 input vector + * @param[out] output points to the s8 output vector + * @param[in] total_size total size of the input and output vectors in bytes + * + * @note The output is expected to be in a memory area that does not overlap with the input's + * + */ +void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size); + +/** + * @defgroup Transpose Transpose Functions + * + */ + +/** + * @brief Basic transpose function + * + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[out] output_data Output data pointer. Data type: int8 + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] output_dims Output tensor dimensions. Format may be arbitrary relative to input format. + * The output dimension will depend on the permutation dimensions. + * In other words the out dimensions are the result of applying the permutation + * to the input dimensions. + * @param[in] transpose_params Transpose parameters. Contains permutation dimensions. + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. 
+ * + */ +arm_cmsis_nn_status arm_transpose_s8(const int8_t *input_data, + int8_t *const output_data, + const cmsis_nn_dims *const input_dims, + const cmsis_nn_dims *const output_dims, + const cmsis_nn_transpose_params *const transpose_params); + +/** + * @defgroup Concatenation Concatenation Functions + * + */ + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis + * This function should be called for each input tensor to concatenate. The argument offset_x + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_x = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x) + * offset_x += input_x[i] + * } + * + * This function assumes that the output tensor has: + * -# The same height of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor. + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_x * input_y * input_z * input_w) + offset_x + * bytes. 
+ * @param[in] output_x Width of output tensor + * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_x is less than output_x + * + */ +void arm_concatenation_s8_x(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_x, + const uint32_t offset_x); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis + * This function should be called for each input tensor to concatenate. The argument offset_y + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_y = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y) + * offset_y += input_y[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor. + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_z * input_w * input_x * input_y) + offset_y + * bytes. 
+ * @param[in] output_y Height of output tensor + * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_y is less than output_y + * + */ +void arm_concatenation_s8_y(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_y, + const uint32_t offset_y); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis + * This function should be called for each input tensor to concatenate. The argument offset_z + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_z = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z) + * offset_z += input_z[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with output tensor. + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_x * input_y * input_z * input_w) + offset_z + * bytes. 
+ * @param[in] output_z Channels in output tensor + * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_z is less than output_z + * + */ +void arm_concatenation_s8_z(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_z, + const uint32_t offset_z); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size) + * This function should be called for each input tensor to concatenate. The argument offset_w + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_w = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w) + * offset_w += input_w[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same number of channels of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * input_x * input_y * input_z * input_w + * bytes.
+ * @param[in] offset_w The offset on the W axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + */ +void arm_concatenation_s8_w(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint32_t offset_w); +/** + * @defgroup SVDF SVDF Functions + * + */ + +/** + * @brief s8 SVDF function with 8 bit state tensor and 8 bit time weights + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function arm_fully_connected_s8_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] input_ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] output_ctx Temporary output scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. 
+ * @param[in] svdf_params SVDF Parameters + * Range of svdf_params->input_offset : [-128, 127] + * Range of svdf_params->output_offset : [-128, 127] + * @param[in] input_quant_params Input quantization parameters + * @param[in] output_quant_params Output quantization parameters + * @param[in] input_dims Input tensor dimensions + * @param[in] input_data Pointer to input tensor + * @param[in] state_dims State tensor dimensions + * @param[in] state_data Pointer to state tensor + * @param[in] weights_feature_dims Weights (feature) tensor dimensions + * @param[in] weights_feature_data Pointer to the weights (feature) tensor + * @param[in] weights_time_dims Weights (time) tensor dimensions + * @param[in] weights_time_data Pointer to the weights (time) tensor + * @param[in] bias_dims Bias tensor dimensions + * @param[in] bias_data Pointer to bias tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output_data Pointer to the output tensor + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * 1. 
Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_svdf_s8(const cmsis_nn_context *ctx, + const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *state_dims, + int8_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const int8_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const int8_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief s8 SVDF function with 16 bit state tensor and 16 bit time weights + * + * @param[in] input_ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] output_ctx Temporary output scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. 
+ * @param[in] svdf_params SVDF Parameters + * Range of svdf_params->input_offset : [-128, 127] + * Range of svdf_params->output_offset : [-128, 127] + * @param[in] input_quant_params Input quantization parameters + * @param[in] output_quant_params Output quantization parameters + * @param[in] input_dims Input tensor dimensions + * @param[in] input_data Pointer to input tensor + * @param[in] state_dims State tensor dimensions + * @param[in] state_data Pointer to state tensor + * @param[in] weights_feature_dims Weights (feature) tensor dimensions + * @param[in] weights_feature_data Pointer to the weights (feature) tensor + * @param[in] weights_time_dims Weights (time) tensor dimensions + * @param[in] weights_time_data Pointer to the weights (time) tensor + * @param[in] bias_dims Bias tensor dimensions + * @param[in] bias_data Pointer to bias tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output_data Pointer to the output tensor + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_svdf_state_s16_s8(const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *state_dims, + int16_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const int8_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const int16_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_svdf_s8(). 
+ * @param[in] filter_dims dimension of filter + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_svdf_s8_get_buffer_size(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_svdf_s8() for processors with DSP extension. + * Refer to arm_svdf_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_svdf_s8_get_buffer_size(). + * + */ +int32_t arm_svdf_s8_get_buffer_size_dsp(const cmsis_nn_dims *filter_dims); + +/** + * @brief Get size of additional buffer required by arm_svdf_s8() for Arm(R) Helium Architecture case. + * Refer to arm_svdf_s8_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_svdf_s8_get_buffer_size(). + * + */ +int32_t arm_svdf_s8_get_buffer_size_mve(const cmsis_nn_dims *filter_dims); + +/** + * @defgroup LSTM LSTM Layer Functions + * + */ + +/** + * @brief LSTM unidirectional function with 8 bit input and output and 16 bit gate output, 32 bit bias. + * + * @param[in] input Pointer to input data + * @param[out] output Pointer to output data + * @param[in] params Struct containing all information about the lstm operator, see arm_nn_types. + * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the + * lstm operator, see arm_nn_types. + * + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_lstm_unidirectional_s8(const int8_t *input, + int8_t *output, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers); + +/** + * @brief LSTM unidirectional function with 16 bit input and output and 16 bit gate output, 64 bit bias. 
+ * + * @param[in] input Pointer to input data + * @param[out] output Pointer to output data + * @param[in] params Struct containing all information about the lstm operator, see arm_nn_types. + * @param[in] buffers Struct containing pointers to all temporary scratch buffers needed for the + * lstm operator, see arm_nn_types. + * + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_lstm_unidirectional_s16(const int16_t *input, + int16_t *output, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers); + +/** + * @brief Batch matmul function with 8 bit input and output. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * Optional function arm_fully_connected_s8_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * @param[in] bmm_params Batch matmul Parameters + * Adjoint flags are currently unused. + * @param[in] quant_params Quantization parameters + * @param[in] input_lhs_dims Input lhs tensor dimensions. + * This should be NHWC where lhs C = rhs C + * @param[in] input_lhs Pointer to input tensor + * @param[in] input_rhs_dims Input lhs tensor dimensions. + * This is expected to be transposed so + * should be NHWC where lhs C = rhs C + * @param[in] input_rhs Pointer to transposed input tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output Pointer to the output tensor + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * 2. Performs row * row matrix multiplication with the RHS transposed. 
+ * + */ +arm_cmsis_nn_status arm_batch_matmul_s8(const cmsis_nn_context *ctx, + const cmsis_nn_bmm_params *bmm_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_lhs_dims, + const int8_t *input_lhs, + const cmsis_nn_dims *input_rhs_dims, + const int8_t *input_rhs, + const cmsis_nn_dims *output_dims, + int8_t *output); + +/** + * @brief Batch matmul function with 16 bit input and output. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * Optional function arm_fully_connected_s8_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * @param[in] bmm_params Batch matmul Parameters + * Adjoint flags are currently unused. + * @param[in] quant_params Quantization parameters + * @param[in] input_lhs_dims Input lhs tensor dimensions. + * This should be NHWC where LHS.C = RHS.C + * @param[in] input_lhs Pointer to input tensor + * @param[in] input_rhs_dims Input lhs tensor dimensions. + * This is expected to be transposed so + * should be NHWC where LHS.C = RHS.C + * @param[in] input_rhs Pointer to transposed input tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output Pointer to the output tensor + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * 2. Performs row * row matrix multiplication with the RHS transposed. 
+ * + */ +arm_cmsis_nn_status arm_batch_matmul_s16(const cmsis_nn_context *ctx, + const cmsis_nn_bmm_params *bmm_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_lhs_dims, + const int16_t *input_lhs, + const cmsis_nn_dims *input_rhs_dims, + const int16_t *input_rhs, + const cmsis_nn_dims *output_dims, + int16_t *output); + +/** + * @defgroup Pad Pad Layer Functions: + * + */ + +/** + * @brief Expands the size of the input by adding constant values before and after the data, in all dimensions. + * + * @param[in] input Pointer to input data + * @param[out] output Pointer to output data + * @param[in] pad_value Value to pad with + * @param[in] input_size Input tensor dimensions + * @param[in] pre_pad Padding to apply before data in each dimension + * @param[in] post_pad Padding to apply after data in each dimension + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_pad_s8(const int8_t *input, + int8_t *output, + const int8_t pad_value, + const cmsis_nn_dims *input_size, + const cmsis_nn_dims *pre_pad, + const cmsis_nn_dims *post_pad); + +/** + * @brief Elementwise binary minimum with 8bit data. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] input_1_data Pointer to input1 tensor + * @param[in] input_1_dims Input1 tensor dimensions + * @param[in] input_2_data Pointer to input2 tensor + * @param[in] input_2_dims Input2 tensor dimensions + * @param[out] output_data Pointer to the output tensor + * @param[in] output_dims Output tensor dimensions + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. 
Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_minimum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims); + +/** + * @brief Elementwise binary maximum with 8bit data. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] input_1_data Pointer to input1 tensor + * @param[in] input_1_dims Input1 tensor dimensions + * @param[in] input_2_data Pointer to input2 tensor + * @param[in] input_2_dims Input2 tensor dimensions + * @param[out] output_data Pointer to the output tensor + * @param[in] output_dims Output tensor dimensions + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_maximum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims); + +#ifdef __cplusplus +} +#endif + +#endif /* ARM_NNFUNCTIONS_H */ diff --git a/src/third_party/cmsis_nn/Include/arm_nnsupportfunctions.h b/src/third_party/cmsis_nn/Include/arm_nnsupportfunctions.h new file mode 100644 index 0000000..067000c --- /dev/null +++ b/src/third_party/cmsis_nn/Include/arm_nnsupportfunctions.h @@ -0,0 +1,2160 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nnsupportfunctions.h + * Description: Public header file of support functions for CMSIS NN Library + * + * $Date: 08 Nov 2024 + * $Revision: V.22.7.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#ifndef ARM_NNSUPPORTFUNCTIONS_H +#define ARM_NNSUPPORTFUNCTIONS_H + +#include "third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h" +#include "third_party/cmsis_nn/Include/arm_nn_math_types.h" +#include "third_party/cmsis_nn/Include/arm_nn_types.h" + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#define USE_FAST_DW_CONV_S16_FUNCTION(dw_conv_params, filter_dims, input_dims) \ + (dw_conv_params->ch_mult == 1 && dw_conv_params->dilation.w == 1 && dw_conv_params->dilation.h == 1 && \ + filter_dims->w * filter_dims->h < 512) + +#define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0) +#define RIGHT_SHIFT(_shift) (_shift > 0 ? 0 : -_shift) +#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0 +#define MASK_IF_NON_ZERO(x) (x) != 0 ? ~0 : 0 +#define SELECT_USING_MASK(mask, a, b) ((mask) & (a)) ^ (~(mask) & (b)) + +#define MAX(A, B) ((A) > (B) ? (A) : (B)) +#define MIN(A, B) ((A) < (B) ? (A) : (B)) +#define CLAMP(x, h, l) MAX(MIN((x), (h)), (l)) +#define REDUCE_MULTIPLIER(_mult) ((_mult < 0x7FFF0000) ?
((_mult + (1 << 15)) >> 16) : 0x7FFF) + +// Number of channels processed in a block for DW Conv with Int8 weights(MVE) +// Requirement: Greater than 0 & less than 128 +// This can be fine tuned to match number of input channels for best performance. +// A layer with lower number of channels than CH_IN_BLOCK_MVE will result in higher +// scratch buffer usage and a layer with higher number of channels than CH_IN_BLOCK_MVE +// will result in lower scratch buffer usage. +#define CH_IN_BLOCK_MVE (124) + +// Number of channels processed in a block for DW Conv with Int4 weights(MVE) +// Requirement: See CH_IN_BLOCK_MVE. +// An additional requirement for this signed 4 variant is that it must be an even number. +#define S4_CH_IN_BLOCK_MVE (124) + +// For input of int16 when number of columns are above this limit int64 accumulation is needed +// to not lose precision. +#define MAX_COL_COUNT (512) + +// CMSIS-NN has two implementations of the transpose conv operator, selected depending on the number of input +// channels. This is based on heuristics and may be finetuned depending on other parameters of the operator +#define REVERSE_TCOL_EFFICIENT_THRESHOLD (16) + +// Threshold for number of output channels that decide whether to convert a depthwise conv to a +// regular conv operation when number of input channels is one. +// Only applicable for processors with MVE extension. +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #define CONVERT_DW_CONV_WITH_ONE_INPUT_CH_AND_OUTPUT_CH_ABOVE_THRESHOLD (8) +#else + #define CONVERT_DW_CONV_WITH_ONE_INPUT_CH_AND_OUTPUT_CH_ABOVE_THRESHOLD (1) +#endif + +// By default this will have no effect. During compilation this may be set to __restrict, +// which may be beneficial for performance. See README.md for more information. +#ifndef OPTIONAL_RESTRICT_KEYWORD + #define OPTIONAL_RESTRICT_KEYWORD +#endif + +/** + * @brief definition to pack four 8 bit values.
+ */ +#define PACK_S8x4_32x1(v0, v1, v2, v3) \ + ((((int32_t)(v0) << 0) & (int32_t)0x000000FF) | (((int32_t)(v1) << 8) & (int32_t)0x0000FF00) | \ + (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) | (((int32_t)(v3) << 24) & (int32_t)0xFF000000)) + +/** + * @brief definition to pack two 16 bit values. + */ +#define PACK_Q15x2_32x1(v0, v1) (((int32_t)v0 & (int32_t)0xFFFF) | ((int32_t)v1 << 16)) + +/** + * @defgroup groupSupport Private + * + * Internal Support functions. Not intended to be called directly by a CMSIS-NN user. + * + */ + +/** + * @defgroup genPrivTypes Structure Types + * @ingroup groupSupport + * @brief Data structure types used by private functions. + * @{ + */ + +/** + * @brief Union for SIMD access of q31/s16/s8 types + */ +union arm_nnword +{ + int32_t word; + /**< q31 type */ + int16_t half_words[2]; + /**< s16 type */ + int8_t bytes[4]; + /**< s8 type */ +}; + +/** + * @brief Union for data type long long + */ +struct arm_nn_double +{ + uint32_t low; + int32_t high; +}; + +union arm_nn_long_long +{ + int64_t long_long; + struct arm_nn_double word; +}; + +/** + * @} // end group groupPrivTypes + */ + +/** + * @defgroup supportConversion Data Conversion + * + * Perform data type conversion in-between neural network operations + * + */ + +/** + * @brief Converts the elements from a s8 vector to a s16 vector with an added offset + * @param[in] src pointer to the s8 input vector + * @param[out] dst pointer to the s16 output vector + * @param[in] block_size length of the input vector + * @param[in] offset s16 offset to be added to each input vector element. + * + * \par Description: + * + * Output elements are ordered. + * The equation used for the conversion process is: + * + * <pre>
+ *  dst[n] = (int16_t) src[n] + offset;   0 <= n < block_size.
+ * </pre>
+ * + */ +void arm_q7_to_q15_with_offset(const int8_t *src, int16_t *dst, int32_t block_size, int16_t offset); + +#if defined(ARM_MATH_DSP) +/** + * @brief Converts the elements from a s8 vector to a s16 vector with an added offset + * @param[in] src pointer to the s8 input vector + * @param[out] dst pointer to the s16 output vector + * @param[in] block_size length of the input vector + * @param[in] offset s16 offset to be added to each input vector element. + * + * \par Description: + * + * No additional ordering is done with the result that output elements are not in order. + * Instead of ABCD order will be ACBD. + * Note this is for processors with DSP extension only. + * The equation used for the conversion process is: + * + * <pre>
+ *  dst[n - 0] = (int16_t) src[n - 0] + offset;   0 <= n < block_size.
+ *  dst[n - 1] = (int16_t) src[n - 2] + offset;   0 <= n < block_size.
+ *  dst[n - 2] = (int16_t) src[n - 1] + offset;   0 <= n < block_size.
+ *  dst[n - 3] = (int16_t) src[n - 3] + offset;   0 <= n < block_size.
+ * </pre>
+ * + */ +void arm_s8_to_s16_unordered_with_offset(const int8_t *src, int16_t *dst, int32_t block_size, int16_t offset); + +#endif + +/** + * @brief Get the required buffer size for optimized s8 depthwise convolution + * function with constraint that in_channel equals out_channel. + * This is for processors with MVE extension. + * Refer to arm_depthwise_conv_s8_opt_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_s8_opt_get_buffer_size(). Note also this is a support function, + * so not recommended to call directly even on Host. + * + */ +int32_t arm_depthwise_conv_s8_opt_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims); + +/** + * @brief Get the required buffer size for optimized s8 depthwise convolution + * function with constraint that in_channel equals out_channel. + * This is for processors with DSP extension. + * Refer to arm_depthwise_conv_s8_opt_get_buffer_size() for function argument details. + * + * @note Intended for compilation on Host. If compiling for an Arm target, use + * arm_depthwise_conv_s8_opt_get_buffer_size(). Note also this is a support function, + * so not recommended to call directly even on Host. + * + */ +int32_t arm_depthwise_conv_s8_opt_get_buffer_size_dsp(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims); + +/** + * @brief Depthwise conv on an im2col buffer where the input channel equals output channel. + * @param[in] row pointer to row + * @param[in] col pointer to im2col buffer, always consists of 2 columns. + * @param[in] num_ch number of channels + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. 
Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] kernel_size number of elements in one column. + * @param[in] output_bias per output channel bias. Range : int32 + * @param[out] out pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details Supported framework: TensorFlow Lite micro. + */ +int8_t *arm_nn_depthwise_conv_s8_core(const int8_t *row, + const int16_t *col, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t kernel_size, + const int32_t *const output_bias, + int8_t *out); + +/** + * @brief General Matrix-multiplication function with per-channel requantization. + * @param[in] input_row pointer to row operand + * @param[in] input_col pointer to col operand + * @param[in] output_ch number of rows of input_row + * @param[in] col_batches number of column batches. Range: 1 to 4 + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] col_offset input tensor(col) offset. + * @param[in] row_offset kernel offset(row). Not used. + * @param[in] out_activation_min minimum value to clamp the output to. Range : int8 + * @param[in] out_activation_max maximum value to clamp the output to. Range : int8 + * @param[in] row_len number of elements in each row + * @param[in] bias per output channel bias. Range : int32 + * @param[in,out] out pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. 
+ * + * @details Supported framework: TensorFlow Lite + */ +int8_t *arm_nn_mat_mult_s8(const int8_t *input_row, + const int8_t *input_col, + const uint16_t output_ch, + const uint16_t col_batches, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t col_offset, + const int32_t row_offset, + const int16_t out_activation_min, + const int16_t out_activation_max, + const uint16_t row_len, + const int32_t *const bias, + int8_t *out); +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization for 16 bits convolution. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] activation_min minimum value to clamp the output to. Range : int16 + * @param[in] activation_max maximum value to clamp the output to. Range : int16 + * @param[in] num_col_a number of columns of A + * @param[in] bias_data pointer to struct with bias vector. The length of this vector is equal to the number + * of output columns (or RHS input rows). The vector can be int32 or int64 indicated by a + * flag in the struct. + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. 
+ */ +int16_t *arm_nn_mat_mult_kernel_s16(const int8_t *input_a, + const int16_t *input_b, + const int32_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + const int32_t num_col_a, + const cmsis_nn_bias_data *const bias_data, + int16_t *out_0); + +/** + * @brief General Vector by Matrix multiplication with requantization and storage of result. + * @param[in] row_elements number of row elements + * @param[in] skipped_row_elements number of row elements skipped due to padding. + * row_elements + skipped_row_elements = (kernel_x * kernel_y) * input_ch + * @param[in] row_base_ref pointer to row operand + * @param[in] col_base_ref pointer to col operand + * @param[out] out_ch Number of output channels + * @param[in] conv_params Pointer to convolution parameters like offsets and activation values + * @param[in] quant_params Pointer to per-channel quantization parameters + * @param[in] bias Pointer to optional per-channel bias + * @param[out] output Pointer to output where int8 results are stored. + * @return The function performs matrix(row_base_ref) multiplication with vector(col_base_ref) and + * scaled result is stored in memory. 
+ * + * @details Pseudo-code + * *output = 0 + * sum_col = 0 + * for (j = 0; j < out_ch; j++) + * for (i = 0; i < row_elements; i++) + * *output += row_base_ref[i] * col_base_ref[i] + * sum_col += col_base_ref[i] + * scale sum_col using quant_params and bias + * store result in 'output' + * + * + */ +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output); + +/** + * @brief General Vector by Matrix multiplication with requantization, storage of result and int4 weights packed into an + * int8 buffer. + * @param[in] row_elements number of row elements + * @param[in] skipped_row_elements number of row elements skipped due to padding. + * row_elements + skipped_row_elements = (kernel_x * kernel_y) * input_ch + * @param[in] row_base_ref pointer to row operand + * @param[in] col_base_ref pointer to col operand as packed int4 + * @param[out] out_ch Number of output channels + * @param[in] conv_params Pointer to convolution parameters like offsets and activation values + * @param[in] quant_params Pointer to per-channel quantization parameters + * @param[in] bias Pointer to optional per-channel bias + * @param[out] output Pointer to output where int8 results are stored. + * @return The function performs matrix(row_base_ref) multiplication with vector(col_base_ref) and + * scaled result is stored in memory. + * + * @details Pseudo-code as int8 example. Int4 filter data will be unpacked. 
+ * *output = 0 + * sum_col = 0 + * for (j = 0; j < out_ch; j++) + * for (i = 0; i < row_elements; i++) + * *output += row_base_ref[i] * col_base_ref[i] + * sum_col += col_base_ref[i] + * scale sum_col using quant_params and bias + * store result in 'output' + * + * + */ +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s4(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output); + +/** + * @brief Matrix-multiplication with requantization & activation function for four rows and one column + * @param[in] row_elements number of row elements + * @param[in] offset offset between rows. Can be the same as row_elements. + * For e.g, in a 1x1 conv scenario with stride as 1. + * @param[in] row_base pointer to row operand + * @param[in] col_base pointer to col operand + * @param[in] out_ch Number of output channels + * @param[in] conv_params Pointer to convolution parameters like offsets and activation values + * @param[in] quant_params Pointer to per-channel quantization parameters + * @param[in] bias Pointer to per-channel bias + * @param[out] output Pointer to output where int8 results are stored. + * + * @return The function returns the updated output pointer or NULL if implementation is not available. + * + * @details Compliant to TFLM int8 specification. MVE implementation only + */ +int8_t *arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output); + +/** + * @brief General Matrix-multiplication function with per-channel requantization. 
+ * This function assumes: + * - LHS input matrix NOT transposed (nt) + * - RHS input matrix transposed (t) + * - RHS is int8 packed with 2x int4 + * - LHS is int8 + * + * @note This operation also performs the broadcast bias addition before the requantization + * + * @param[in] lhs Pointer to the LHS input matrix + * @param[in] rhs Pointer to the RHS input matrix + * @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of + * output columns (or RHS input rows) + * @param[out] dst Pointer to the output matrix with "m" rows and "n" columns + * @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. + * The length of this vector is equal to the number of output columns (or RHS input + * rows) + * @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length + * of this vector is equal to the number of output columns (or RHS input rows) + * @param[in] lhs_rows Number of LHS input rows + * @param[in] rhs_rows Number of RHS input rows + * @param[in] rhs_cols Number of LHS/RHS input columns + * @param[in] lhs_offset Offset to be applied to the LHS input value + * @param[in] dst_offset Offset to be applied the output result + * @param[in] activation_min Minimum value to clamp down the output. Range : int8 + * @param[in] activation_max Maximum value to clamp up the output. 
Range : int8 + * @param[in] lhs_cols_offset Column offset between subsequent lhs_rows + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s4(const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset); + +/** + * @brief General Matrix-multiplication function with per-channel requantization. + * This function assumes: + * - LHS input matrix NOT transposed (nt) + * - RHS input matrix transposed (t) + * - RHS is int8 packed with 2x int4 + * - LHS is int8 + * - LHS/RHS input columns must be even numbered + * - LHS must be interleaved. Compare to arm_nn_mat_mult_nt_t_s4 where LHS is not interleaved. + * + * @note This operation also performs the broadcast bias addition before the requantization + * + * @param[in] lhs Pointer to the LHS input matrix + * @param[in] rhs Pointer to the RHS input matrix + * @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of + * output columns (or RHS input rows) + * @param[out] dst Pointer to the output matrix with "m" rows and "n" columns + * @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. + * The length of this vector is equal to the number of output columns (or RHS input + * rows) + * @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length + * of this vector is equal to the number of output columns (or RHS input rows) + * @param[in] lhs_rows Number of LHS input rows + * @param[in] rhs_rows Number of RHS input rows + * @param[in] rhs_cols Number of LHS/RHS input columns. Note this must be even. 
+ * @param[in] lhs_offset Offset to be applied to the LHS input value + * @param[in] dst_offset Offset to be applied the output result + * @param[in] activation_min Minimum value to clamp down the output. Range : int8 + * @param[in] activation_max Maximum value to clamp up the output. Range : int8 + * @param[in] lhs_cols_offset Column offset between subsequent lhs_rows + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_interleaved_t_even_s4(const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset); + +/** + * @brief General Matrix-multiplication function with per-channel requantization. + * This function assumes: + * - LHS input matrix NOT transposed (nt) + * - RHS input matrix transposed (t) + * + * @note This operation also performs the broadcast bias addition before the requantization + * + * @param[in] lhs Pointer to the LHS input matrix + * @param[in] rhs Pointer to the RHS input matrix + * @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of + * output columns (or RHS input rows) + * @param[out] dst Pointer to the output matrix with "m" rows and "n" columns + * @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. + * The length of this vector is equal to the number of output columns (or RHS input + * rows) + * @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. 
The length + * of this vector is equal to the number of output columns (or RHS input rows) + * @param[in] lhs_rows Number of LHS input rows + * @param[in] rhs_rows Number of RHS input rows + * @param[in] rhs_cols Number of LHS/RHS input columns + * @param[in] lhs_offset Offset to be applied to the LHS input value + * @param[in] dst_offset Offset to be applied the output result + * @param[in] activation_min Minimum value to clamp down the output. Range : int8 + * @param[in] activation_max Maximum value to clamp up the output. Range : int8 + * @param[in] row_address_offset Address offset between rows in output. NOTE: Only used for MVEI extension. + * @param[in] lhs_cols_offset Column offset between subsequent lhs_rows + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t row_address_offset, + const int32_t lhs_cols_offset); + +/** + * @brief General Matrix-multiplication function with per-channel requantization and int16 input (LHS) and output. + * This function assumes: + * - LHS input matrix NOT transposed (nt) + * - RHS input matrix transposed (t) + * + * @note This operation also performs the broadcast bias addition before the requantization + * + * @param[in] lhs Pointer to the LHS input matrix + * @param[in] rhs Pointer to the RHS input matrix + * @param[in] bias_data Pointer to struct with bias vector. The length of this vector is equal to the number + * of output columns (or RHS input rows). The vector can be int32 or int64 indicated by a + * flag in the struct. 
+ * @param[out] dst Pointer to the output matrix with "m" rows and "n" columns + * @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. + * The length of this vector is equal to the number of output columns (or RHS input + * rows) + * @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length + * of this vector is equal to the number of output columns (or RHS input rows) + * @param[in] lhs_rows Number of LHS input rows + * @param[in] rhs_rows Number of RHS input rows + * @param[in] rhs_cols Number of LHS/RHS input columns + * @param[in] activation_min Minimum value to clamp down the output. Range : int16 + * @param[in] activation_max Maximum value to clamp up the output. Range : int16 + * + * @details MVE implementation only. + * + * @return The function returns ARM_CMSIS_NN_SUCCESS or + * ARM_CMSIS_NN_NO_IMPL_ERROR if not for MVE + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s16(const int16_t *lhs, + const int8_t *rhs, + const cmsis_nn_bias_data *bias_data, + int16_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief General Matrix-multiplication function with int8 input and int32 output. + * This function assumes: + * - LHS input matrix NOT transposed (nt) + * - RHS input matrix transposed (t) + * + * @note Dst/output buffer must be zeroed out before calling this function. 
+ * + * @param[in] lhs Pointer to the LHS input matrix + * @param[in] rhs Pointer to the RHS input matrix + * @param[out] dst Pointer to the output matrix with "m" rows and "n" columns + * @param[in] lhs_rows Number of LHS input rows + * @param[in] rhs_rows Number of LHS input columns/RHS input rows + * @param[in] rhs_cols Number of RHS input columns + * @param[in] lhs_offset Offset to be applied to the LHS input value + * @param[in] dst_idx_offset Offset between subsequent output results + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8_s32(const int8_t *lhs, + const int8_t *rhs, + int32_t *dst, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_idx_offset); + +/** + * @brief s4 Vector by Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] packed_rhs Input right-hand side matrix (transposed) + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. + * Range: -127 to 128 + * @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. 
Range: int8 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s4(const int8_t *lhs, + const int8_t *packed_rhs, + const int32_t *bias, + int8_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief s8 Vector by Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] kernel_sum Kernel sums of the kernels (rhs). See arm_vector_sum_s8 for more info. + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. + * Range: -127 to 128 + * @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] address_offset Memory position offset for dst. First output is stored at 'dst', the + * second at 'dst + address_offset' and so on. Default value is typically 1. + * @param[in] rhs_offset Offset to be added to the input values of the right-hand side vector. 
+ * Range: -127 to 128 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *kernel_sum, + const int32_t *bias, + int8_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset, + const int32_t rhs_offset); + +/** + * @brief s8 Vector by Matrix (transposed) multiplication using per channel quantization for output + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] kernel_sum Kernel sums of the kernels (rhs). See arm_vector_sum_s8 for more info. + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. + * Range: -127 to 128 + * @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] dst_multiplier Output multipliers + * @param[in] dst_shift Output shifts + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] address_offset Memory position offset for dst. First output is stored at 'dst', the + * second at 'dst + address_offset' and so on. Default value is typically 1. + * @param[in] rhs_offset Offset to be added to the input values of the right-hand side vector. 
+ * Range: -127 to 128 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_per_ch_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *kernel_sum, + const int32_t *bias, + int8_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t *dst_multiplier, + const int32_t *dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset, + const int32_t rhs_offset); + +/** + * @brief s16 Vector by s8 Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int16 + * @param[in] activation_max Maximum value to clamp the output to. 
Range: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16(const int16_t *lhs, + const int8_t *rhs, + const int64_t *bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief s16 Vector by s16 Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int16 + * @param[in] activation_max Maximum value to clamp the output to. Range: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16_s16(const int16_t *lhs, + const int16_t *rhs, + const int64_t *bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief s8 Vector by Matrix (transposed) multiplication with s16 output + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side + * vector. Range: -127 to 128 + * @param[in] scatter_offset Address offset for dst. First output is stored at 'dst', the + * second at 'dst + scatter_offset' and so on. 
+ * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int16 + * @param[in] activation_max Maximum value to clamp the output to. Range: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_svdf_s8(const int8_t *lhs, + const int8_t *rhs, + int16_t *dst, + const int32_t lhs_offset, + const int32_t scatter_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in padded cases where + * the padding is -lhs_offset(Range: int8). Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 + * @param[in] active_ch Subset of total_ch processed + * @param[in] total_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels + * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. 
Length of vector is equal to number of channels + * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementation is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read + * out for the following. + * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_padded_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t lhs_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + int8_t *out); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. + * Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 + * @param[in] active_ch Subset of total_ch processed + * @param[in] total_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. + * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. 
+ * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementation is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read + * out for the following. + * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t lhs_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + int8_t *out); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. rhs + * consists of packed int4 data. Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed). Consists of int4 data packed in an int8 + * buffer. + * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 + * @param[in] active_ch Subset of total_ch processed + * @param[in] total_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. + * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. 
+ * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementation is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read + * out for the following. + * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s4(const int8_t *lhs, + const int8_t *rhs, + const int32_t lhs_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + int8_t *out); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. + * Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. + * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementation is available + * - NULL if no implementation is available. 
+ *
+ * @note If number of channels is not a multiple of 4, up to 3 elements outside the boundary will be read
+ * out for the following.
+ *        - Output shift
+ *        - Output multiplier
+ *        - Output bias
+ *        - rhs
+ */
+int16_t *arm_nn_depthwise_conv_nt_t_s16(const int16_t *lhs,
+                                        const int8_t *rhs,
+                                        const uint16_t num_ch,
+                                        const int32_t *out_shift,
+                                        const int32_t *out_mult,
+                                        const int32_t activation_min,
+                                        const int32_t activation_max,
+                                        const uint16_t row_x_col,
+                                        const int64_t *const output_bias,
+                                        int16_t *out);
+
+/**
+ * @brief Row of s8 scalars multiplied with an s8 matrix and accumulated into a s32 rolling scratch buffer.
+ *        Helper function for transposed convolution.
+ *
+ * @param[in]  lhs             Input left-hand side scalars
+ * @param[in]  rhs             Input right-hand side matrix
+ * @param[out] output_start    Output buffer start
+ * @param[in]  output_index    Output buffer current index
+ * @param[in]  output_max      Output buffer size
+ * @param[in]  rhs_rows        Number of rows in rhs matrix
+ * @param[in]  rhs_cols        Number of columns in rhs matrix
+ * @param[in]  input_channels  Number of input channels
+ * @param[in]  output_channels Number of output channels
+ * @param[in]  lhs_offset      Offset added to lhs before multiplication
+ * @param[in]  row_offset      Address offset between each row of data output
+ * @param[in]  input_x         Length of lhs scalar row.
+ * @param[in]  stride_x        Address offset between each scalar-matrix multiplication result.
+ * @param[in]  skip_row_top    Skip rows on top of the filter, used for padding.
+ * @param[in]  skip_row_bottom Skip rows in the bottom of the filter, used for padding.
+ *
+ * @return The function returns ARM_CMSIS_NN_SUCCESS
+ *
+ * @note Rolling buffer refers to how the function wraps around the scratch buffer, e.g. it starts writing at
+ * [output_start + output_index], writes to [output_start + output_max] and then continues at [output_start] again.
+ */ +arm_cmsis_nn_status arm_nn_transpose_conv_row_s8_s32(const int8_t *lhs, + const int8_t *rhs, + int32_t *output_start, + const int32_t output_index, + const int32_t output_max, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t input_channels, + const int32_t output_channels, + const int32_t lhs_offset, + const int32_t row_offset, + const int32_t input_x, + const int32_t stride_x, + const int32_t skip_row_top, + const int32_t skip_row_bottom); + +/** + @brief Read 2 s16 elements and post increment pointer. + @param[in] in_q15 Pointer to pointer that holds address of input. + @return q31 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_q15x2_ia(const int16_t **in_q15) +{ + int32_t val; + + memcpy(&val, *in_q15, 4); + *in_q15 += 2; + + return (val); +} + +/** + @brief Read 4 s8 from s8 pointer and post increment pointer. + @param[in] in_s8 Pointer to pointer that holds address of input. + @return q31 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_s8x4_ia(const int8_t **in_s8) +{ + int32_t val; + memcpy(&val, *in_s8, 4); + *in_s8 += 4; + + return (val); +} + +/** + @brief Read 2 s8 from s8 pointer and post increment pointer. + @param[in] in_s8 Pointer to pointer that holds address of input. + @return q31 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_s8x2_ia(const int8_t **in_s8) +{ + int32_t val; + memcpy(&val, *in_s8, 2); + *in_s8 += 2; + + return (val); +} + +/** + @brief Read 2 int16 values from int16 pointer. + @param[in] in pointer to address of input. + @return s32 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_s16x2(const int16_t *in) +{ + int32_t val; + memcpy(&val, in, 4); + + return (val); +} + +/** + @brief Read 4 s8 values. + @param[in] in_s8 pointer to address of input. + @return s32 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_s8x4(const int8_t *in_s8) +{ + int32_t val; + memcpy(&val, in_s8, 4); + + return (val); +} +/** + @brief Read 2 s8 values. + @param[in] in_s8 pointer to address of input. 
+ @return s32 value + */ +__STATIC_FORCEINLINE int32_t arm_nn_read_s8x2(const int8_t *in_s8) +{ + int32_t val; + memcpy(&val, in_s8, 2); + + return (val); +} + +/** + @brief Write four s8 to s8 pointer and increment pointer afterwards. + @param[in] in Double pointer to input value + @param[in] value Four bytes to copy + */ +__STATIC_FORCEINLINE void arm_nn_write_s8x4_ia(int8_t **in, int32_t value) +{ + memcpy(*in, &value, 4); + *in += 4; +} + +/** + * @brief memset optimized for MVE + * @param[in, out] dst Destination pointer + * @param[in] val Value to set + * @param[in] block_size Number of bytes to copy. + * + */ +__STATIC_FORCEINLINE void arm_memset_s8(int8_t *dst, const int8_t val, uint32_t block_size) +{ +#if defined(ARM_MATH_MVEI) + __asm volatile(" vdup.8 q0, %[set_val] \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vstrb.8 q0, [%[in]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [in] "+r"(dst) + : [cnt] "r"(block_size), [set_val] "r"(val) + : "q0", "memory", "r14"); +#else + memset(dst, val, block_size); +#endif +} + +#if defined(ARM_MATH_DSP) + +/** + * @brief read and expand one s4 word into two s8 words. + */ +__STATIC_FORCEINLINE void read_and_pad_s4(const int8_t *source, int32_t *out1, int32_t *out2) +{ + int16_t in = arm_nn_read_s8x2(source); + int32_t inA = (in & 0x00FF) | ((in & 0xFF00) << 8); + + *out1 = SXTB16_RORn(__sxtb16(inA << 4), 4); + *out2 = SXTB16_RORn(__sxtb16(inA), 4); +} + +/** + * @brief read and expand one s4 word into two s8 words. + * @details The s4 elements are not evenly aligned on the byte boundary, so 3 bytes need to be read instead of 2. + * In other words first nibble to read start at the middle of a byte. 
+ * byte index, s4 element + * 0, s4_x + * 0, s4_0 + * 1, s4_1 + * 1, s4_2 + * 2, s4_3 + * 2, s4_x + */ +__STATIC_FORCEINLINE void read_and_pad_s4_uneven(const int8_t *source, int32_t *out1, int32_t *out2) +{ + int32_t inA1 = (source[0] & 0xFF) | ((source[1] & 0xFF) << 16); + int32_t inA2 = (source[1] & 0xFF) | ((source[2] & 0xFF) << 16); + + *out1 = SXTB16_RORn(__sxtb16(inA2 << 4), 4); + *out2 = SXTB16_RORn(__sxtb16(inA1), 4); +} + +/** + * @brief read and expand one s4 word into two s16 words with ordering. + */ +__STATIC_FORCEINLINE void read_and_pad_s4_ordered(const int8_t *source, int32_t *out1, int32_t *out2) +{ + int16_t in = arm_nn_read_s8x2(source); + int32_t inA = (in & 0x00FF) | ((in & 0xFF00) << 8); + int32_t inAbuf1 = SXTB16_RORn(__sxtb16(inA), 4); + int32_t inAbuf2 = SXTB16_RORn(__sxtb16(inA << 4), 4); + #ifndef ARM_MATH_BIG_ENDIAN + *out2 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out1 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #else + *out1 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out2 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #endif +} + +/** + * @brief read and expand one s8 word into two s16 words with ordering. + */ +__STATIC_FORCEINLINE const int8_t *read_and_pad(const int8_t *source, int32_t *out1, int32_t *out2) +{ + int32_t inA = arm_nn_read_s8x4_ia(&source); + int32_t inAbuf1 = SXTB16_RORn((uint32_t)inA, 8); + int32_t inAbuf2 = SXTB16(inA); + + #ifndef ARM_MATH_BIG_ENDIAN + *out2 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out1 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #else + *out1 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out2 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #endif + + return source; +} + +/** + * @brief read and expand one s8 word into two s16 words with ordering and addition. 
+ */ +__STATIC_FORCEINLINE void read_pad_and_add_s8(const int8_t *source, int32_t *out1, int32_t *out2, const uint32_t add) +{ + int32_t inA = arm_nn_read_s8x4(source); + int32_t inAbuf1 = SXTAB16_RORn(add, (uint32_t)inA, 8); + int32_t inAbuf2 = SXTAB16(add, inA); + + #ifndef ARM_MATH_BIG_ENDIAN + *out2 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out1 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #else + *out1 = (int32_t)(PKHTB(inAbuf1, inAbuf2, 16)); + *out2 = (int32_t)(PKHBT(inAbuf2, inAbuf1, 16)); + #endif +} + +/** + * @brief read and expand two bytes into one word with ordering. + */ +__STATIC_FORCEINLINE void read_and_pad_s8x2(const int8_t *source, int32_t *out) +{ + int16_t in = arm_nn_read_s8x2(source); + int32_t inA = (in & 0x00FF) | ((in & 0xFF00) << 8); + *out = SXTB16(inA); +} + +/** + * @brief read and expand two bytes into one word with ordering and addition. + */ +__STATIC_FORCEINLINE void read_pad_and_add_s8x2(const int8_t *source, int32_t *out, const uint32_t add) +{ + int16_t in = arm_nn_read_s8x2(source); + int32_t inA = (in & 0x00FF) | ((in & 0xFF00) << 8); + *out = SXTAB16(add, inA); +} + +/** + * @brief read and expand one s8 word into two s16 words with no additional ordering. + */ +__STATIC_FORCEINLINE const int8_t *read_and_pad_reordered(const int8_t *source, int32_t *out1, int32_t *out2) +{ + int32_t inA = arm_nn_read_s8x4_ia(&source); + #ifndef ARM_MATH_BIG_ENDIAN + *out2 = SXTB16(ROR((uint32_t)inA, 8)); + *out1 = SXTB16(inA); + #else + *out1 = SXTB16(ROR((uint32_t)inA, 8)); + *out2 = SXTB16(inA); + #endif + + return source; +} + +#endif + +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization and 4 bit weights. + * @param[in] input_a pointer to operand A, int8 packed with 2x int4. + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. 
+ * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] num_col_a number of columns of A + * @param[in] output_bias per output channel bias. Range : int32 + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. + */ +int8_t *arm_nn_mat_mult_kernel_s4_s16(const int8_t *input_a, + const int16_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t num_col_a, + const int32_t *const output_bias, + int8_t *out_0); +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. 
Range : int8 + * @param[in] num_col_a number of columns of A + * @param[in] aligned_num_col_a number of columns of A aligned by 4 + * @param[in] output_bias per output channel bias. Range : int32 + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. + */ +int8_t *arm_nn_mat_mult_kernel_s8_s16(const int8_t *input_a, + const int16_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const int32_t num_col_a, + const int32_t aligned_num_col_a, + const int32_t *const output_bias, + int8_t *out_0); + +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization, supporting an address offset + * between rows. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] num_col_a number of columns of A + * @param[in] aligned_num_col_a number of columns of A aligned by 4 + * @param[in] output_bias per output channel bias. 
Range : int32
+ * @param[in]     row_address_offset address offset between rows in the output
+ * @param[in,out] out_0              pointer to output
+ * @return     The function returns one of the two
+ *              1. The incremented output pointer for a successful operation or
+ *              2. NULL if implementation is not available.
+ *
+ * @details This function does the matrix multiplication of weight matrix for all output channels
+ *          with 2 columns from im2col and produces two elements/output_channel. The outputs are
+ *          clamped in the range provided by activation min and max.
+ *
+ *          This function is slightly less performant than arm_nn_mat_mult_kernel_s8_s16, but allows support for
+ *          grouped convolution. Supported framework: TensorFlow Lite micro.
+ */
+int8_t *arm_nn_mat_mult_kernel_row_offset_s8_s16(const int8_t *input_a,
+                                                 const int16_t *input_b,
+                                                 const uint16_t output_ch,
+                                                 const int32_t *out_shift,
+                                                 const int32_t *out_mult,
+                                                 const int32_t out_offset,
+                                                 const int16_t activation_min,
+                                                 const int16_t activation_max,
+                                                 const int32_t num_col_a,
+                                                 const int32_t aligned_num_col_a,
+                                                 const int32_t *const output_bias,
+                                                 const int32_t row_address_offset,
+                                                 int8_t *out_0);
+
+/**
+ * @brief Common softmax function for s8 input and s8 or s16 output
+ * @param[in]  input          Pointer to the input tensor
+ * @param[in]  num_rows       Number of rows in the input tensor
+ * @param[in]  row_size       Number of elements in each input row
+ * @param[in]  mult           Input quantization multiplier
+ * @param[in]  shift          Input quantization shift within the range [0, 31]
+ * @param[in]  diff_min       Minimum difference with max in row.
Used to check if + * the quantized exponential operation can be performed + * @param[in] int16_output Indicating s8 output if 0 else s16 output + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_nn_softmax_common_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + const bool int16_output, + void *output); + +/** + * @brief macro for adding rounding offset + */ +#ifndef ARM_NN_TRUNCATE + #define NN_ROUND(out_shift) ((0x1 << out_shift) >> 1) +#else + #define NN_ROUND(out_shift) 0 +#endif + +// Macros for shortening quantization functions' names and avoid long lines +#define MUL_SAT(a, b) arm_nn_doubling_high_mult((a), (b)) +#define MUL_SAT_MVE(a, b) arm_doubling_high_mult_mve_32x4((a), (b)) +#define MUL_POW2(a, b) arm_nn_mult_by_power_of_two((a), (b)) + +#define DIV_POW2(a, b) arm_nn_divide_by_power_of_two((a), (b)) +#define DIV_POW2_MVE(a, b) arm_divide_by_power_of_two_mve((a), (b)) + +#define EXP_ON_NEG(x) arm_nn_exp_on_negative_values((x)) +#define ONE_OVER1(x) arm_nn_one_over_one_plus_x_for_x_in_0_1((x)) + +/** + * @brief Saturating doubling high multiply. Result matches + * NEON instruction VQRDMULH. + * @param[in] m1 Multiplicand. Range: {NN_Q31_MIN, NN_Q31_MAX} + * @param[in] m2 Multiplier. Range: {NN_Q31_MIN, NN_Q31_MAX} + * @return Result of multiplication. + * + */ +__STATIC_FORCEINLINE int32_t arm_nn_doubling_high_mult(const int32_t m1, const int32_t m2) +{ + int32_t result = 0; + // Rounding offset to add for a right shift of 31 + int64_t mult = 1 << 30; + + if ((m1 < 0) ^ (m2 < 0)) + { + mult = 1 - mult; + } + // Gets resolved as a SMLAL instruction + mult = mult + (int64_t)m1 * m2; + + // Utilize all of the upper 32 bits. This is the doubling step + // as well. 
+ result = (int32_t)(mult / (1ll << 31)); + + if ((m1 == m2) && (m1 == (int32_t)NN_Q31_MIN)) + { + result = NN_Q31_MAX; + } + return result; +} + +/** + * @brief Doubling high multiply without saturation. This is intended + * for requantization where the scale is a positive integer + * + * @param[in] m1 Multiplicand. Range: {NN_Q31_MIN, NN_Q31_MAX} + * @param[in] m2 Multiplier Range: {NN_Q31_MIN, NN_Q31_MAX} + * @return Result of multiplication. + * @note The result of this matches that of neon instruction + * VQRDMULH for m1 in range {NN_Q31_MIN, NN_Q31_MAX} and m2 in + * range {NN_Q31_MIN + 1, NN_Q31_MAX}. Saturation occurs when + * m1 equals m2 equals NN_Q31_MIN and that is not handled by + * this function. + * + */ +__STATIC_FORCEINLINE int32_t arm_nn_doubling_high_mult_no_sat(const int32_t m1, const int32_t m2) +{ + int32_t result = 0; + union arm_nn_long_long mult; + + // Rounding offset to add for a right shift of 31 + mult.word.low = 1 << 30; + mult.word.high = 0; + + // Gets resolved as a SMLAL instruction + mult.long_long = mult.long_long + (int64_t)m1 * m2; + + // Utilize all of the upper 32 bits. This is the doubling step + // as well. + result = (int32_t)(mult.long_long >> 31); + + return result; +} + +/** + * @brief Rounding divide by power of two. + * @param[in] dividend - Dividend + * @param[in] exponent - Divisor = power(2, exponent) + * Range: [0, 31] + * @return Rounded result of division. Midpoint is rounded away from zero. 
+ * + */ +__STATIC_FORCEINLINE int32_t arm_nn_divide_by_power_of_two(const int32_t dividend, const int32_t exponent) +{ + int32_t result = 0; + const int32_t remainder_mask = (1 << exponent) - 1; + int32_t remainder = remainder_mask & dividend; + + // Basic division + result = dividend >> exponent; + + // Adjust 'result' for rounding (mid point away from zero) + int32_t threshold = remainder_mask >> 1; + if (result < 0) + { + threshold++; + } + if (remainder > threshold) + { + result++; + } + + return result; +} + +/** + * @brief Requantize a given value. + * @details Essentially returns (val * multiplier)/(2 ^ shift) with different rounding depending if + * CMSIS_NN_USE_SINGLE_ROUNDING is defined or not. + * @param[in] val Value to be requantized + * @param[in] multiplier Multiplier. Range {NN_Q31_MIN + 1, Q32_MAX} + * @param[in] shift Shift. Range: {-31, 30} + * Default branch: + * If shift is positive left shift 'val * multiplier' with shift + * If shift is negative right shift 'val * multiplier' with abs(shift) + * Single round branch: + * Input for total_shift in divide by '2 ^ total_shift' + * + * @return Default branch: + * Returns (val * multiplier) with rounding divided by (2 ^ shift) with rounding + * Single round branch: + * Returns (val * multiplier)/(2 ^ (31 - shift)) with rounding + * + */ +__STATIC_FORCEINLINE int32_t arm_nn_requantize(const int32_t val, const int32_t multiplier, const int32_t shift) +{ +#ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int64_t total_shift = 31 - shift; + const int64_t new_val = val * (int64_t)multiplier; + + int32_t result = new_val >> (total_shift - 1); + result = (result + 1) >> 1; + + return result; +#else + return arm_nn_divide_by_power_of_two(arm_nn_doubling_high_mult_no_sat(val * (1 << LEFT_SHIFT(shift)), multiplier), + RIGHT_SHIFT(shift)); +#endif +} + +/** + * @brief Requantize a given 64 bit value. 
+ * @param[in] val Value to be requantized in the range {-(1<<47)} to {(1<<47) - 1} + * @param[in] reduced_multiplier Reduced multiplier in the range {NN_Q31_MIN + 1, Q32_MAX} to {Q16_MIN + 1, + * Q16_MAX} + * @param[in] shift Left or right shift for 'val * multiplier' in the range {-31} to {7} + * + * @return Returns (val * multiplier)/(2 ^ shift) + * + */ +__STATIC_FORCEINLINE int32_t arm_nn_requantize_s64(const int64_t val, + const int32_t reduced_multiplier, + const int32_t shift) +{ + const int64_t new_val = val * reduced_multiplier; + + int32_t result = new_val >> (14 - shift); // 64->32 bit reduction + result = (result + 1) >> 1; // Last shift position and insert round + + return result; +} + +/** + * @brief memcpy optimized for MVE + * @param[in, out] dst Destination pointer + * @param[in] src Source pointer. + * @param[in] block_size Number of bytes to copy. + * + */ +__STATIC_FORCEINLINE void arm_memcpy_s8(int8_t *__RESTRICT dst, const int8_t *__RESTRICT src, uint32_t block_size) +{ +#if defined(ARM_MATH_MVEI) + __asm volatile(" wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vldrb.8 q0, [%[in]], #16 \n" + " vstrb.8 q0, [%[out]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [in] "+r"(src), [out] "+r"(dst) + : [cnt] "r"(block_size) + : "q0", "memory", "r14"); +#else + memcpy(dst, src, block_size); +#endif +} + +/** + * @brief memcpy wrapper for int16 + * @param[in, out] dst Destination pointer + * @param[in] src Source pointer. + * @param[in] block_size Number of bytes to copy. + * + */ +__STATIC_FORCEINLINE void arm_memcpy_q15(int16_t *__RESTRICT dst, const int16_t *__RESTRICT src, uint32_t block_size) +{ + memcpy(dst, src, block_size); +} + +#if defined(ARM_MATH_MVEI) +/** + * @brief Vector saturating doubling high multiply returning high half. + * @param[in] m1 Multiplicand + * @param[in] m2 Multiplier + * @return Result of multiplication. 
+ *
+ */
+__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve(const int32x4_t m1, const int32_t m2)
+{
+    return vqrdmulhq_n_s32(m1, m2);
+}
+
+/**
+ * @brief  Vector rounding divide by power of two.
+ * @param[in]  dividend - Dividend vector
+ * @param[in]  exponent - Divisor = power(2, exponent)
+ *                        Range: [0, 31]
+ * @return     Rounded result of division. Midpoint is rounded away from zero.
+ *
+ */
+__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve(const int32x4_t dividend, const int32_t exponent)
+{
+    const int32x4_t shift = vdupq_n_s32(-exponent);
+    const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31);
+    const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup);
+    return vrshlq_s32(fixed_up_dividend, shift);
+}
+
+/**
+ * @brief  Requantize a given vector.
+ * @param[in]  val         Vector to be requantized
+ * @param[in]  multiplier  multiplier
+ * @param[in]  shift       shift
+ *
+ * @return     Returns (val * multiplier)/(2 ^ shift) with different rounding. See arm_nn_requantize for details.
+ *
+ */
+__STATIC_FORCEINLINE int32x4_t arm_requantize_mve(const int32x4_t val, const int32_t multiplier, const int32_t shift)
+{
+    #ifdef CMSIS_NN_USE_SINGLE_ROUNDING
+    const int right_shift = MIN(-1, shift);
+    const int left_shift = shift - right_shift;
+
+    const int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
+    const int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
+
+    int32x4_t result = vqdmulhq_n_s32(vshlq_s32(val, left_shift_dup), multiplier);
+    result = vrshlq_s32(result, right_shift_dup);
+
+    return result;
+    #else
+    return arm_divide_by_power_of_two_mve(
+        arm_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier), RIGHT_SHIFT(shift));
+    #endif
+}
+
+/**
+ * @brief Vector saturating doubling high multiply with predication returning high half.
+ * @param[in] m1 Multiplicand + * @param[in] m2 Multiplier + * @param[in] p Vector predication mask + * @param[in] v_zero Vector of zeroes for merging predication intrinsic + * @return Result of multiplication. + * + */ +__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve_pred(const int32x4_t m1, + const int32_t m2, + const mve_pred16_t p, + const int32x4_t v_zero) +{ + return vqrdmulhq_m_n_s32(v_zero, m1, m2, p); +} + +/** + * @brief Vector rounding divide by power of two with predication. + * @param[in] dividend - Dividend vector + * @param[in] exponent - Divisor = power(2, exponent) + * Range: [0, 31] + * @param[in] p - Vector predication mask + * @param[in] v_zero - Vector of zeroes for merging predication intrinsic + * @return Rounded result of division. Midpoint is rounded away from zero. + * + */ +__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve_pred(const int32x4_t dividend, + const int32_t exponent, + const mve_pred16_t p, + const int32x4_t v_zero) +{ + const int32x4_t shift = vdupq_x_n_s32(-exponent, p); + const int32x4_t fixup = vshrq_x_n_s32(vandq_x_s32(dividend, shift, p), 31, p); + const int32x4_t fixed_up_dividend = vqaddq_m_s32(v_zero, dividend, fixup, p); + return vrshlq_m_s32(v_zero, fixed_up_dividend, shift, p); +} + +/** + * @brief Requantize a given vector with predication. 
+ * @param[in] val Vector to be requantized + * @param[in] multiplier multiplier + * @param[in] shift shift + * @param[in] p Vector predication mask + * + * @return Returns (val * multiplier)/(2 ^ shift) + * + */ +__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_pred(const int32x4_t val, + const int32_t multiplier, + const int32_t shift, + const mve_pred16_t p) +{ + #ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int right_shift = MIN(-1, shift); + const int left_shift = shift - right_shift; + const int32x4_t v_zero = vcreateq_s32(0, 0); + + const int32x4_t left_shift_dup = vdupq_x_n_s32(left_shift, p); + const int32x4_t right_shift_dup = vdupq_x_n_s32(right_shift, p); + + int32x4_t result = vqrdmulhq_m_n_s32(v_zero, vshlq_m_s32(v_zero, val, left_shift_dup, p), multiplier, p); + result = vrshlq_m_s32(v_zero, result, right_shift_dup, p); + + return result; + #else + const int32x4_t v_zero = vcreateq_s32(0, 0); + return arm_divide_by_power_of_two_mve_pred( + arm_doubling_high_mult_mve_pred( + vshlq_m_s32(v_zero, val, vdupq_x_n_s32(LEFT_SHIFT(shift), p), p), multiplier, p, v_zero), + RIGHT_SHIFT(shift), + p, + v_zero); + #endif +} + +__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve_32x4(const int32x4_t m1, const int32x4_t m2) +{ + return vqrdmulhq_s32(m1, m2); +} + +__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve_32x4(const int32x4_t dividend, const int32x4_t exponent) +{ + const int32x4_t shift = -exponent; + const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31); + const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup); + return vrshlq_s32(fixed_up_dividend, shift); +} + +__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, + const int32x4_t multiplier, + const int32x4_t shift) +{ + #ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int32x4_t right_shift = vminq_s32(vdupq_n_s32(-1), shift); + const int32x4_t left_shift = vqsubq_s32(shift, right_shift); + + int32x4_t result = vqdmulhq_s32(vshlq_s32(val, 
left_shift), multiplier); + result = vrshlq_s32(result, right_shift); + + return result; + #else + const int32x4_t zz = vdupq_n_s32(0); + const mve_pred16_t p = vcmpgtq_n_s32(shift, 0); + + const int32x4_t left_shift = vpselq_s32(shift, zz, p); + const int32x4_t right_shift = -vpselq_s32(zz, shift, p); + + return arm_divide_by_power_of_two_mve_32x4(arm_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier), + right_shift); + #endif +} +#endif + +// @note The following functions are used only for softmax layer, scaled bits = 5 assumed + +__STATIC_FORCEINLINE int32_t arm_nn_exp_on_negative_values(int32_t val) +{ + int32_t mask = 0; + int32_t shift = 24; + + const int32_t val_mod_minus_quarter = (val & ((1 << shift) - 1)) - (1 << shift); + const int32_t remainder = val_mod_minus_quarter - val; + const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28); + const int32_t x2 = MUL_SAT(x, x); + + int32_t result = 1895147668 + + MUL_SAT(1895147668, x + DIV_POW2(MUL_SAT(DIV_POW2(MUL_SAT(x2, x2), 2) + MUL_SAT(x2, x), 715827883) + x2, 1)); + +#define SELECT_IF_NON_ZERO(x) \ + { \ + mask = MASK_IF_NON_ZERO(remainder & (1 << shift++)); \ + result = SELECT_USING_MASK(mask, MUL_SAT(result, x), result); \ + } + + SELECT_IF_NON_ZERO(1672461947) + SELECT_IF_NON_ZERO(1302514674) + SELECT_IF_NON_ZERO(790015084) + SELECT_IF_NON_ZERO(290630308) + SELECT_IF_NON_ZERO(39332535) + SELECT_IF_NON_ZERO(720401) + SELECT_IF_NON_ZERO(242) + +#undef SELECT_IF_NON_ZERO + + mask = MASK_IF_ZERO(val); + return SELECT_USING_MASK(mask, NN_Q31_MAX, result); +} + +__STATIC_FORCEINLINE int32_t arm_nn_mult_by_power_of_two(const int32_t val, const int32_t exp) +{ + const int32_t thresh = ((1 << (31 - exp)) - 1); + int32_t result = val << exp; + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), NN_Q31_MAX, result); + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), NN_Q31_MIN, result); + return result; +} + +__STATIC_FORCEINLINE int32_t 
arm_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val) +{ + const int64_t sum = (int64_t)val + (int64_t)NN_Q31_MAX; + const int32_t half_denominator = (int32_t)((sum + (sum >= 0 ? 1 : -1)) / 2L); + int32_t x = 1515870810 + MUL_SAT(half_denominator, -1010580540); + + const int32_t shift = (1 << 29); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + + return MUL_POW2(x, 1); +} + +/** + @brief Write 2 s16 elements and post increment pointer. + @param[in] dest_q15 Pointer to pointer that holds address of destination. + @param[in] src_q31 Input value to be written. + */ +__STATIC_FORCEINLINE void arm_nn_write_q15x2_ia(int16_t **dest_q15, int32_t src_q31) +{ + int32_t val = src_q31; + + memcpy(*dest_q15, &val, 4); + *dest_q15 += 2; +} + +/** + @brief Write 2 s8 elements and post increment pointer. + @param[in] dst Pointer to pointer that holds address of destination. + @param[in] src Input value to be written. + */ +__STATIC_FORCEINLINE void arm_nn_write_s8x2_ia(int8_t **dst, int16_t src) +{ + memcpy(*dst, &src, 2); + *dst += 2; +} + +// Support functions for LSTM +/** + * @brief Update LSTM function for an iteration step using s8 input and output, and s16 internally. + * + * @param[in] data_in Data input pointer + * @param[in] hidden_in Hidden state/ recurrent input pointer + * @param[out] hidden_out Hidden state/ recurrent output pointer + * @param[in] params Struct containg all information about the lstm operator, see + * arm_nn_types. + * @param[in] buffers Struct containg pointers to all temporary scratch buffers needed for the + * lstm operator, see arm_nn_types. + * @param[in] batch_offset Number of timesteps between consecutive batches. + * E.g for params->timing_major = true, all batches for t=0 are stored sequentially, so batch offset = 1. 
+ * For params->time major = false, all time steps are stored continously before the next batch, so + * batch offset = params->time_steps. + * @return The function returns ARM_CMSIS_NN_SUCCESS + + */ +arm_cmsis_nn_status arm_nn_lstm_step_s8(const int8_t *data_in, + const int8_t *hidden_in, + int8_t *hidden_out, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers, + const int32_t batch_offset); + +/** + * @brief Update LSTM function for an iteration step using s16 input and output, and s16 internally. + * + * @param[in] data_in Data input pointer + * @param[in] hidden_in Hidden state/ recurrent input pointer + * @param[out] hidden_out Hidden state/ recurrent output pointer + * @param[in] params Struct containg all information about the lstm operator, see + * arm_nn_types. + * @param[in] buffers Struct containg pointers to all temporary scratch buffers needed for the + * lstm operator, see arm_nn_types. + * @param[in] batch_offset Number of timesteps between consecutive batches. + * E.g for params->timing_major = true, all batches for t=0 are stored sequentially, so batch offset = 1. + * For params->time major = false, all time steps are stored continously before the next batch, so + * batch offset = params->time_steps. + * @return The function returns ARM_CMSIS_NN_SUCCESS + + */ +arm_cmsis_nn_status arm_nn_lstm_step_s16(const int16_t *data_in, + const int16_t *hidden_in, + int16_t *hidden_out, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers, + const int32_t batch_offset); + +/** + * @brief Updates a LSTM gate for an iteration step of LSTM function, int8x8_16 version. + * + * @param[in] data_in Data input pointer + * @param[in] hidden_in Hidden state/ recurrent input pointer + * @param[in] gate_data Struct containing all information about the gate caluclation, see + * arm_nn_types. 
+ * @param[in] params Struct containing all information about the lstm_operation, see + * arm_nn_types + * @param[out] output Hidden state/ recurrent output pointer + * @param[in] batch_offset Number of timesteps between consecutive batches, see + * arm_nn_lstm_step_s8. + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_nn_lstm_calculate_gate_s8_s16(const int8_t *data_in, + const int8_t *hidden_in, + const cmsis_nn_lstm_gate *gate_data, + const cmsis_nn_lstm_params *params, + int16_t *output, + const int32_t batch_offset); + +/** + * @brief Updates a LSTM gate for an iteration step of LSTM function, int16x8_16 version. + * + * @param[in] data_in Data input pointer + * @param[in] hidden_in Hidden state/ recurrent input pointer + * @param[in] gate_data Struct containing all information about the gate caluclation, see + * arm_nn_types. + * @param[in] params Struct containing all information about the lstm_operation, see + * arm_nn_types + * @param[out] output Hidden state/ recurrent output pointer + * @param[in] batch_offset Number of timesteps between consecutive batches, see + * arm_nn_lstm_step_s16. + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_nn_lstm_calculate_gate_s16(const int16_t *data_in, + const int16_t *hidden_in, + const cmsis_nn_lstm_gate *gate_data, + const cmsis_nn_lstm_params *params, + int16_t *output, + const int32_t batch_offset); + +/** + * @brief The result of the multiplication is accumulated to the passed result buffer. + * Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch dimension composed by input vectors independent + * from each other). + * + * @param[in] lhs Batched vector + * @param[in] rhs Weights - input matrix (H(Rows)xW(Columns)) + * @param[in] effective_bias Bias + lhs_offset * kernel_sum term precalculated into a constant vector. 
+ * @param[out] dst Output + * @param[in] dst_multiplier Multiplier for quantization + * @param[in] dst_shift Shift for quantization + * @param[in] rhs_cols Vector/matarix column length + * @param[in] rhs_rows Row count of matrix + * @param[in] batches Batch size + * @param[in] batch_offset Number of timesteps between consecutive batches in input, see arm_nn_lstm_step_s8. Note + that the output is always stored with sequential batches. + * @return The function returns ARM_CMSIS_NN_SUCCESS + + */ +arm_cmsis_nn_status arm_nn_vec_mat_mul_result_acc_s8_s16(const int8_t *lhs, + const int8_t *rhs, + const int32_t *effective_bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t batches, + const int32_t batch_offset); + +/** + * @brief The result of the multiplication is accumulated to the passed result buffer. + * Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch dimension composed by input vectors independent + * from each other). + * + * @param[in] lhs Batched vector + * @param[in] rhs Weights - input matrix (H(Rows)xW(Columns)) + * @param[in] effective_bias Bias + lhs_offset * kernel_sum term precalculated into a constant vector. + * @param[out] dst Output + * @param[in] dst_multiplier Multiplier for quantization + * @param[in] dst_shift Shift for quantization + * @param[in] rhs_cols Vector/matarix column length + * @param[in] rhs_rows Row count of matrix + * @param[in] batches Batch size + * @param[in] batch_offset Number of timesteps between consecutive batches in input, see arm_nn_lstm_step_s16. + Note that the output is always stored with sequential batches. 
+ * @return The function returns ARM_CMSIS_NN_SUCCESS + + */ +arm_cmsis_nn_status arm_nn_vec_mat_mul_result_acc_s16(const int16_t *lhs, + const int8_t *rhs, + const int64_t *effective_bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t batches, + const int32_t batch_offset); + +/** + * @brief s16 elementwise multiplication with s8 output + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] block_size number of samples per batch + * @param[in] batch_size number of samples per batch + * @param[in] batch_offset Number of timesteps between consecutive batches in output, see + * arm_nn_lstm_step_s8. Note that it is assumed that the input is stored with sequential batches. + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s16_s8(const int16_t *input_1_vect, + const int16_t *input_2_vect, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t block_size, + const int32_t batch_size, + const int32_t batch_offset); + +/** + * @brief s16 elementwise multiplication with s16 output + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] block_size number of samples per batch + * @param[in] batch_size number of samples per batch + * @param[in] batch_offset Number of timesteps between consecutive batches in output, see + * arm_nn_lstm_step_s16. 
Note that it is assumed that the input is stored with sequential batches. + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s16_batch_offset(const int16_t *input_1_vect, + const int16_t *input_2_vect, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t block_size, + const int32_t batch_size, + const int32_t batch_offset); + +/** + * @brief s16 elementwise multiplication. The result of the multiplication is accumulated to the passed result buffer. + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Not used. + * @param[in] input_2_offset offset for input 2. Not used. + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Not used. + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -32768 + * @param[in] out_activation_max maximum value to clamp output to. Max: 32767 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_acc_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief Check if a broadcast is required between 2 cmsis_nn_dims. + * @param[in] shape_1 pointer to input tensor 1 + * @param[in] shape_2 pointer to input tensor 2 + * @return The function returns 1 if a broadcast is required, or 0 if not. 
+ * + * @details Compares each dimension and returns 1 if any dimension does not match. + * This function does not check that broadcast rules are met. + */ +__STATIC_FORCEINLINE int32_t arm_check_broadcast_required(const cmsis_nn_dims *shape_1, const cmsis_nn_dims *shape_2) +{ + if ((shape_1->n != shape_2->n) || (shape_1->h != shape_2->h) || (shape_1->w != shape_2->w) || + (shape_1->c != shape_2->c)) + { + return 1; + } + + return 0; +} + +#ifdef __cplusplus +} +#endif + +#endif /* ARM_NNSUPPORTFUNCTIONS_H */ diff --git a/src/tensorflow/LICENSE b/src/third_party/cmsis_nn/LICENSE similarity index 99% rename from src/tensorflow/LICENSE rename to src/third_party/cmsis_nn/LICENSE index 40f8c34..8dada3e 100644 --- a/src/tensorflow/LICENSE +++ b/src/third_party/cmsis_nn/LICENSE @@ -1,5 +1,3 @@ -Copyright 2019 The TensorFlow Authors. All rights reserved. - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -180,7 +178,7 @@ Copyright 2019 The TensorFlow Authors. All rights reserved. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -188,7 +186,7 @@ Copyright 2019 The TensorFlow Authors. All rights reserved. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_nn_activation_s16.c b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_nn_activation_s16.c new file mode 100644 index 0000000..28d4ad7 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_nn_activation_s16.c @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022, 2024 Arm Limited and/or its affiliates + * + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_activation_s16.c + * Description: Q15 neural network activation function using direct table look-up + * + * $Date: 19 January 2024 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nn_tables.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Acti + * @{ + */ + +/* + * @brief Neural network activation function using direct table look-up + * + * @note Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_nn_activation_s16(const int16_t *input, + int16_t *output, + const int32_t size, + const int32_t left_shift, + const arm_nn_activation_type type) +{ + uint32_t abs_input_shift, max_saturation; + switch (type) + { + case ARM_SIGMOID: + abs_input_shift = 9; + max_saturation = 0x7FFF << 10; + break; + case ARM_TANH: + default: + abs_input_shift = 8; + max_saturation = 0xFFFF << 8; + break; + } + + const int32_t input_multiplier = (left_shift < 0) ? 3 : 3 << left_shift; + const int32_t abs_left_shift = (left_shift < 0) ? -left_shift : 0; + const int32_t rounding = (abs_left_shift > 0) ? 1 << (abs_left_shift - 1) : 0; + // Use the LUT for sigmoid and take into account, that + // tanh(x) = 2*sigmoid(2*x) - 1 + + for (int i = 0; i < size; ++i, input++, output++) + { + const int32_t input_data = ((*input) * input_multiplier + rounding) >> abs_left_shift; + const uint32_t abs_input_data = input_data > 0 ? input_data : -input_data; + const uint32_t uh = abs_input_data >> abs_input_shift; + uint32_t result; + + if (uh >= 255) + { + result = max_saturation; + } + else + { + const uint32_t ua = sigmoid_table_uint16[uh]; + const uint32_t ub = sigmoid_table_uint16[uh + 1]; + uint32_t ut; + if (type == ARM_SIGMOID) + { + ut = abs_input_data & 0x1ff; + } + else + { + ut = abs_input_data & 0x0ff; + } + result = (ua << abs_input_shift) + ut * (ub - ua); + } + if (type == ARM_SIGMOID) + { + result = (input_data >= 0) ? (result + (1 << 9)) : ((1 << 25) - result + (1 << 9) - 1); + result >>= 10; + } + else + { + result = (input_data >= 0) ? 
(result - (1 << 23)) + (1 << 7) : ((-result + (1 << 23)) + (1 << 7) - 1); + result >>= 8; + } + *output = (int16_t)result; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Acti group + */ diff --git a/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu6_s8.c b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu6_s8.c new file mode 100644 index 0000000..a9ecb12 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu6_s8.c @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2019, 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_relu6_s8.c + * Description: Basic s8 version of ReLU6 + * + * $Date: 26 October 2022 + * $Revision: V.1.0.2 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Acti + * @{ + */ + +/* + * Basic ReLU6 function + * + * Refer to header file for details. 
+ * + */ + +void arm_relu6_s8(int8_t *data, uint16_t size) +{ + int32_t i; + + for (i = 0; i < size; i++) + { + int32_t ip = data[i]; + + ip = MAX(ip, 0); + data[i] = MIN(ip, 6); + } +} + +/** + * @} end of Acti group + */ diff --git a/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q15.c b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q15.c new file mode 100644 index 0000000..d079167 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q15.c @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_relu_q15.c + * Description: Q15 version of ReLU + * + * $Date: 31 January 2023 + * $Revision: V.1.1.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Acti + * @{ + */ + +/* + * Q15 ReLu function + * + * Refer header file for details. 
+ * + */ + +void arm_relu_q15(int16_t *data, uint16_t size) +{ + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + /* Run the following code for M cores with DSP extension */ + + uint16_t i = size >> 1; + int16_t *input = data; + int16_t *output = data; + int32_t in; + int32_t buf; + int32_t mask; + + while (i) + { + in = arm_nn_read_q15x2_ia((const int16_t **)&input); + + /* extract the first bit */ + buf = ROR(in & 0x80008000, 15); + + /* if MSB=1, mask will be 0xFF, 0x0 otherwise */ + mask = QSUB16(0x00000000, buf); + + arm_nn_write_q15x2_ia(&output, in & (~mask)); + i--; + } + + if (size & 0x1) + { + if (*input < 0) + { + *input = 0; + } + input++; + } +#else + /* Run the following code as reference implementation for M cores without DSP extension */ + uint16_t i; + + for (i = 0; i < size; i++) + { + if (data[i] < 0) + data[i] = 0; + } + +#endif /* ARM_MATH_DSP */ +} + +/** + * @} end of Acti group + */ diff --git a/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q7.c b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q7.c new file mode 100644 index 0000000..58d2284 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ActivationFunctions/arm_relu_q7.c @@ -0,0 +1,105 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_relu_q7.c + * Description: Q7 version of ReLU + * + * $Date: 31 January 2023 + * $Revision: V.1.2.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Acti + * @{ + */ + +/* + * Q7 ReLu function + * + * Refer header file for details. + * + */ + +void arm_relu_q7(int8_t *data, uint16_t size) +{ + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + /* Run the following code for M cores with DSP extension */ + + uint16_t i = size >> 2; + int8_t *input = data; + int8_t *output = data; + int32_t in; + int32_t buf; + int32_t mask; + + while (i) + { + in = arm_nn_read_s8x4_ia((const int8_t **)&input); + + /* extract the first bit */ + buf = (int32_t)ROR((uint32_t)in & 0x80808080, 7); + + /* if MSB=1, mask will be 0xFF, 0x0 otherwise */ + mask = QSUB8(0x00000000, buf); + + arm_nn_write_s8x4_ia(&output, in & (~mask)); + + i--; + } + + i = size & 0x3; + while (i) + { + if (*input < 0) + { + *input = 0; + } + input++; + i--; + } + +#else + /* Run the following code as reference implementation for cores without DSP extension */ + + uint16_t i; + + for (i = 0; i < size; i++) + { + if (data[i] < 0) + data[i] = 0; + } + +#endif +} + +/** + * @} end of Acti group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s16.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s16.c new file mode 100644 index 0000000..7e3dce0 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s16.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * 
SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_add_s16 + * Description: Elementwise add + * + * $Date: 24 Oct 2022 + * $Revision: V.2.2.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/* + * s16 elementwise add + * + * Refer header file for details. 
+ * + */ + +/* Note: __SHIFT is expected to be <=0 */ + +arm_cmsis_nn_status arm_elementwise_add_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + (void)input_1_offset; + (void)input_2_offset; + (void)out_offset; + +#if defined(ARM_MATH_MVEI) + + int32_t count = block_size; + + while (count > 0) + { + + mve_pred16_t pred = vctp32q(count); + + int32x4_t vect_1 = vldrhq_z_s32(input_1_vect, pred); + int32x4_t vect_2 = vldrhq_z_s32(input_2_vect, pred); + + vect_1 = vshlq_r_s32(vect_1, left_shift); + vect_2 = vshlq_r_s32(vect_2, left_shift); + + vect_1 = arm_requantize_mve(vect_1, input_1_mult, input_1_shift); + vect_2 = arm_requantize_mve(vect_2, input_2_mult, input_2_shift); + + vect_1 = vaddq_s32(vect_1, vect_2); + vect_1 = arm_requantize_mve(vect_1, out_mult, out_shift); + + vect_1 = vmaxq_s32(vect_1, vdupq_n_s32(out_activation_min)); + vect_1 = vminq_s32(vect_1, vdupq_n_s32(out_activation_max)); + + input_1_vect += 4; + input_2_vect += 4; + + vstrhq_p_s32(output, vect_1, pred); + + output += 4; + count -= 4; + } + +#else // #if defined(ARM_MATH_MVEI) + int32_t input_1; + int32_t input_2; + int32_t sum; + int32_t two_halfword_1, two_halfword_2; + int16_t sum_1, sum_2; + int32_t loop_count = block_size / 2; + while (loop_count > 0) + { + two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + input_1 = (int16_t)(two_halfword_1 & 0xFFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF) << left_shift; + 
input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + sum_1 = (int16_t)sum; + + input_1 = (int16_t)(two_halfword_1 >> 16) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = (int16_t)(two_halfword_2 >> 16) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + sum_2 = (int16_t)sum; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(sum_1, sum_2)); + + loop_count--; + } + loop_count = block_size & 0x1; + + while (loop_count > 0) + { + /* C = A + B */ + input_1 = *input_1_vect++ << left_shift; + input_2 = *input_2_vect++ << left_shift; + + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + + *output++ = (int16_t)sum; + + /* Decrement loop counter */ + loop_count--; + } +#endif // #if defined(ARM_MATH_MVEI) + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen group + */ \ No newline at end of file diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s8.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s8.c new file mode 100644 index 0000000..e2d895b --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_add_s8.c @@ -0,0 +1,234 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may 
+ * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_add_s8 + * Description: Elementwise add + * + * $Date: 5 January 2023 + * $Revision: V.3.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/* + * s8 elementwise add + * + * Refer header file for details. 
+ * + */ + +/* Note: __SHIFT is expected to be <=0 */ + +arm_cmsis_nn_status arm_elementwise_add_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ +#if defined(ARM_MATH_MVEI) + int32_t count = block_size; + + while (count > 0) + { + int32x4_t vect_1; + int32x4_t vect_2; + + mve_pred16_t p = vctp32q((uint32_t)count); + + vect_1 = vldrbq_z_s32(input_1_vect, p); + vect_2 = vldrbq_z_s32(input_2_vect, p); + + vect_1 = vaddq_s32(vect_1, vdupq_n_s32(input_1_offset)); + vect_2 = vaddq_s32(vect_2, vdupq_n_s32(input_2_offset)); + + vect_1 = vshlq_r_s32(vect_1, left_shift); + vect_2 = vshlq_r_s32(vect_2, left_shift); + + vect_1 = arm_requantize_mve(vect_1, input_1_mult, input_1_shift); + vect_2 = arm_requantize_mve(vect_2, input_2_mult, input_2_shift); + + vect_1 = vaddq_s32(vect_1, vect_2); + vect_1 = arm_requantize_mve(vect_1, out_mult, out_shift); + + vect_1 = vaddq_n_s32(vect_1, out_offset); + + vect_1 = vmaxq_s32(vect_1, vdupq_n_s32(out_activation_min)); + vect_1 = vminq_s32(vect_1, vdupq_n_s32(out_activation_max)); + + input_1_vect += 4; + input_2_vect += 4; + vstrbq_p_s32(output, vect_1, p); + + output += 4; + count -= 4; + } +#else + int32_t loop_count; + int32_t input_1; + int32_t input_2; + int32_t sum; + + #if defined(ARM_MATH_DSP) + int32_t a_1, b_1, a_2, b_2; + + int32_t offset_1_packed, offset_2_packed; + + int8_t r1, r2, r3, r4; + + offset_1_packed = (input_1_offset << 16U) | (input_1_offset & 0x0FFFFL); + offset_2_packed = (input_2_offset << 16U) | (input_2_offset & 0x0FFFFL); + + loop_count = block_size >> 2; + + while (loop_count > 0) + { 
+ /* 4 outputs are calculated in one loop. The order of calculation is follows the order of output sign extension + intrinsic */ + input_1_vect = read_and_pad_reordered(input_1_vect, &b_1, &a_1); + input_2_vect = read_and_pad_reordered(input_2_vect, &b_2, &a_2); + + a_1 = SADD16(a_1, offset_1_packed); + b_1 = SADD16(b_1, offset_1_packed); + + a_2 = SADD16(a_2, offset_2_packed); + b_2 = SADD16(b_2, offset_2_packed); + + /* Sum 1 */ + input_1 = (b_1 & 0x0FFFF) << left_shift; + + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + + input_2 = (b_2 & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + r1 = (int8_t)sum; + + /* Sum 3 */ + input_1 = ((b_1 >> 16) & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + + input_2 = ((b_2 >> 16) & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + r3 = (int8_t)sum; + + /* Sum 2 */ + input_1 = (a_1 & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + + input_2 = (a_2 & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + r2 = (int8_t)sum; + + /* Sum 4 */ + input_1 = ((a_1 >> 16) & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + + input_2 = ((a_2 >> 16) & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, 
input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + r4 = (int8_t)sum; + + arm_nn_write_s8x4_ia(&output, PACK_S8x4_32x1(r1, r2, r3, r4)); + + loop_count--; + } + + loop_count = block_size & 0x3; + #else + loop_count = block_size; + #endif + + while (loop_count > 0) + { + /* C = A + B */ + + input_1 = (*input_1_vect++ + input_1_offset) << left_shift; + input_2 = (*input_2_vect++ + input_2_offset) << left_shift; + + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum += out_offset; + + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + + *output++ = (int8_t)sum; + + /* Decrement loop counter */ + loop_count--; + } + +#endif /* ARM_MATH_MVEI */ + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_acc_s16.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_acc_s16.c new file mode 100644 index 0000000..01cd83f --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_acc_s16.c @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022, 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_acc_s16 + * Description: Accumulative element wise multiplication + * + * $Date: 19 January 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/** + * @brief s16 element wise accumulative multiplication of two vectors + * + * @note Refer header file for details. + * + */ +arm_cmsis_nn_status arm_elementwise_mul_acc_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + (void)input_1_offset; + (void)input_2_offset; + (void)out_offset; + int32_t loop_count; + + const int32_t activation_max = (out_activation_max > 0) ? out_activation_max : NN_Q15_MAX; + const int32_t activation_min = (out_activation_max > 0) ? 
out_activation_min : NN_Q15_MIN; + +#if defined(ARM_MATH_MVEI) + + loop_count = block_size; + + while (loop_count > 0) + { + mve_pred16_t pred = vctp32q(loop_count); + + int32x4_t input_1 = vldrhq_z_s32(input_1_vect, pred); + int32x4_t input_2 = vldrhq_z_s32(input_2_vect, pred); + + int32x4_t res_0 = vmulq_s32(input_1, input_2); + + res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); + + res_0 = vaddq_s32(res_0, vldrhq_z_s32(output, pred)); + + res_0 = vmaxq_s32(res_0, vdupq_n_s32(activation_min)); + res_0 = vminq_s32(res_0, vdupq_n_s32(activation_max)); + + vstrhq_p_s32(output, res_0, pred); + input_1_vect += 4; + input_2_vect += 4; + + output += 4; + loop_count -= 4; + } + +#else + int32_t input_1; + int32_t input_2; + int32_t mul_res; + int32_t two_halfword_1, two_halfword_2; + int16_t mul_1, mul_2; + loop_count = block_size / 2; + + while (loop_count > 0) + { + two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + #if defined(ARM_MATH_DSP) + mul_res = SMULBB(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 & 0xFFFF); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res += output[0]; + + mul_res = MAX(mul_res, activation_min); + mul_res = MIN(mul_res, activation_max); + mul_1 = (int16_t)mul_res; + + #if defined(ARM_MATH_DSP) + mul_res = SMULTT(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 >> 16); + input_2 = (int16_t)(two_halfword_2 >> 16); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res += output[1]; + mul_res = MAX(mul_res, activation_min); + mul_res = MIN(mul_res, activation_max); + mul_2 = (int16_t)mul_res; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(mul_1, mul_2)); + + loop_count--; + } + loop_count = block_size & 0x1; + + while 
(loop_count > 0) + { + + input_1 = *input_1_vect++; + input_2 = *input_2_vect++; + + mul_res = input_1 * input_2; + + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res += output[0]; + + mul_res = MAX(mul_res, activation_min); + mul_res = MIN(mul_res, activation_max); + + *output++ = (int16_t)mul_res; + + loop_count--; + } +#endif // #if defined(ARM_MATH_MVEI) + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16.c new file mode 100644 index 0000000..7315b9c --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16.c @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_s16 + * Description: Element wise multiplication + * + * $Date: 20 January 2023 + * $Revision: V.2.4.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/** + * @brief s16 element wise multiplication of two vectors + * + * @note Refer header file for details. + * + */ +arm_cmsis_nn_status arm_elementwise_mul_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + (void)input_1_offset; + (void)input_2_offset; + (void)out_offset; + int32_t loop_count; + +#if defined(ARM_MATH_MVEI) + + loop_count = block_size; + + while (loop_count > 0) + { + mve_pred16_t pred = vctp32q(loop_count); + + int32x4_t input_1 = vldrhq_z_s32(input_1_vect, pred); + int32x4_t input_2 = vldrhq_z_s32(input_2_vect, pred); + + int32x4_t res_0 = vmulq_s32(input_1, input_2); + + res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); + + res_0 = vmaxq_s32(res_0, vdupq_n_s32(out_activation_min)); + res_0 = vminq_s32(res_0, vdupq_n_s32(out_activation_max)); + + vstrhq_p_s32(output, res_0, pred); + input_1_vect += 4; + input_2_vect += 4; + + output += 4; + loop_count -= 4; + } + +#else + int32_t input_1; + int32_t input_2; + int32_t mul_res; + int32_t two_halfword_1, two_halfword_2; + int16_t mul_1, mul_2; + loop_count = block_size / 2; + + while (loop_count > 0) + { + 
two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + #if defined(ARM_MATH_DSP) + mul_res = SMULBB(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 & 0xFFFF); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + mul_1 = (int16_t)mul_res; + + #if defined(ARM_MATH_DSP) + mul_res = SMULTT(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 >> 16); + input_2 = (int16_t)(two_halfword_2 >> 16); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + mul_2 = (int16_t)mul_res; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(mul_1, mul_2)); + + loop_count--; + } + loop_count = block_size & 0x1; + + while (loop_count > 0) + { + /* C = A * B */ + + input_1 = *input_1_vect++; + input_2 = *input_2_vect++; + + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + + *output++ = (int16_t)mul_res; + + /* Decrement loop counter */ + loop_count--; + } +#endif // #if defined(ARM_MATH_MVEI) + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_batch_offset.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_batch_offset.c new file mode 100644 index 0000000..229a415 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_batch_offset.c @@ -0,0 +1,166 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * 
SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_s16_batch_offset + * Description: Element wise multiplication + * + * $Date: 18 March 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/** + * @brief s16 element wise multiplication of batches of two vectors + * + * @note Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_elementwise_mul_s16_batch_offset(const int16_t *input_1_vect, + const int16_t *input_2_vect, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t block_size, + const int32_t batch_size, + const int32_t batch_offset) +{ + + int32_t loop_count; + + for (int i = 0; i < batch_size; i++) + { + +#if defined(ARM_MATH_MVEI) + + const int16_t *input_1_ptr = input_1_vect; + const int16_t *input_2_ptr = input_2_vect; + int16_t *output_ptr = output; + + loop_count = block_size; + + while (loop_count > 0) + { + mve_pred16_t pred = vctp32q(loop_count); + + int32x4_t input_1 = vldrhq_z_s32(input_1_ptr, pred); + int32x4_t input_2 = vldrhq_z_s32(input_2_ptr, pred); + + int32x4_t res_0 = vmulq_s32(input_1, input_2); + + res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); + res_0 = vaddq_n_s32(res_0, out_offset); + + res_0 = vmaxq_s32(res_0, vdupq_n_s32(NN_Q15_MIN)); + res_0 = vminq_s32(res_0, vdupq_n_s32(NN_Q15_MAX)); + + vstrhq_p_s32(output_ptr, res_0, pred); + input_1_ptr += 4; + input_2_ptr += 4; + + output_ptr += 4; + loop_count -= 4; + } + + input_1_vect += block_size; + input_2_vect += block_size; + output += block_size; + +#else + int32_t input_1; + int32_t input_2; + int32_t mul_res; + int32_t two_halfword_1, two_halfword_2; + int16_t mul_1, mul_2; + loop_count = block_size / 2; + + while (loop_count > 0) + { + two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + #if defined(ARM_MATH_DSP) + mul_res = SMULBB(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 & 0xFFFF); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = MAX(mul_res, NN_Q15_MIN); + mul_res = MIN(mul_res, NN_Q15_MAX); + mul_1 = (int16_t)mul_res; + + #if defined(ARM_MATH_DSP) 
+ mul_res = SMULTT(two_halfword_1, two_halfword_2); + #else + input_1 = (int16_t)(two_halfword_1 >> 16); + input_2 = (int16_t)(two_halfword_2 >> 16); + mul_res = input_1 * input_2; + #endif + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = MAX(mul_res, NN_Q15_MIN); + mul_res = MIN(mul_res, NN_Q15_MAX); + mul_2 = (int16_t)mul_res; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(mul_1, mul_2)); + + loop_count--; + } + + if (block_size & 0x1) + { + /* C = A * B */ + + input_1 = *input_1_vect++; + input_2 = *input_2_vect++; + + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + + mul_res = MAX(mul_res, NN_Q15_MIN); + mul_res = MIN(mul_res, NN_Q15_MAX); + + *output++ = (int16_t)mul_res; + } +#endif // #if defined(ARM_MATH_MVEI) + + output += (batch_offset - 1) * block_size; + } + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_s8.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_s8.c new file mode 100644 index 0000000..83a56b6 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s16_s8.c @@ -0,0 +1,136 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_s16_s8.c + * Description: Elementwise multiplication of 16 bit input with 8 bit output + * + * $Date: 20 January 2023 + * $Revision: V.2.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup BasicMath + * @{ + */ + +/* + * s16 elementwise multiplication with s8 output + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_elementwise_mul_s16_s8(const int16_t *input_1_vect, + const int16_t *input_2_vect, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t block_size, + const int32_t batch_size, + const int32_t batch_offset) +{ + + for (int i = 0; i < batch_size; i++) + { + int32_t loop_count = block_size; +#if defined(ARM_MATH_MVEI) + + const int16_t *input_1_ptr = input_1_vect; + const int16_t *input_2_ptr = input_2_vect; + int8_t *output_ptr = output; + + while (loop_count > 0) + { + mve_pred16_t pred = vctp32q(loop_count); + + int32x4_t input_1 = vldrhq_z_s32(input_1_ptr, pred); + int32x4_t input_2 = vldrhq_z_s32(input_2_ptr, pred); + + int32x4_t res_0 = vmulq_s32(input_1, input_2); + + res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); + res_0 = vaddq_n_s32(res_0, out_offset); + + res_0 = vmaxq_s32(res_0, vdupq_n_s32(NN_Q7_MIN)); + res_0 = vminq_s32(res_0, vdupq_n_s32(NN_Q7_MAX)); + + vstrbq_p_s32(output_ptr, res_0, pred); + input_1_ptr += 4; + input_2_ptr += 4; + + output_ptr += 4; + loop_count -= 4; + } + + input_1_vect += block_size; + input_2_vect += block_size; + output += block_size; + +#else + #if defined(ARM_MATH_DSP) + + while (loop_count > 1) + { + int32_t input_1 = 
arm_nn_read_q15x2_ia(&input_1_vect); + int32_t input_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + int32_t mul_res = SMULBB(input_1, input_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = CLAMP(mul_res, NN_Q7_MAX, NN_Q7_MIN); + int32_t mul = (int16_t)(mul_res & 0xFF); + + mul_res = SMULTT(input_1, input_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = CLAMP(mul_res, NN_Q7_MAX, NN_Q7_MIN); + mul |= (int16_t)mul_res << 8; + + arm_nn_write_s8x2_ia(&output, mul); + loop_count -= 2; + } + #endif + for (int j = 0; j < loop_count; j++, input_1_vect++, input_2_vect++, output++) + { + /* C = A * B */ + int32_t mul_res = (*input_1_vect) * (*input_2_vect); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + + mul_res = CLAMP(mul_res, NN_Q7_MAX, NN_Q7_MIN); + + *output = (int8_t)mul_res; + } + +#endif + + output += (batch_offset - 1) * block_size; + } + return ARM_CMSIS_NN_SUCCESS; +} +/** + * @} end of BasicMath group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s8.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s8.c new file mode 100644 index 0000000..484f214 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_elementwise_mul_s8.c @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_s8 + * Description: Element wise multiplication + * + * $Date: 20 January 2023 + * $Revision: V.2.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup groupElementwise + * @{ + */ + +/* + * s8 element wise multiplication of two vectors + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_elementwise_mul_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + + int32_t loop_count; +#if defined(ARM_MATH_MVEI) + + loop_count = (block_size + 3) / 4; + uint32_t num_elements = block_size; + + for (int i = 0; i < loop_count; i++) + { + mve_pred16_t p = vctp32q(num_elements); + + int32x4_t input_1 = vldrbq_z_s32(input_1_vect, p); + input_1 = vaddq_n_s32(input_1, input_1_offset); + + int32x4_t input_2 = vldrbq_z_s32(input_2_vect, p); + input_2 = vaddq_n_s32(input_2, input_2_offset); + + int32x4_t res_0 = vmulq_s32(input_1, input_2); + + res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift)); + + res_0 += vdupq_n_s32(out_offset); + + res_0 = vmaxq_s32(res_0, vdupq_n_s32(out_activation_min)); + res_0 = vminq_s32(res_0, vdupq_n_s32(out_activation_max)); + + vstrbq_p_s32(output, res_0, p); + input_1_vect += 4; + input_2_vect += 4; + output += 4; + 
num_elements -= 4; + } + +#else + int32_t input_1; + int32_t input_2; + int32_t mul_res; + + #if defined(ARM_MATH_DSP) + int32_t a_1, b_1, a_2, b_2; + + int32_t offset_1_packed, offset_2_packed; + + int8_t r1, r2, r3, r4; + + offset_1_packed = (input_1_offset << 16U) | (input_1_offset & 0x0FFFFL); + offset_2_packed = (input_2_offset << 16U) | (input_2_offset & 0x0FFFFL); + + loop_count = block_size >> 2; + + while (loop_count > 0) + { + /* 4 outputs are calculated in one loop. The order of calculation is follows the order of output sign extension + intrinsic */ + input_1_vect = read_and_pad_reordered(input_1_vect, &b_1, &a_1); + input_2_vect = read_and_pad_reordered(input_2_vect, &b_2, &a_2); + + a_1 = SADD16(a_1, offset_1_packed); + b_1 = SADD16(b_1, offset_1_packed); + + a_2 = SADD16(a_2, offset_2_packed); + b_2 = SADD16(b_2, offset_2_packed); + + /* Mul 1 */ + mul_res = SMULBB(b_1, b_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + r1 = (int8_t)mul_res; + + /* Mul 3 */ + mul_res = SMULTT(b_1, b_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + r3 = (int8_t)mul_res; + + /* Mul 2 */ + mul_res = SMULBB(a_1, a_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + r2 = (int8_t)mul_res; + + /* Mul 4 */ + mul_res = SMULTT(a_1, a_2); + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + r4 = (int8_t)mul_res; + + arm_nn_write_s8x4_ia(&output, PACK_S8x4_32x1(r1, r2, r3, r4)); + + loop_count--; + } + + loop_count = block_size & 0x3; + #else + loop_count = block_size; + #endif + + while (loop_count > 0) 
+ { + /* C = A * B */ + + input_1 = *input_1_vect++ + input_1_offset; + input_2 = *input_2_vect++ + input_2_offset; + + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift) + out_offset; + + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + + *output++ = (int8_t)mul_res; + + /* Decrement loop counter */ + loop_count--; + } +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_maximum_s8.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_maximum_s8.c new file mode 100644 index 0000000..98351ba --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_maximum_s8.c @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_maximum_s8 + * Description: Minimum and Maximum + * + * $Date: 08 October 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup minimumMaximum + * @{ + */ + +static arm_cmsis_nn_status +arm_max_no_broadcast_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec1 = vldrbq_z_s8(input_1, p); + input_1 += 16; + int8x16_t vec2 = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vmaxq_s8(vec1, vec2), p); + output += 16; + flat_size -= 16; + } +#else + while (flat_size > 0) + { + int8_t in1 = *input_1++; + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in1 : in2; + --flat_size; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +static arm_cmsis_nn_status +arm_max_scalar_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + int8x16_t scalar_vec = vdupq_n_s8(*input_1); + + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + int8x16_t vec = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vmaxq_s8(scalar_vec, vec), p); + output += 16; + flat_size -= 16; + } +#else + int8_t in1 = *input_1; + while (flat_size > 0) + { + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in1 : in2; + --flat_size; + } +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/* + * s8 maximum + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_maximum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims) +{ + (void)ctx; + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t output_width = output_dims->w; + + const int32_t input_1_batch = input_1_dims->n; + const int32_t input_1_height = input_1_dims->h; + const int32_t input_1_width = input_1_dims->w; + const int32_t input_1_channels = input_1_dims->c; + + const int32_t input_2_batch = input_2_dims->n; + const int32_t input_2_height = input_2_dims->h; + const int32_t input_2_width = input_2_dims->w; + const int32_t input_2_channels = input_2_dims->c; + + int32_t flat_size_1 = input_1_batch * input_1_height * input_1_width * input_1_channels; + int32_t flat_size_2 = input_2_batch * input_2_height * input_2_width * input_2_channels; + + if (arm_check_broadcast_required(input_1_dims, input_2_dims)) + { + if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_data, input_2_data, output_data, flat_size_2); + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_data, input_1_data, output_data, flat_size_1); + } + else + { + int32_t width_1_diff = input_1_width >= input_2_width ? 0 : input_1_channels; + int32_t width_2_diff = input_2_width >= input_1_width ? 0 : input_2_channels; + + int32_t height_1_diff = + input_1_height >= input_2_height ? width_1_diff : -input_1_width * (input_1_channels - width_1_diff); + int32_t height_2_diff = + input_2_height >= input_1_height ? width_2_diff : -input_2_width * (input_2_channels - width_2_diff); + + int32_t batch_1_diff = + input_1_batch >= input_2_batch ? 
input_1_channels * input_1_width * input_1_height : 0; + int32_t batch_2_diff = + input_2_batch >= input_1_batch ? input_2_channels * input_2_width * input_2_height : 0; + + for (int32_t i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + const int8_t *input_1_ptr = input_1_data; + const int8_t *input_2_ptr = input_2_data; + flat_size_1 = input_1_height * input_1_width * input_1_channels; + flat_size_2 = input_2_height * input_2_width * input_2_channels; + if (input_1_height == input_2_height && input_1_width == input_2_width && + input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else + { + flat_size_1 = input_1_width * input_1_channels; + flat_size_2 = input_2_width * input_2_channels; + for (int32_t i_out_height = 0; i_out_height < output_height; i_out_height++) + { + if (input_1_width == input_2_width && input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + input_1_ptr += flat_size_1; + input_2_ptr += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + ++input_1_ptr; + input_2_ptr += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + 
arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + ++input_2_ptr; + input_1_ptr += flat_size_1; + } + else + { + for (int32_t i_out_width = 0; i_out_width < output_width; i_out_width++) + { + if (input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr += input_1_channels; + } + else if (input_1_channels == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, input_2_channels); + output_data += input_2_channels; + input_1_ptr++; + input_2_ptr += input_2_channels; + } + else if (input_2_channels == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr++; + } + input_1_ptr -= width_1_diff; + input_2_ptr -= width_2_diff; + } + } + input_1_ptr += height_1_diff; + input_2_ptr += height_2_diff; + } + } + input_1_data += batch_1_diff; + input_2_data += batch_2_diff; + } + } + } + else + { + arm_max_no_broadcast_s8(input_1_data, input_2_data, output_data, flat_size_1); + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_minimum_s8.c b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_minimum_s8.c new file mode 100644 index 0000000..6b7e91b --- /dev/null +++ b/src/third_party/cmsis_nn/Source/BasicMathFunctions/arm_minimum_s8.c @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_minimum_s8 + * Description: Minimum and Maximum + * + * $Date: 08 October 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup minimumMaximum + * @{ + */ + +static arm_cmsis_nn_status +arm_min_no_broadcast_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec1 = vldrbq_z_s8(input_1, p); + input_1 += 16; + int8x16_t vec2 = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vminq_s8(vec1, vec2), p); + output += 16; + flat_size -= 16; + } +#else + while (flat_size > 0) + { + int8_t in1 = *input_1++; + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? 
in2 : in1; + --flat_size; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +static arm_cmsis_nn_status +arm_min_scalar_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + int8x16_t scalar_vec = vdupq_n_s8(*input_1); + + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vminq_s8(scalar_vec, vec), p); + output += 16; + flat_size -= 16; + } +#else + int8_t in1 = *input_1; + while (flat_size > 0) + { + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in2 : in1; + --flat_size; + } +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/* + * s8 minimum + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_minimum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims) +{ + (void)ctx; + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t output_width = output_dims->w; + + const int32_t input_1_batch = input_1_dims->n; + const int32_t input_1_height = input_1_dims->h; + const int32_t input_1_width = input_1_dims->w; + const int32_t input_1_channels = input_1_dims->c; + + const int32_t input_2_batch = input_2_dims->n; + const int32_t input_2_height = input_2_dims->h; + const int32_t input_2_width = input_2_dims->w; + const int32_t input_2_channels = input_2_dims->c; + + int32_t flat_size_1 = input_1_batch * input_1_height * input_1_width * input_1_channels; + int32_t flat_size_2 = input_2_batch * input_2_height * input_2_width * input_2_channels; + + if (arm_check_broadcast_required(input_1_dims, input_2_dims)) + { + if (flat_size_1 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_data, input_2_data, 
output_data, flat_size_2); + } + else if (flat_size_2 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_data, input_1_data, output_data, flat_size_1); + } + else + { + int32_t width_1_diff = input_1_width >= input_2_width ? 0 : input_1_channels; + int32_t width_2_diff = input_2_width >= input_1_width ? 0 : input_2_channels; + + int32_t height_1_diff = + input_1_height >= input_2_height ? width_1_diff : -input_1_width * (input_1_channels - width_1_diff); + int32_t height_2_diff = + input_2_height >= input_1_height ? width_2_diff : -input_2_width * (input_2_channels - width_2_diff); + + int32_t batch_1_diff = + input_1_batch >= input_2_batch ? input_1_channels * input_1_width * input_1_height : 0; + int32_t batch_2_diff = + input_2_batch >= input_1_batch ? input_2_channels * input_2_width * input_2_height : 0; + + for (int32_t i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + const int8_t *input_1_ptr = input_1_data; + const int8_t *input_2_ptr = input_2_data; + flat_size_1 = input_1_height * input_1_width * input_1_channels; + flat_size_2 = input_2_height * input_2_width * input_2_channels; + if (input_1_height == input_2_height && input_1_width == input_2_width && + input_1_channels == input_2_channels) + { + arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else if (flat_size_1 == 1) + { + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + } + else if (flat_size_2 == 1) + { + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else + { + flat_size_1 = input_1_width * input_1_channels; + flat_size_2 = input_2_width * input_2_channels; + for (int32_t i_out_height = 0; i_out_height < output_height; i_out_height++) + { + if (input_1_width == input_2_width && input_1_channels == input_2_channels) + { + 
arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + input_1_ptr += flat_size_1; + input_2_ptr += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + ++input_1_ptr; + input_2_ptr += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + ++input_2_ptr; + input_1_ptr += flat_size_1; + } + else + { + for (int32_t i_out_width = 0; i_out_width < output_width; i_out_width++) + { + if (input_1_channels == input_2_channels) + { + arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr += input_1_channels; + } + else if (input_1_channels == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, input_2_channels); + output_data += input_2_channels; + input_1_ptr++; + input_2_ptr += input_2_channels; + } + else if (input_2_channels == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr++; + } + input_1_ptr -= width_1_diff; + input_2_ptr -= width_2_diff; + } + } + input_1_ptr += height_1_diff; + input_2_ptr += height_2_diff; + } + } + input_1_data += batch_1_diff; + input_2_data += batch_2_diff; + } + } + } + else + { + arm_min_no_broadcast_s8(input_1_data, input_2_data, output_data, flat_size_1); + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen 
group + */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_w.c similarity index 76% rename from src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c rename to src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_w.c index 3c71bcc..9ea9e02 100644 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c +++ b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_w.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,17 +21,18 @@ * Title: arm_concatenation_s8_w.c * Description: s8 version of concatenation along the W axis * - * $Date: October 2019 - * $Revision: V.1.0.0 + * $Date: 26 October 2022 + * $Revision: V.1.0.1 * * Target Processor: Cortex-M cores * * -------------------------------------------------------------------- */ -#include "arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" /** - * @ingroup groupNN + * @ingroup Public */ /** @@ -39,12 +40,12 @@ * @{ */ - /* - * s8 version of concatenation along the W axis - * - * Refer to header file for details. - * - */ +/* + * s8 version of concatenation along the W axis + * + * Refer to header file for details. 
+ * + */ void arm_concatenation_s8_w(const int8_t *input, const uint16_t input_x, const uint16_t input_y, @@ -57,7 +58,7 @@ void arm_concatenation_s8_w(const int8_t *input, output += offset_w * (input_x * input_y * input_z); - memcpy(output, input, input_copy_size); + arm_memcpy_s8(output, input, input_copy_size); } /** diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_x.c similarity index 77% rename from src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c rename to src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_x.c index 555893e..d02be29 100644 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c +++ b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_x.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,17 +21,18 @@ * Title: arm_concatenation_s8_x.c * Description: s8 version of concatenation along the X axis * - * $Date: October 2019 - * $Revision: V.1.0.0 + * $Date: 26 October 2022 + * $Revision: V.1.0.2 * * Target Processor: Cortex-M cores * * -------------------------------------------------------------------- */ -#include "arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" /** - * @ingroup groupNN + * @ingroup Public */ /** @@ -39,12 +40,12 @@ * @{ */ - /* - * s8 version of concatenation along the X axis - * - * Refer to header file for details. - * - */ +/* + * s8 version of concatenation along the X axis + * + * Refer to header file for details. 
+ * + */ void arm_concatenation_s8_x(const int8_t *input, const uint16_t input_x, const uint16_t input_y, @@ -63,8 +64,8 @@ void arm_concatenation_s8_x(const int8_t *input, // Copy per row for (i = 0; i < num_iterations; ++i) { - memcpy(output, input, input_x); - input += input_x; + arm_memcpy_s8(output, input, input_x); + input += input_x; output += output_x; } } diff --git a/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_y.c b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_y.c new file mode 100644 index 0000000..78131fd --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_y.c @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_concatenation_s8_y.c + * Description: s8 version of concatenation along the Y axis + * + * $Date: 26 October 2022 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Concatenation + * @{ + */ + +/* + * s8 version of concatenation along the Y axis + * + * Refer to header file for details. + * + */ +void arm_concatenation_s8_y(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_y, + const uint32_t offset_y) +{ + const uint32_t num_iterations = input_z * input_w; + const uint32_t input_copy_size = input_x * input_y; + const uint32_t output_stride = input_x * output_y; + + output += offset_y * input_x; + uint32_t i; + + // Copy per tile + for (i = 0; i < num_iterations; ++i) + { + arm_memcpy_s8(output, input, input_copy_size); + input += input_copy_size; + output += output_stride; + } +} + +/** + * @} end of Concatenation group + */ diff --git a/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_z.c b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_z.c new file mode 100644 index 0000000..b742c3d --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConcatenationFunctions/arm_concatenation_s8_z.c @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_concatenation_s8_z.c + * Description: s8 version of concatenation along the Z axis + * + * $Date: 26 October 2022 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Concatenation + * @{ + */ + +/* + * s8 version of concatenation along the Z axis + * + * Refer to header file for details. 
+ * + */ +void arm_concatenation_s8_z(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_z, + const uint32_t offset_z) +{ + const uint32_t input_copy_size = input_x * input_y * input_z; + const uint32_t output_stride = input_x * input_y * output_z; + + output += offset_z * (input_x * input_y); + + uint32_t i; + + for (i = 0; i < input_w; ++i) + { + arm_memcpy_s8(output, input, input_copy_size); + input += input_copy_size; + output += output_stride; + } +} + +/** + * @} end of Concatenation group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s4.c new file mode 100644 index 0000000..8f68931 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s4.c @@ -0,0 +1,201 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1_x_n_s4.c + * Description: s4 version of 1xN convolution using symmetric quantization. 
+ * + * $Date: 10 April 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * 1xN s4 convolution function. + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_convolve_1_x_n_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + int32_t buffer_size = arm_convolve_1_x_n_s4_get_buffer_size(conv_params, input_dims, filter_dims, output_dims); + /* The wrapper API is the ultimate reference for argument check */ + if ((input_dims->h != 1) || conv_params->dilation.w != 1 || (buffer_size != 0 && ctx->buf == NULL) || + conv_params->stride.w == 0 || (conv_params->stride.w * input_dims->c % 4 != 0)) + { + status = ARM_CMSIS_NN_ARG_ERROR; + goto out; + } + +#if defined(ARM_MATH_MVEI) + (void)bias_dims; + const uint16_t input_x = input_dims->w; + const uint16_t kernel_x = filter_dims->w; + const uint16_t output_x = output_dims->w; + const uint16_t output_ch = output_dims->c; + const uint16_t input_ch = input_dims->c; + const uint16_t pad_x = conv_params->padding.w; + const uint16_t stride_x = conv_params->stride.w; + + // Total pad for dilation of 1 + const int32_t total_pad = ((output_x - 1) * stride_x + kernel_x - input_x); + const int32_t asym_pad = total_pad % 2; + + if (pad_x * 2 + asym_pad != total_pad) + { + return ARM_CMSIS_NN_FAILURE; + } + + const int32_t 
right_pad_num = pad_x + asym_pad != 0 ? MAX(1, (pad_x + asym_pad + stride_x - 1) / stride_x) : 0; + const int32_t left_pad_num = pad_x != 0 ? MAX(1, (pad_x + stride_x - 1) / stride_x) : 0; + const int32_t no_pad_num = MAX(output_x - (right_pad_num + left_pad_num), 0); + + if (right_pad_num + no_pad_num + left_pad_num != output_x) + { + return ARM_CMSIS_NN_FAILURE; + } + + for (int i_batch = 0; i_batch < input_dims->n; i_batch++) + { + // Handle left padded sections + int32_t lhs_rows = left_pad_num; + const int32_t rhs_cols = kernel_x * input_dims->c; + const int32_t rhs_rows = output_dims->c; + const int32_t lhs_offset = input_ch * stride_x; + + int32_t out_idx = 0; + + for (int i = 0; i < lhs_rows; i++) + { + const int32_t est_input_x_idx = stride_x * i - pad_x; + const int32_t ker_begin_idx = -est_input_x_idx; + const int32_t actual_kernel_len = kernel_x - ker_begin_idx; + status = arm_nn_mat_mul_core_1x_s4(actual_kernel_len * input_ch, + ker_begin_idx * input_ch, + input_data, + filter_data + ((ker_begin_idx * input_ch) >> 1), + output_ch, + conv_params, + quant_params, + bias_data, + output_data); + output_data += output_ch; + } + + out_idx += lhs_rows; + int32_t input_start = stride_x * lhs_rows - pad_x; + + if (input_start < 0) + { + return ARM_CMSIS_NN_FAILURE; + } + /* Non padded elements */ + input_start *= input_ch; + lhs_rows = no_pad_num; + arm_nn_mat_mult_nt_t_s4(input_data + input_start, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + lhs_offset); + + output_data += lhs_rows * rhs_rows; + /* Right padded elements */ + out_idx += lhs_rows; + lhs_rows = output_x - out_idx; + + if (lhs_rows < 0) + { + return ARM_CMSIS_NN_FAILURE; + } + + for (int i = out_idx; i < output_x; i++) + { + const int32_t est_input_x_idx = stride_x * i - pad_x; + const int32_t 
ker_end_idx = MIN(kernel_x, input_x - est_input_x_idx); + status = arm_nn_mat_mul_core_1x_s4(ker_end_idx * input_ch, + (kernel_x - ker_end_idx) * input_ch, + input_data + est_input_x_idx * input_ch, + filter_data, + output_ch, + conv_params, + quant_params, + bias_data, + output_data); + output_data += output_ch; + } + /* Advance to the next batch */ + input_data += (input_x * input_ch); + } +#else + status = arm_convolve_s4(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + +#endif + +out: + /* Return to application */ + return status; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c new file mode 100644 index 0000000..cc93cb6 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c @@ -0,0 +1,235 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1_x_n_s8.c + * Description: s8 version of 1xN convolution using symmetric quantization. 
+ * + * $Date: 04 November 2024 + * $Revision: V.3.6.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * 1xN s8 convolution function. + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + + /* The wrapper API is the ultimate reference for argument check */ + if ((input_dims->h != 1) || conv_params->dilation.w != 1 || ctx->buf == NULL || conv_params->stride.w == 0 || + (conv_params->stride.w * input_dims->c % 4 != 0)) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + +#if defined(ARM_MATH_MVEI) + (void)bias_dims; + + const int32_t input_x = input_dims->w; + const int32_t kernel_x = filter_dims->w; + const int32_t output_x = output_dims->w; + const int32_t input_ch = input_dims->c; + const int32_t pad_x = conv_params->padding.w; + const int32_t stride_x = conv_params->stride.w; + + // Total pad for dilation of 1 + const int32_t total_pad = ((output_x - 1) * stride_x + kernel_x - input_x); + const int32_t asym_pad = total_pad % 2; + + if (pad_x * 2 + asym_pad != total_pad) + { + return ARM_CMSIS_NN_FAILURE; + } + + const int32_t right_pad_num = pad_x + asym_pad != 0 ? MAX(1, (pad_x + asym_pad + stride_x - 1) / stride_x) : 0; + const int32_t left_pad_num = pad_x != 0 ? 
MAX(1, (pad_x + stride_x - 1) / stride_x) : 0; + const int32_t no_pad_num = MAX(output_x - (right_pad_num + left_pad_num), 0); + + const int32_t pad_size_left = pad_x * input_ch; + const int32_t pad_size_right = asym_pad ? right_pad_num * input_ch : pad_size_left; + + const int32_t rhs_cols = kernel_x * input_ch; + const int32_t rhs_rows = output_dims->c; + const int32_t lhs_offset = input_ch * stride_x; + + if (right_pad_num + no_pad_num + left_pad_num != output_x) + { + return arm_convolve_s8(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + NULL, + output_dims, + output_data); + } + + const uint32_t num_elem_left = kernel_x * input_ch; + const uint32_t num_elem_right = num_elem_left - input_ch; + + for (int i_batch = 0; i_batch < input_dims->n; i_batch++) + { + /* Handle left padded sections */ + int32_t lhs_rows = left_pad_num; + int8_t *im2col = ctx->buf; + + arm_memset_s8(im2col, (int8_t)-conv_params->input_offset, sizeof(int8_t) * (uint32_t)pad_size_left); + im2col += pad_size_left; + arm_memcpy_s8(im2col, input_data, sizeof(int8_t) * num_elem_left); + + arm_nn_mat_mult_nt_t_s8((int8_t *)ctx->buf, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_rows, + lhs_offset); + + output_data += lhs_rows * rhs_rows; + + /* Non padded elements */ + int32_t out_idx = lhs_rows; + int32_t input_start = stride_x * lhs_rows - pad_x; + + if (input_start < 0) + { + return ARM_CMSIS_NN_FAILURE; + } + + input_start *= input_ch; + lhs_rows = no_pad_num; + + arm_nn_mat_mult_nt_t_s8(input_data + input_start, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + 
conv_params->activation.min, + conv_params->activation.max, + rhs_rows, + lhs_offset); + + output_data += lhs_rows * rhs_rows; + out_idx += lhs_rows; + + /* Right padded elements */ + lhs_rows = output_x - out_idx; + + if (lhs_rows < 0) + { + return ARM_CMSIS_NN_FAILURE; + } + + im2col = ctx->buf; + input_start = (stride_x * (left_pad_num + no_pad_num) - pad_x) * input_ch; + + arm_memcpy_s8(im2col, input_data + input_start, sizeof(int8_t) * num_elem_right); + im2col += num_elem_right; + arm_memset_s8(im2col, (int8_t)-conv_params->input_offset, sizeof(int8_t) * (uint32_t)pad_size_right); + + arm_nn_mat_mult_nt_t_s8((int8_t *)ctx->buf, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_rows, + lhs_offset); + + output_data += lhs_rows * rhs_rows; + + /* Advance to the next batch */ + input_data += (input_x * input_ch); + } +#else + status = arm_convolve_s8(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + NULL, + output_dims, + output_data); + +#endif + + /* Return to application */ + return status; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4.c new file mode 100644 index 0000000..3804638 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4.c @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_s4.c + * Description: Generic s4 version of 1x1 convolution + * + * $Date: 01 November 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * A more generic version of s4 1x1 convolution intended for non-unity strides. This is slower + * than the _fast() version if used for unity stride values. + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_1x1_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)ctx; + (void)filter_dims; + (void)bias_dims; + if (conv_params->padding.w != 0 || conv_params->padding.h != 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const int32_t lhs_rows = output_dims->w; + const int32_t rhs_rows = output_dims->c; + const int32_t rhs_cols = input_dims->c; + const int32_t stride_w = conv_params->stride.w; + const int32_t input_inc = input_dims->w * conv_params->stride.h * rhs_cols; + const int32_t output_inc = output_dims->w * rhs_rows; + const int32_t output_h = output_dims->h; + const int32_t batch = input_dims->n; + const int8_t *input_data_ref = input_data; + + for (int i_batch = 0; i_batch < batch; i_batch++) + { + input_data = input_data_ref + (i_batch * rhs_cols * input_dims->w * input_dims->h); + for (int i_output_h = 0; i_output_h < output_h; i_output_h++) + { + // Process one input row + arm_cmsis_nn_status result = arm_nn_mat_mult_nt_t_s4(input_data, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_cols * stride_w); + if (result != ARM_CMSIS_NN_SUCCESS) + { + return result; + } + input_data += input_inc; + output_data += output_inc; + } + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4_fast.c 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4_fast.c new file mode 100644 index 0000000..36d4c37 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s4_fast.c @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_s4_fast.c + * Description: Fast s4 version of 1x1 convolution (non-square shape) + * + * $Date: 01 November 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Fast s4 version for 1x1 convolution (non-square shape) + * + * Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_convolve_1x1_s4_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + if (conv_params->padding.w != 0 || conv_params->padding.h != 0 || conv_params->stride.w != 1 || + conv_params->stride.h != 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + (void)ctx; + (void)filter_dims; + (void)bias_dims; + + const int32_t lhs_rows = input_dims->w * input_dims->h * input_dims->n; + const int32_t rhs_rows = output_dims->c; + const int32_t rhs_cols = input_dims->c; + + arm_nn_mat_mult_nt_t_s4(input_data, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_cols); + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8.c new file mode 100644 index 0000000..ec97bbd --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8.c @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_s8.c + * Description: Generic s8 version of 1x1 convolution + * + * $Date: 04 January 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * A more generic version of s8 1x1 convolution intended for non-unity strides. This is slower + * than the _fast() version if used for unity stride values. + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_1x1_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)ctx; + (void)filter_dims; + (void)bias_dims; + if (conv_params->padding.w != 0 || conv_params->padding.h != 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const int32_t lhs_rows = output_dims->w; + const int32_t rhs_rows = output_dims->c; + const int32_t rhs_cols = input_dims->c; + const int32_t stride_w = conv_params->stride.w; + const int32_t input_inc = input_dims->w * conv_params->stride.h * rhs_cols; + const int32_t output_inc = output_dims->w * rhs_rows; + const int32_t output_h = output_dims->h; + const int32_t batch = input_dims->n; + const int8_t *input_data_ref = input_data; + + for (int i_batch = 0; i_batch < batch; i_batch++) + { + input_data = input_data_ref + (i_batch * rhs_cols * input_dims->w * input_dims->h); + for (int i_output_h = 0; i_output_h < output_h; i_output_h++) + { + // Process one input row + arm_cmsis_nn_status result = arm_nn_mat_mult_nt_t_s8(input_data, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_rows, + rhs_cols * stride_w); + if (result != ARM_CMSIS_NN_SUCCESS) + { + return result; + } + input_data += input_inc; + output_data += output_inc; + } + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c new file mode 100644 index 0000000..2c997d3 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c @@ -0,0 +1,198 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_s8_fast.c + * Description: Fast s8 version of 1x1 convolution (non-square shape) + * + * $Date: 05 November 2024 + * $Revision: V.3.6.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Fast s8 version for 1x1 convolution (non-square shape) + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + if (conv_params->padding.w != 0 || conv_params->padding.h != 0 || conv_params->stride.w != 1 || + conv_params->stride.h != 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + (void)filter_dims; + (void)bias_dims; + + const int32_t rhs_cols = input_dims->c; + const int32_t rhs_rows = output_dims->c; + int32_t lhs_rows = input_dims->w * input_dims->h * input_dims->n; + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) && defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + if (ctx->buf != NULL) /* Fall back to non buffered version if no additional memory buffer provided */ + { + const int32_t batch = input_dims->n; + const int32_t output_h = output_dims->h; + const int32_t output_w = output_dims->w; + const int32_t input_inc = input_dims->w * rhs_cols; + + for (int i_batch = 0; i_batch < batch; i_batch++) + { + const int32_t output_ch = output_dims->c; + const int8_t *ip = input_data; + int16_t *buffer_a = (int16_t *)ctx->buf; + int16_t *im2col_buf = (int16_t *)ctx->buf; + int8_t *out = output_data; + lhs_rows = 0; + + for (int i_out_y = 0; i_out_y < output_h; i_out_y++, ip += input_inc) + { + for (int32_t k_x = 0, i_out_x = 0; i_out_x < output_w; i_out_x++, k_x += rhs_cols) + { + arm_s8_to_s16_unordered_with_offset(ip + k_x, im2col_buf, rhs_cols, conv_params->input_offset); + im2col_buf += rhs_cols; + lhs_rows++; + if (lhs_rows == 2) + { + out = arm_nn_mat_mult_kernel_s8_s16(filter_data, + buffer_a, + output_ch, + quant_params->shift, + quant_params->multiplier, + conv_params->output_offset, + conv_params->activation.min, + 
conv_params->activation.max, + rhs_cols, + rhs_cols, + bias_data, + out); + im2col_buf = buffer_a; + lhs_rows = 0; + } + } + if (out == NULL) + { + return ARM_CMSIS_NN_NO_IMPL_ERROR; + } + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { + const int8_t *ker_a = filter_data; + for (int i = 0; i < output_ch; i++) + { + /* Load the accumulator with bias first */ + int32_t sum = 0; + if (bias_data) + { + sum = bias_data[i]; + } + const int16_t *ip_as_col = buffer_a; + + /* 4 multiply and accumulates are done in one loop. */ + uint16_t col_count = rhs_cols >> 2; + while (col_count) + { + int32_t ker_a1, ker_a2; + int32_t ip_b1, ip_b2; + ker_a = read_and_pad_reordered(ker_a, &ker_a1, &ker_a2); + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a2, ip_b2, sum); + col_count--; + } + + /* Handle left over mac */ + col_count = rhs_cols & 0x3; + while (col_count) + { + int8_t ker_a1 = *ker_a++; + int16_t ip_b1 = *ip_as_col++; + sum += ker_a1 * ip_b1; + col_count--; + } + sum = arm_nn_requantize(sum, quant_params->multiplier[i], quant_params->shift[i]); + sum += conv_params->output_offset; + sum = MAX(sum, conv_params->activation.min); + sum = MIN(sum, conv_params->activation.max); + *out++ = (int8_t)sum; + } + } + /* Advance to the next batch */ + input_data += (input_dims->w * input_dims->h * rhs_cols); + output_data += (output_w * output_h * output_ch); + } + return ARM_CMSIS_NN_SUCCESS; + } +#else + (void)ctx; +#endif + + arm_nn_mat_mult_nt_t_s8(input_data, + filter_data, + bias_data, + output_data, + quant_params->multiplier, + quant_params->shift, + lhs_rows, + rhs_rows, + rhs_cols, + conv_params->input_offset, + conv_params->output_offset, + conv_params->activation.min, + conv_params->activation.max, + rhs_rows, + rhs_cols); + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git 
a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_even_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_even_s4.c new file mode 100644 index 0000000..c93ba6d --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_even_s4.c @@ -0,0 +1,230 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_even_s4.c + * Description: s8 version of convolution using symmetric quantization with 4 bit weights. + * + * $Date: 05 Jun 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s8 convolution function with int4 packed RHS (weights) and even RHS columns, + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_convolve_even_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *packed_filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)bias_dims; + +#if defined(ARM_MATH_MVEI) + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + int16_t *buffer_a = (int16_t *)ctx->buf; + + const int32_t input_batches = input_dims->n; + const uint16_t input_x = input_dims->w; + const uint16_t input_y = input_dims->h; + const uint16_t input_ch = input_dims->c; + const uint16_t kernel_x = filter_dims->w; + const uint16_t kernel_y = filter_dims->h; + const uint16_t output_x = output_dims->w; + const uint16_t output_y = output_dims->h; + const uint16_t output_ch = output_dims->c; + + const uint16_t pad_x = conv_params->padding.w; + const uint16_t pad_y = conv_params->padding.h; + const uint16_t stride_x = conv_params->stride.w; + const uint16_t stride_y = conv_params->stride.h; + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + const int32_t rhs_cols = kernel_x * kernel_y * input_ch; + const int32_t input_offset = conv_params->input_offset; + + if (rhs_cols & 0x1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const int32_t blk_cnt = rhs_cols >> 5; + + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + + int i_batch; + + for (i_batch = 0; i_batch < input_batches; i_batch++) + { + /* Generate up to four columns from the input tensor a GEMM computation */ + int8_t *im2col_buf = 
(int8_t *)buffer_a; + const int32_t rhs_rows = output_dims->c; + int8_t *out = output_data; + int32_t lhs_rows = 0; + + /* This part implements the im2col function */ + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) + { + arm_memset_s8(im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * input_ch); + } + else + { + arm_memcpy_s8(im2col_buf, input_data + (k_y * input_x + k_x) * input_ch, input_ch); + } + im2col_buf += input_ch; + } + } + + /* Reformat most of the buffer by interleaving it */ + int8_t *im2col_buf_interleaved = (int8_t *)buffer_a + lhs_rows * rhs_cols; + for (int j = blk_cnt; j > 0; --j) + { + int8x16x2_t x2 = vld2q_s8(im2col_buf_interleaved); + + vstrbq_s8(im2col_buf_interleaved, x2.val[1]); + im2col_buf_interleaved += 16; + + vstrbq_s8(im2col_buf_interleaved, x2.val[0]); + im2col_buf_interleaved += 16; + } + + lhs_rows++; + + /* Computation is filed for every 4 columns */ + if (lhs_rows == 4) + { + arm_nn_mat_mult_nt_interleaved_t_even_s4((int8_t *)buffer_a, + packed_filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols); + + out += lhs_rows * rhs_rows; + + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; + } + } + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { + arm_nn_mat_mult_nt_interleaved_t_even_s4((int8_t *)buffer_a, + packed_filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + 
rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols); + out += lhs_rows * rhs_rows; + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; + } + + /* Advance to the next batch */ + input_data += (input_x * input_y * input_ch); + output_data += (output_x * output_y * output_ch); + } +#else + (void)ctx; + (void)conv_params; + (void)quant_params; + (void)input_dims; + (void)input_data; + (void)filter_dims; + (void)packed_filter_data; + (void)bias_data; + (void)output_dims; + (void)output_data; + + return ARM_CMSIS_NN_NO_IMPL_ERROR; + +#endif // #if defined(ARM_MATH_MVEI) + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s16.c new file mode 100644 index 0000000..a03973a --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s16.c @@ -0,0 +1,102 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_get_buffer_sizes_s16.c + * Description: Collection of get buffer size functions for the various s16 convolution layer functions. + * + * $Date: 20 March 2024 + * $Revision: V.2.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ + +__STATIC_INLINE int32_t arm_convolve_s16_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + int32_t col_length = input_dims->c * filter_dims->w * filter_dims->h; + // Get number of complete lanes with int16 elements (multiple of 8) for given col_length. This is dependent on + // implementation of arm_nn_mat_mult_nt_t_s16 + col_length = (col_length + 7) / 8; + // 4 -> number of im2col buffers, 8 -> 8 elements per Q register + return 4 * col_length * 8 * (int32_t)sizeof(int16_t); +} + +int32_t arm_convolve_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_convolve_s16_get_buffer_size_mve(input_dims, filter_dims); +#else + return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t); +#endif +} + +/* + * Get the required buffer size for arm_convolve_wrapper_s16. This is the recommended function convolve wrapper s16 + * function. + * + * Refer to header file for details. 
+ * + */ +int32_t arm_convolve_wrapper_s16_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)conv_params; + (void)output_dims; + + return arm_convolve_s16_get_buffer_size(input_dims, filter_dims); +} + +int32_t arm_convolve_wrapper_s16_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + return arm_convolve_wrapper_s16_get_buffer_size(conv_params, input_dims, filter_dims, output_dims); +} + +int32_t arm_convolve_wrapper_s16_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)conv_params; + (void)output_dims; + + return arm_convolve_s16_get_buffer_size_mve(input_dims, filter_dims); +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s4.c new file mode 100644 index 0000000..ce82efe --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s4.c @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_get_buffer_sizes_s4.c + * Description: Collection of get buffer size functions for the various s4 convolution layer functions. + * + * $Date: 10 April 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ + +__STATIC_INLINE int32_t arm_convolve_s4_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + int32_t col_length = input_dims->c * filter_dims->w * filter_dims->h; + // Get number of complete lanes with int8 elements (multiple of 16) for given col_length. This is dependent on + // implementation of arm_nn_mat_mult_nt_t_s4 + col_length = (col_length + 15) / 16; + // 4 -> number of im2col buffers, 16 -> 16 elements per Q register + return 4 * col_length * 16 * (int32_t)sizeof(int8_t); +} + +__STATIC_INLINE int32_t arm_convolve_1_x_n_s4_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + const int32_t input_x = input_dims->w; + const int32_t pad_x = conv_params->padding.w; + const int32_t kernel_x = filter_dims->w; + const int32_t output_x = output_dims->w; + const int32_t stride_x = conv_params->stride.w; + const int32_t total_pad = ((output_x - 1) * stride_x + kernel_x - input_x); + const int32_t asym_pad = total_pad % 2; + + const int32_t right_pad_num = pad_x + asym_pad != 0 ? 
MAX(1, (pad_x + asym_pad + stride_x - 1) / stride_x) : 0; + const int32_t left_pad_num = pad_x != 0 ? MAX(1, (pad_x + stride_x - 1) / stride_x) : 0; + const int32_t no_pad_num = MAX(output_x - (right_pad_num + left_pad_num), 0); + + if (right_pad_num + no_pad_num + left_pad_num != output_x) + { + return arm_convolve_s4_get_buffer_size_mve(input_dims, filter_dims); + } + + return 0; +} + +int32_t arm_convolve_s4_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ + const int32_t rhs_cols = filter_dims->w * filter_dims->h * input_dims->c; + return (2 * rhs_cols) * (int32_t)sizeof(int16_t); +} + +int32_t arm_convolve_1_x_n_s4_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ +#if !defined(ARM_MATH_MVEI) + (void)conv_params; + (void)output_dims; + + return arm_convolve_s4_get_buffer_size(input_dims, filter_dims); +#else + return arm_convolve_1_x_n_s4_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); +#endif +} + +int32_t arm_convolve_1x1_s4_fast_get_buffer_size(const cmsis_nn_dims *input_dims) +{ + (void)input_dims; + return 0; +} + +/* + * Get the required buffer size for arm_convolve_wrapper_s4. This is the + * recommended convolve wrapper s4 function. + * + * Refer to header file for details. 
+ * + */ +int32_t arm_convolve_wrapper_s4_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_convolve_wrapper_s8_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); +#else + (void)output_dims; + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s4_fast_get_buffer_size(input_dims); + } + else + { + return 0; + } + } + else + { + return arm_convolve_s4_get_buffer_size(input_dims, filter_dims); + } +#endif +} + +int32_t arm_convolve_wrapper_s4_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) + +{ + (void)output_dims; + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s4_fast_get_buffer_size(input_dims); + } + else + { + return 0; + } + } + else if ((input_dims->h == 1) && (conv_params->dilation.w == 1) && (filter_dims->h == 1) && + (conv_params->stride.w * input_dims->c % 4 == 0)) + { + return arm_convolve_1_x_n_s4_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); + } + else + { + return arm_convolve_s4_get_buffer_size_mve(input_dims, filter_dims); + } +} + +int32_t arm_convolve_wrapper_s4_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + return 
arm_convolve_wrapper_s4_get_buffer_size(conv_params, input_dims, filter_dims, output_dims); +} +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s8.c new file mode 100644 index 0000000..7976fb6 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_get_buffer_sizes_s8.c @@ -0,0 +1,234 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for the various s8 convolution layer functions. 
+ * + * $Date: 31 October 2024 + * $Revision: V.2.2.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ +__STATIC_INLINE int32_t arm_convolve_1x1_s8_fast_get_buffer_size_dsp(const cmsis_nn_dims *input_dims) +{ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + return (2 * input_dims->c) * (int32_t)sizeof(int16_t); +#else + (void)input_dims; + return 0; +#endif +} + +__STATIC_INLINE int32_t arm_convolve_s8_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + int32_t col_length = input_dims->c * filter_dims->w * filter_dims->h; + // Get number of complete lanes with int8 elements (multiple of 16) for given col_length. This is dependent on + // implementation of arm_nn_mat_mult_nt_t_s8 + col_length = (col_length + 15) / 16; + // 4 -> number of im2col buffers, 16 -> 16 elements per Q register + return 4 * col_length * 16 * (int32_t)sizeof(int8_t); +} + +__STATIC_INLINE int32_t arm_convolve_1_x_n_s8_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + const int32_t input_x = input_dims->w; + const int32_t pad_x = conv_params->padding.w; + const int32_t kernel_x = filter_dims->w; + const int32_t output_x = output_dims->w; + const int32_t stride_x = conv_params->stride.w; + const int32_t total_pad = ((output_x - 1) * stride_x + kernel_x - input_x); + const int32_t asym_pad = total_pad % 2; + + const int32_t right_pad_num = pad_x + asym_pad != 0 ? MAX(1, (pad_x + asym_pad + stride_x - 1) / stride_x) : 0; + const int32_t left_pad_num = pad_x != 0 ? 
MAX(1, (pad_x + stride_x - 1) / stride_x) : 0; + const int32_t no_pad_num = MAX(output_x - (right_pad_num + left_pad_num), 0); + + if (right_pad_num + no_pad_num + left_pad_num != output_x) + { + return arm_convolve_s8_get_buffer_size_mve(input_dims, filter_dims); + } + + const int32_t pad_size_left = pad_x * input_dims->c; + const int32_t pad_size_right = asym_pad ? right_pad_num * input_dims->c : pad_size_left; + const int32_t num_elem_left = kernel_x * input_dims->c; + const int32_t num_elem_right = num_elem_left - input_dims->c; + const int32_t size_1_x_n = MAX(num_elem_left + pad_size_left, num_elem_right + pad_size_right); + + return size_1_x_n; +} + +int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_convolve_s8_get_buffer_size_mve(input_dims, filter_dims); +#else + const int32_t rhs_cols = filter_dims->w * filter_dims->h * input_dims->c; + const int32_t remainder = rhs_cols % 4; + const int32_t aligned_rhs_cols = remainder != 0 ? rhs_cols + 4 - remainder : rhs_cols; + return (2 * aligned_rhs_cols) * (int32_t)sizeof(int16_t); +#endif +} + +int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ +#if !defined(ARM_MATH_MVEI) + (void)conv_params; + (void)output_dims; + + return arm_convolve_s8_get_buffer_size(input_dims, filter_dims); +#else + return arm_convolve_1_x_n_s8_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); +#endif +} + +int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims) +{ +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return arm_convolve_1x1_s8_fast_get_buffer_size_dsp(input_dims); +#else + (void)input_dims; +#endif + return 0; +} + +/* + * Get the required buffer size for arm_convolve_wrapper_s8. 
This is the recommended function convolve wrapper s8 + * function. + * + * Refer to header file for details. + * + */ +int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_convolve_wrapper_s8_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); +#elif defined(ARM_MATH_DSP) + return arm_convolve_wrapper_s8_get_buffer_size_dsp(conv_params, input_dims, filter_dims, output_dims); +#else + (void)output_dims; + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s8_fast_get_buffer_size(input_dims); + } + else + { + return 0; + } + } + else if ((input_dims->h == 1) && (conv_params->dilation.w == 1) && (filter_dims->h == 1) && + (conv_params->stride.w * input_dims->c % 4 == 0)) + { + return arm_convolve_1_x_n_s8_get_buffer_size(conv_params, input_dims, filter_dims, output_dims); + } + else + { + return arm_convolve_s8_get_buffer_size(input_dims, filter_dims); + } +#endif +} + +int32_t arm_convolve_wrapper_s8_get_buffer_size_mve(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)output_dims; + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s8_fast_get_buffer_size(input_dims); + } + else + { + return 0; + } + } + else if ((input_dims->h == 1) && (conv_params->dilation.w == 1) && (filter_dims->h == 
1) && + (conv_params->stride.w * input_dims->c % 4 == 0)) + { + return arm_convolve_1_x_n_s8_get_buffer_size_mve(conv_params, input_dims, filter_dims, output_dims); + } + else + { + return arm_convolve_s8_get_buffer_size_mve(input_dims, filter_dims); + } +} + +int32_t arm_convolve_wrapper_s8_get_buffer_size_dsp(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)output_dims; + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s8_fast_get_buffer_size_dsp(input_dims); + } + else + { + return 0; + } + } + else if ((input_dims->h == 1) && (conv_params->dilation.w == 1) && (filter_dims->h == 1) && + (conv_params->stride.w * input_dims->c % 4 == 0)) + { + return arm_convolve_1_x_n_s8_get_buffer_size(conv_params, input_dims, filter_dims, output_dims); + } + else + { + return arm_convolve_s8_get_buffer_size(input_dims, filter_dims); + } +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s16.c new file mode 100644 index 0000000..ef4d623 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s16.c @@ -0,0 +1,292 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_s16.c + * Description: s16 version of convolution. + * + * $Date: 22 April 2024 + * $Revision: V.4.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s16 convolution function. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or atleast greater than 4. 
+ * + */ +arm_cmsis_nn_status arm_convolve_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const cmsis_nn_bias_data *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data) +{ + (void)bias_dims; + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + int16_t *buffer_a = (int16_t *)ctx->buf; + + const int32_t input_batches = input_dims->n; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_ch = input_dims->c; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_ch = output_dims->c; + const int32_t rhs_cols = input_ch * kernel_y * kernel_x; + + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; + const int32_t pad_x = conv_params->padding.w; + const int32_t pad_y = conv_params->padding.h; + const int32_t stride_x = conv_params->stride.w; + const int32_t stride_y = conv_params->stride.h; + + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + +#if defined(ARM_MATH_MVEI) + const int32_t rhs_rows = output_dims->c; +#endif + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + int16_t *im2col = buffer_a; + int16_t *out = output_data; + + int32_t lhs_rows = 0; + + /* This part implements the im2col function */ + for (int32_t i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int32_t i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int32_t base_idx_x = 
stride_x * i_out_x - pad_x; + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) + { + /* Filling 0 for out-of-bound paddings */ + arm_memset_s8((int8_t *)im2col, 0, sizeof(int16_t) * (uint32_t)input_ch); + } + else + { + arm_memcpy_s8((int8_t *)im2col, + (const int8_t *)(input_data + (k_y * input_x + k_x) * input_ch), + (uint32_t)input_ch * sizeof(int16_t)); + } + im2col += input_ch; + } + } + + lhs_rows++; +#if defined(ARM_MATH_MVEI) + /* Computation is filed for every 4 columns */ + if (lhs_rows == 4) + { + arm_nn_mat_mult_nt_t_s16(buffer_a, + filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + out_activation_min, + out_activation_max); + out += lhs_rows * output_ch; + + lhs_rows = 0; + im2col = buffer_a; + } +#else + /* Computation is filed for every 2 columns */ + if (lhs_rows == 2) + { + out = arm_nn_mat_mult_kernel_s16(filter_data, + buffer_a, + output_ch, + output_shift, + output_mult, + out_activation_min, + out_activation_max, + rhs_cols, + bias_data, + out); + + /* Counter reset */ + im2col = buffer_a; + lhs_rows = 0; + } +#endif + } + + if (out == NULL) + { + return ARM_CMSIS_NN_NO_IMPL_ERROR; + } + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { +#if defined(ARM_MATH_MVEI) + arm_nn_mat_mult_nt_t_s16(buffer_a, + filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + out_activation_min, + out_activation_max); + out += lhs_rows * rhs_rows; + lhs_rows = 0; + im2col = buffer_a; +#else // #if defined(ARM_MATH_MVEI) + + const int64_t *bias_s64 = (const int64_t *)bias_data->data; + const int32_t *bias_s32 = (const int32_t *)bias_data->data; + 
const bool is_int32_bias = bias_data->is_int32_bias; + const int8_t *ker_a = filter_data; + int i; + + for (i = 0; i < output_ch; i++) + { + /* Init the accumulator*/ + int32_t sum = 0; + + /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */ + const int16_t *ip_as_col = buffer_a; + + #if defined(ARM_MATH_DSP) + /* 4 multiply and accumulates are done in one loop. */ + int32_t col_count = rhs_cols >> 2; + + while (col_count) + { + int32_t ker_a1, ker_a2; + int32_t ip_b1, ip_b2; + + ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2); + + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a2, ip_b2, sum); + + col_count--; + } + /* Handle left over mac */ + col_count = rhs_cols & 0x3; + #else + uint16_t col_count = rhs_cols; + + #endif + + while (col_count) + { + int8_t ker_a1 = *ker_a++; + int16_t ip_b1 = *ip_as_col++; + sum += ker_a1 * ip_b1; + col_count--; + } + + if (is_int32_bias) + { + if (bias_s32) + { + sum += bias_s32[i]; + } + + sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]); + } + else + { + int64_t acc_64 = sum; + + if (bias_s64) + { + acc_64 += bias_s64[i]; + } + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[i]); + sum = arm_nn_requantize_s64(acc_64, reduced_multiplier, output_shift[i]); + } + + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + *out++ = (int16_t)sum; + } + lhs_rows = 0; + +#endif // #if defined(ARM_MATH_MVEI) + } + + /* Advance to the next batch */ + input_data += (input_x * input_y * input_ch); + output_data += (output_x * output_y * output_ch); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s4.c new file mode 100644 index 0000000..b9668bc --- 
/dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s4.c @@ -0,0 +1,330 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_s4.c + * Description: s8 version of convolution using symmetric quantization with 4 bit weights. + * + * $Date: 17 May 2024 + * $Revision: V.1.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s8 convolution function with int4 weights. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or at least greater than 4. 
+ * + */ +arm_cmsis_nn_status arm_convolve_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *packed_filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)bias_dims; + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + int16_t *buffer_a = (int16_t *)ctx->buf; + + const int32_t input_batches = input_dims->n; + const uint16_t input_x = input_dims->w; + const uint16_t input_y = input_dims->h; + const uint16_t input_ch = input_dims->c; + const uint16_t kernel_x = filter_dims->w; + const uint16_t kernel_y = filter_dims->h; + const uint16_t output_x = output_dims->w; + const uint16_t output_y = output_dims->h; + const uint16_t output_ch = output_dims->c; + + const uint16_t pad_x = conv_params->padding.w; + const uint16_t pad_y = conv_params->padding.h; + const uint16_t stride_x = conv_params->stride.w; + const uint16_t stride_y = conv_params->stride.h; + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + const int32_t rhs_cols = kernel_x * kernel_y * input_ch; + const int32_t input_offset = conv_params->input_offset; + + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + + int i_batch; + for (i_batch = 0; i_batch < input_batches; i_batch++) + { +#if defined(ARM_MATH_MVEI) + /* Generate up to four columns from the input tensor a GEMM computation */ + int8_t *im2col_buf = (int8_t *)buffer_a; + const int32_t rhs_rows = output_dims->c; + int8_t *out = output_data; + int32_t lhs_rows = 0; + 
+ /* This part implements the im2col function */ + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) + { + arm_memset_s8(im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * input_ch); + } + else + { + arm_memcpy_s8(im2col_buf, input_data + (k_y * input_x + k_x) * input_ch, input_ch); + } + im2col_buf += input_ch; + } + } + lhs_rows++; + + /* Computation is filed for every 4 columns */ + if (lhs_rows == 4) + { + arm_nn_mat_mult_nt_t_s4((int8_t *)buffer_a, + packed_filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols); + out += lhs_rows * rhs_rows; + + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; + } + } + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { + arm_nn_mat_mult_nt_t_s4((int8_t *)buffer_a, + packed_filter_data, + bias_data, + out, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols); + out += lhs_rows * rhs_rows; + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; + } +#else // #if defined(ARM_MATH_MVEI) + int16_t *two_column_buf = buffer_a; + int8_t *out = output_data; + int32_t lhs_rows = 0; + + /* This part implements the im2col function */ + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + const int32_t 
base_idx_y = stride_y * i_out_y - pad_y; + + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) + { + /* Filling 0 for out-of-bound paddings */ + memset(two_column_buf, 0, sizeof(int16_t) * input_ch); + } + else + { + /* Copying the pixel data to column */ + arm_q7_to_q15_with_offset( + input_data + (k_y * input_x + k_x) * input_ch, two_column_buf, input_ch, input_offset); + } + two_column_buf += input_ch; + } + } + lhs_rows++; + /* Computation is filed for every 2 columns */ + if (lhs_rows == 2) + { + out = arm_nn_mat_mult_kernel_s4_s16(packed_filter_data, + buffer_a, + output_ch, + output_shift, + output_mult, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols, + bias_data, + out); + + /* counter reset */ + two_column_buf = buffer_a; + lhs_rows = 0; + } + } + + if (out == NULL) + { + return ARM_CMSIS_NN_NO_IMPL_ERROR; + } + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { + const int8_t *ker_a_ptr = packed_filter_data; + int i; + int8_t spilled_ker_a = 0; + for (i = 0; i < output_ch; i++) + { + /* Load the accumulator with bias first */ + int32_t sum = 0; + if (bias_data) + { + sum = bias_data[i]; + } + + const int16_t *ip_as_col = buffer_a; + + if (rhs_cols % 2 && (i % 2)) + { + int16_t ip_b0 = *ip_as_col++; + sum += spilled_ker_a * ip_b0; + } + + #if defined(ARM_MATH_DSP) + /* 4 multiply and accumulates are done in one loop. 
*/ + uint16_t col_count = rhs_cols / 4; + while (col_count) + { + int32_t ker_a1, ker_a2; + int32_t ip_b1, ip_b2; + + read_and_pad_s4_ordered(ker_a_ptr, &ker_a1, &ker_a2); + ker_a_ptr += 2; + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a2, ip_b2, sum); + + col_count--; + } + col_count = (rhs_cols & 0x3) >> 1; + #else + uint16_t col_count = rhs_cols >> 1; + #endif + + while (col_count) + { + int8_t ker_a0 = (int8_t)(*ker_a_ptr << 4) >> 4; + int8_t ker_a1 = *ker_a_ptr >> 4; + ker_a_ptr++; + + int16_t ip_b0 = *ip_as_col++; + sum += ker_a0 * ip_b0; + + ip_b0 = *ip_as_col++; + sum += ker_a1 * ip_b0; + + col_count--; + } + + if (rhs_cols % 2 && !(i % 2)) + { + int8_t ker_a0 = (int8_t)(*ker_a_ptr << 4) >> 4; + spilled_ker_a = *ker_a_ptr >> 4; + ker_a_ptr++; + int16_t ip_b0 = *ip_as_col; + + sum += ker_a0 * ip_b0; + } + + sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + *out++ = (int8_t)sum; + } + } +#endif + /* Advance to the next batch */ + input_data += (input_x * input_y * input_ch); + output_data += (output_x * output_y * output_ch); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s8.c new file mode 100644 index 0000000..2a0f0f5 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_s8.c @@ -0,0 +1,399 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_s8.c + * Description: s8 version of convolution using symmetric quantization. + * + * $Date: 04 November 2024 + * $Revision: V.4.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s8 convolution function. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or atleast greater than 4. 
+ * + */ +arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *upscale_dims, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)bias_dims; + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + int16_t *buffer_a = (int16_t *)ctx->buf; + + const int32_t input_batches = input_dims->n; + const uint16_t input_x = input_dims->w; + const uint16_t input_y = input_dims->h; + const uint16_t input_ch = input_dims->c; + const uint16_t kernel_x = filter_dims->w; + const uint16_t kernel_y = filter_dims->h; + const uint16_t kernel_ch = filter_dims->c; + const uint16_t output_x = output_dims->w; + const uint16_t output_y = output_dims->h; + const uint16_t output_ch = output_dims->c; + + const uint16_t pad_x = conv_params->padding.w; + const uint16_t pad_y = conv_params->padding.h; + const uint16_t stride_x = conv_params->stride.w; + const uint16_t stride_y = conv_params->stride.h; + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + const int32_t input_offset = conv_params->input_offset; + + const int32_t groups = input_ch / kernel_ch; + const int32_t rhs_cols = kernel_x * kernel_y * kernel_ch; + const int32_t output_ch_per_group = output_ch / groups; + + const int32_t *output_mult = quant_params->multiplier; + const int32_t *output_shift = quant_params->shift; + + if (input_ch % groups != 0 || output_ch % groups != 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + // For upscale_dims == 
2, the actual index of the input data is the index of the upscaled input divided by two. In + // the ordinary case, there is no difference. The division is implemented as a rshift for optimization purposes. + uint32_t y_rshift = 0; + uint32_t x_rshift = 0; + + if (upscale_dims) + { + y_rshift = upscale_dims->h == 2 ? 1 : 0; + x_rshift = upscale_dims->w == 2 ? 1 : 0; + } + + const int32_t input_x_rshifted = input_x >> x_rshift; + const int32_t input_y_rshifted = input_y >> y_rshift; + + const int32_t remainder = rhs_cols % 4; + const int32_t aligned_rhs_cols = remainder != 0 ? rhs_cols + 4 - remainder : rhs_cols; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + +#if defined(ARM_MATH_MVEI) + const int32_t aligned_rhs_cols_offset = aligned_rhs_cols - rhs_cols; + + /* Generate up to four columns from the input tensor a GEMM computation */ + int8_t *im2col_buf = (int8_t *)buffer_a; +#else + /* Use as a ping-pong buffer for unordered elements */ + int8_t *im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2; + int16_t *im2col_buf_start_s16 = buffer_a; +#endif + int32_t lhs_rows = 0; + + const int8_t *filter_data_ptr = &filter_data[0]; + const int32_t *bias_data_ptr = &bias_data[0]; + const int32_t *output_mult_ptr = &output_mult[0]; + const int32_t *output_shift_ptr = &output_shift[0]; + + /* This part implements the im2col function */ + for (int32_t i_group = 0; i_group < groups; i_group++) + { + int8_t *out = output_data + i_group * output_ch_per_group; + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + + if (y_rshift == 1 || x_rshift == 1) + { + // Fill complete buf with -input_offset + arm_memset_s8( + im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * kernel_ch * kernel_x * kernel_y); + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + const int32_t 
k_y = base_idx_y + dilation_y * i_ker_y; + + // Don't copy data when padding, or for every second row if stride_y == 2 + if ((k_y < 0 || k_y >= input_y) || (k_y % 2 && y_rshift == 1)) + { + im2col_buf += kernel_ch * kernel_x; + } + else + { + const int32_t k_y_rshifted = k_y >> y_rshift; + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + // Don't copy data when padding, or for every second element if stride_x == 2 + if ((k_x >= 0 && k_x < input_x) && ((k_x % 2 == 0) || x_rshift == 0)) + { + const int32_t k_x_rshifted = k_x >> x_rshift; + arm_memcpy_s8(im2col_buf, + input_data + + (k_y_rshifted * input_x_rshifted + k_x_rshifted) * input_ch, + sizeof(int8_t) * kernel_ch); + } + im2col_buf += kernel_ch; + } + } + } + } + else + { + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) + { + arm_memset_s8(im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * kernel_ch); + } + else + { + arm_memcpy_s8(im2col_buf, + input_data + (k_y * input_x + k_x) * input_ch + i_group * kernel_ch, + sizeof(int8_t) * kernel_ch); + } + im2col_buf += kernel_ch; + } + } + } + lhs_rows++; + +#if defined(ARM_MATH_MVEI) + im2col_buf += aligned_rhs_cols_offset; + + /* Computation is filed for every 4 columns */ + if (lhs_rows == 4) + { + arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a, + filter_data_ptr, + bias_data_ptr, + out, + output_mult_ptr, + output_shift_ptr, + lhs_rows, + output_ch_per_group, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + output_ch, + aligned_rhs_cols); + + out += lhs_rows * output_ch; + + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; + } +#else + #if defined(ARM_MATH_DSP) + /* Copy one column with input offset and 
no ordering */ + arm_s8_to_s16_unordered_with_offset( + im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset); + #else + + arm_q7_to_q15_with_offset( + im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset); + + #endif + im2col_buf_start_s16 += aligned_rhs_cols; + + if (lhs_rows == 2) + { + if (groups > 1) + { + out = arm_nn_mat_mult_kernel_row_offset_s8_s16(filter_data_ptr, + buffer_a, + output_ch_per_group, + output_shift_ptr, + output_mult_ptr, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols, + aligned_rhs_cols, + bias_data_ptr, + output_ch, + out); + } + else + { + out = arm_nn_mat_mult_kernel_s8_s16(filter_data_ptr, + buffer_a, + output_ch_per_group, + output_shift_ptr, + output_mult_ptr, + out_offset, + out_activation_min, + out_activation_max, + rhs_cols, + aligned_rhs_cols, + bias_data_ptr, + out); + } + + /* counter reset */ + im2col_buf_start_s16 = buffer_a; + im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2; + lhs_rows = 0; + } +#endif + } + } + + if (out == NULL) + { + return ARM_CMSIS_NN_NO_IMPL_ERROR; + } + + /* Handle left over columns */ + if (lhs_rows != 0) + { +#if defined(ARM_MATH_MVEI) + arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a, + filter_data_ptr, + bias_data_ptr, + out, + output_mult_ptr, + output_shift_ptr, + lhs_rows, + output_ch_per_group, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max, + output_ch, + aligned_rhs_cols); + + out += lhs_rows * output_ch; + lhs_rows = 0; + im2col_buf = (int8_t *)buffer_a; +#else // #if defined(ARM_MATH_MVEI) + + const int8_t *ker_a = filter_data_ptr; + int i; + + for (i = 0; i < output_ch_per_group; i++) + { + /* Load the accumulator with bias first */ + int32_t sum = 0; + if (bias_data_ptr) + { + sum = bias_data_ptr[i]; + } + + const int16_t *ip_as_col = buffer_a; + + #if defined(ARM_MATH_DSP) + /* 4 multiply and accumulates are done in one loop. 
*/ + uint16_t col_count = rhs_cols / 4; + while (col_count) + { + int32_t ker_a1, ker_a2; + int32_t ip_b1, ip_b2; + + ker_a = read_and_pad_reordered(ker_a, &ker_a1, &ker_a2); + + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = SMLAD(ker_a2, ip_b2, sum); + + col_count--; + } + /* Handle left over mac */ + col_count = rhs_cols & 0x3; + #else + uint16_t col_count = rhs_cols; + + #endif + while (col_count) + { + int8_t ker_a1 = *ker_a++; + int16_t ip_b1 = *ip_as_col++; + + sum += ker_a1 * ip_b1; + col_count--; + } + + sum = arm_nn_requantize(sum, output_mult_ptr[i], output_shift_ptr[i]); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + *out++ = (int8_t)sum; + } + + im2col_buf_start_s16 = buffer_a; + im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2; + lhs_rows = 0; +#endif // #if defined(ARM_MATH_MVEI) + } + filter_data_ptr += output_ch_per_group * rhs_cols; + bias_data_ptr += output_ch_per_group; + output_mult_ptr += output_ch_per_group; + output_shift_ptr += output_ch_per_group; + } + /* Advance to the next batch */ + input_data += (input_x_rshifted * input_y_rshifted * input_ch); + output_data += (output_x * output_y * output_ch); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c new file mode 100644 index 0000000..6625ee9 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright 2021-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_wrapper_s16.c + * Description: s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * $Date: 23 April 2024 + * $Revision: V.3.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Convolution layer + * + * Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_convolve_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const cmsis_nn_bias_data *bias_data, + const cmsis_nn_dims *output_dims, + int16_t *output_data) +{ + return arm_convolve_s16(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s4.c new file mode 100644 index 0000000..ad3980c --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s4.c @@ -0,0 +1,142 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_wrapper_s4.c + * Description: s4 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. 
See header files for details. + * + * $Date: 27 May 2024 + * $Revision: V.1.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Convolution layer + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_convolve_wrapper_s4(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s4_fast(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + else + { + return arm_convolve_1x1_s4(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + } + else if ((input_dims->h == 1) && conv_params->dilation.w == 1 && (filter_dims->h == 1) && + ((conv_params->stride.w * input_dims->c) % 4 == 0) && (input_dims->c == filter_dims->c)) + { + return arm_convolve_1_x_n_s4(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +#if defined(ARM_MATH_MVEI) + else if (((filter_dims->h * filter_dims->w * input_dims->c) & 0x1) == 0) + { + return 
arm_convolve_even_s4(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +#endif + else + { + return arm_convolve_s4(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +} +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c new file mode 100644 index 0000000..097a41e --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c @@ -0,0 +1,129 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_wrapper_s8.c + * Description: s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. 
+ * + * $Date: 04 November 2024 + * $Revision: V.2.5.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Convolution layer + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (filter_dims->w == 1) && + (filter_dims->h == 1) && (conv_params->dilation.w == 1 && conv_params->dilation.h == 1) && + (input_dims->c == filter_dims->c)) + { + if ((conv_params->stride.w == 1) && (conv_params->stride.h == 1)) + { + return arm_convolve_1x1_s8_fast(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + else + { + return arm_convolve_1x1_s8(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + } + else if ((input_dims->h == 1) && conv_params->dilation.w == 1 && (filter_dims->h == 1) && + ((conv_params->stride.w * input_dims->c) % 4 == 0) && (input_dims->c == filter_dims->c)) + { + return arm_convolve_1_x_n_s8(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + else + { + return arm_convolve_s8(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, 
+ filter_data, + bias_dims, + bias_data, + NULL, + output_dims, + output_data); + } +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c new file mode 100644 index 0000000..3d8e6c2 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c @@ -0,0 +1,282 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_3x3_s8.c + * Description: Optimized s8 depthwise convolution function for channel + * multiplier of 1 and 3x3 kernel size. 
+ * + * $Date: 5 January 2023 + * $Revision: V.3.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s8 depthwise convolution function with constraint that + * in_channel == out_channel and kernel_x == kernel_y == 3 with pads at most 1 + * + * Refer prototype header file for details. + * + */ + +arm_cmsis_nn_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)ctx; + (void)bias_dims; + + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_ch = input_dims->c; + const int32_t output_ch = output_dims->c; + const int32_t pad_x = dw_conv_params->padding.w; + const int32_t pad_y = dw_conv_params->padding.h; + const int32_t stride_x = dw_conv_params->stride.w; + const int32_t stride_y = dw_conv_params->stride.h; + const int32_t *output_shift = quant_params->shift; + const int32_t *output_mult = quant_params->multiplier; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_offset = dw_conv_params->output_offset; + const int32_t input_offset = dw_conv_params->input_offset; + const int32_t output_activation_min = dw_conv_params->activation.min; + const int32_t output_activation_max = dw_conv_params->activation.max; + + /* Check input constraints input_ch == output_ch */ + if (input_ch != output_ch) + { + return 
ARM_CMSIS_NN_ARG_ERROR; + } + /* Check input constraints pad_x <= 1 */ + if (pad_x > 1 || filter_dims->w != 3 || filter_dims->h != 3) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + const int32_t *bias_base = bias; + for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + { + for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + int32_t in_ch = 0; + int32_t ker_w_start = MAX(0, -in_w); + + bias = bias_base; + for (; in_ch <= (input_ch - 4); in_ch += 4) + { + int32_t out_buff0 = 0; + int32_t out_buff1 = 0; + int32_t out_buff2 = 0; + int32_t out_buff3 = 0; + if (bias) + { + out_buff0 = *bias++; + out_buff1 = *bias++; + out_buff2 = *bias++; + out_buff3 = *bias++; + } + + const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; + const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; +#if defined(ARM_MATH_DSP) + const uint32_t lhs_offset_s16x2 = PKHBT(input_offset, input_offset, 16); + + for (int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) + { + int32_t in_val = 0; + int32_t ker_val = 0; + int32_t in_val_1 = 0; + int32_t ker_val_1 = 0; + + if (ker_w_start == 0) + { + in_val = arm_nn_read_s8x4(input_ptr); + ker_val = arm_nn_read_s8x4(kernel_ptr); + + in_val_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)in_val, 8); + ker_val_1 = SXTB16_RORn((uint32_t)ker_val, 8); + + out_buff1 = SMLABB(in_val_1, ker_val_1, out_buff1); + in_val = SXTAB16(lhs_offset_s16x2, (uint32_t)in_val); + out_buff3 = SMLATT(in_val_1, ker_val_1, out_buff3); + ker_val = SXTB16((uint32_t)ker_val); + out_buff0 = SMLABB(in_val, ker_val, out_buff0); + out_buff2 = SMLATT(in_val, ker_val, out_buff2); + } + + in_val = arm_nn_read_s8x4(input_ptr + input_ch); + ker_val = arm_nn_read_s8x4(kernel_ptr + input_ch); + in_val_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)in_val, 8); + ker_val_1 = SXTB16_RORn((uint32_t)ker_val, 8); 
+ + out_buff1 = SMLABB(in_val_1, ker_val_1, out_buff1); + in_val = SXTAB16(lhs_offset_s16x2, (uint32_t)in_val); + out_buff3 = SMLATT(in_val_1, ker_val_1, out_buff3); + ker_val = SXTB16((uint32_t)ker_val); + out_buff0 = SMLABB(in_val, ker_val, out_buff0); + out_buff2 = SMLATT(in_val, ker_val, out_buff2); + + if ((input_x - in_w) >= 3) + { + in_val = arm_nn_read_s8x4(input_ptr + (input_ch << 1)); + ker_val = arm_nn_read_s8x4(kernel_ptr + (input_ch << 1)); + in_val_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)in_val, 8); + ker_val_1 = SXTB16_RORn((uint32_t)ker_val, 8); + + out_buff1 = SMLABB(in_val_1, ker_val_1, out_buff1); + in_val = SXTAB16(lhs_offset_s16x2, (uint32_t)in_val); + out_buff3 = SMLATT(in_val_1, ker_val_1, out_buff3); + ker_val = SXTB16((uint32_t)ker_val); + out_buff0 = SMLABB(in_val, ker_val, out_buff0); + out_buff2 = SMLATT(in_val, ker_val, out_buff2); + } + + input_ptr += (input_ch * input_x); + kernel_ptr += (input_ch * 3); + } + +#else + + for (int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) + { + int32_t in_val = 0; + int32_t ker_val = 0; + + if (ker_w_start == 0) + { + in_val = arm_nn_read_s8x4(input_ptr); + ker_val = arm_nn_read_s8x4(kernel_ptr); + out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + } + + in_val = arm_nn_read_s8x4(input_ptr + input_ch); + ker_val = arm_nn_read_s8x4(kernel_ptr + input_ch); + + out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + + if ((input_x - in_w) >= 3) + { + in_val = 
arm_nn_read_s8x4(input_ptr + (input_ch << 1)); + ker_val = arm_nn_read_s8x4(kernel_ptr + (input_ch << 1)); + + out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + } + + input_ptr += (input_ch * input_x); + kernel_ptr += (input_ch * 3); + } +#endif + + out_buff0 = arm_nn_requantize(out_buff0, output_mult[in_ch + 0], output_shift[in_ch + 0]); + out_buff1 = arm_nn_requantize(out_buff1, output_mult[in_ch + 1], output_shift[in_ch + 1]); + out_buff2 = arm_nn_requantize(out_buff2, output_mult[in_ch + 2], output_shift[in_ch + 2]); + out_buff3 = arm_nn_requantize(out_buff3, output_mult[in_ch + 3], output_shift[in_ch + 3]); + + out_buff0 += output_offset; + out_buff1 += output_offset; + out_buff2 += output_offset; + out_buff3 += output_offset; + + out_buff0 = MIN(MAX(out_buff0, output_activation_min), output_activation_max); + out_buff1 = MIN(MAX(out_buff1, output_activation_min), output_activation_max); + out_buff2 = MIN(MAX(out_buff2, output_activation_min), output_activation_max); + out_buff3 = MIN(MAX(out_buff3, output_activation_min), output_activation_max); + + output[out_idx++] = (int8_t)out_buff0; + output[out_idx++] = (int8_t)out_buff1; + output[out_idx++] = (int8_t)out_buff2; + output[out_idx++] = (int8_t)out_buff3; + } + + // Leftover + for (; in_ch < input_ch; ++in_ch) + { + int32_t out_buff = 0; + if (bias) + { + out_buff = *bias++; + } + + const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; + const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; + + for (int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) + { + if (ker_w_start == 0) + { + out_buff += (*(input_ptr) + input_offset) * *(kernel_ptr); + } + + out_buff += 
(*(input_ptr + input_ch) + input_offset) * *(kernel_ptr + input_ch); + + if ((input_x - in_w) >= 3) + { + out_buff += (*(input_ptr + (input_ch << 1)) + input_offset) * *(kernel_ptr + (input_ch << 1)); + } + + input_ptr += (input_ch * input_x); + kernel_ptr += (input_ch * 3); + } + + out_buff = arm_nn_requantize(out_buff, output_mult[in_ch], output_shift[in_ch]); + out_buff += output_offset; + out_buff = MIN(MAX(out_buff, output_activation_min), output_activation_max); + output[out_idx++] = (int8_t)out_buff; + } + } + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c new file mode 100644 index 0000000..93f6bbe --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c @@ -0,0 +1,451 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_fast_s16.c + * Description: Optimized s16 depthwise separable convolution function for + * channel multiplier of 1. 
+ * + * $Date: 19 March 2024 + * $Revision: V.1.4.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel + * + * Refer prototype header file for details. + * + */ + +arm_cmsis_nn_status arm_depthwise_conv_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + int16_t *output) +{ + const int32_t input_ch = input_dims->c; + const int32_t output_ch = output_dims->c; + + /* Check input constraints input_ch == output_ch */ + if (input_ch != output_ch) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (filter_dims->w * filter_dims->h >= MAX_COL_COUNT) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_depthwise_conv_fast_s16_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + +#if defined(ARM_MATH_DSP) + (void)bias_dims; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_batches = input_dims->n; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t pad_x = dw_conv_params->padding.w; + const int32_t pad_y = dw_conv_params->padding.h; + const int32_t stride_x = dw_conv_params->stride.w; + const int32_t stride_y = dw_conv_params->stride.h; + const int32_t *output_shift = quant_params->shift; + const int32_t *output_mult = quant_params->multiplier; 
+ const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_activation_min = dw_conv_params->activation.min; + const int32_t output_activation_max = dw_conv_params->activation.max; + int16_t *buffer_a = (int16_t *)ctx->buf; + + #if defined(ARM_MATH_MVEI) + int16_t *lhs_buffer = buffer_a; + int16_t *out = output; + int buffer_count = 0; + const int32_t kernel_size = kernel_x * kernel_y; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + /* This part implements the im2col function */ + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + memset(lhs_buffer, (int16_t)0, (uint32_t)(input_ch * sizeof(int16_t))); + } + else + { + arm_memcpy_q15(lhs_buffer, + (int16_t *)(input + (i_ker_y * input_x + i_ker_x) * input_ch), + (uint32_t)(input_ch * sizeof(int16_t))); + } + lhs_buffer += input_ch; + } + } + buffer_count++; + if (buffer_count == 4) + { + lhs_buffer = buffer_a; + + out = arm_nn_depthwise_conv_nt_t_s16(lhs_buffer, + kernel, + input_ch, + output_shift, + output_mult, + output_activation_min, + output_activation_max, + kernel_size, + bias, + out); + buffer_count = 0; + } + } + } + input += input_x * input_y * input_ch; + } + + /* Handle left over buffers */ + lhs_buffer = buffer_a; + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (input_ch + 3) / 4; + int32_t num_ch_to_process = input_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; num_ch_to_process -= 4, offset += 4, i_loop_cnt++) + { + const int8_t *row_0 = kernel + offset; + const int16_t *col_0 = 
lhs_buffer + (kernel_size * input_ch * i_buf) + offset; + + int32x4_t out_0 = vdupq_n_s32(0); + + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + const int32x4_t ker_0 = vldrbq_s32(row_0); + + int32x4_t ip_0 = vldrhq_s32(col_0); + out_0 += vmulq_s32(ip_0, ker_0); + + col_0 += input_ch; + row_0 += input_ch; + } + + int64_t in_requantize_0 = (int64_t)out_0[0]; + int64_t in_requantize_1 = (int64_t)out_0[1]; + int64_t in_requantize_2 = (int64_t)out_0[2]; + int64_t in_requantize_3 = (int64_t)out_0[3]; + + if (bias) + { + in_requantize_0 += bias[offset]; + in_requantize_1 += bias[offset + 1]; + in_requantize_2 += bias[offset + 2]; + in_requantize_3 += bias[offset + 3]; + } + + int32_t reduced_multiplier_0 = REDUCE_MULTIPLIER(output_mult[offset]); + int32_t reduced_multiplier_1 = REDUCE_MULTIPLIER(output_mult[offset + 1]); + int32_t reduced_multiplier_2 = REDUCE_MULTIPLIER(output_mult[offset + 2]); + int32_t reduced_multiplier_3 = REDUCE_MULTIPLIER(output_mult[offset + 3]); + + out_0[0] = arm_nn_requantize_s64(in_requantize_0, reduced_multiplier_0, output_shift[offset]); + out_0[1] = arm_nn_requantize_s64(in_requantize_1, reduced_multiplier_1, output_shift[offset + 1]); + out_0[2] = arm_nn_requantize_s64(in_requantize_2, reduced_multiplier_2, output_shift[offset + 2]); + out_0[3] = arm_nn_requantize_s64(in_requantize_3, reduced_multiplier_3, output_shift[offset + 3]); + + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + + mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); + vstrhq_p_s32(out, out_0, p); + + out += 4; + } + + const int tail_ch = input_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + } + + #else // ARM_MATH_DSP + + /* Run the following code in cores using DSP extension */ + int16_t *const col_buffer_start = buffer_a; + int16_t *col_buffer = col_buffer_start; + const int64_t *const bias_start_pos = bias; + const int32_t *const out_mult_start_pos = 
output_mult; + const int32_t *const out_shift_start_pos = output_shift; + uint16_t row_count; + uint16_t row_shift; + int32_t result; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + + /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than + along the x axis */ + const int ker_y_start = MAX(0, -base_idx_y); + /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + + int32_t index = 0; + if (ker_y_start != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * ker_y_start * sizeof(int16_t)); + index += (kernel_x * input_ch) * ker_y_start; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + + if (idx_x < 0 || idx_x >= input_x) + { + memset(&col_buffer[index], 0, input_ch * sizeof(int16_t)); + } + else + { + arm_memcpy_q15(&col_buffer[index], + input + (idx_y * input_x + idx_x) * input_ch, + input_ch * sizeof(int16_t)); + } + index += input_ch; + } + } + + const int diff = kernel_y - ker_y_end; + if (diff != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(int16_t)); + } + + row_count = output_ch / 4; + row_shift = 0; + bias = bias_start_pos; + output_mult = out_mult_start_pos; + output_shift = out_shift_start_pos; + + while (row_count) + { + int32_t sum_1 = 0; + int32_t sum_2 = 0; + int32_t sum_3 = 0; + int32_t sum_4 = 0; + + int32_t output_mult_1 = REDUCE_MULTIPLIER(output_mult[0]); + int32_t output_mult_2 = REDUCE_MULTIPLIER(output_mult[1]); + int32_t output_mult_3 = 
REDUCE_MULTIPLIER(output_mult[2]); + int32_t output_mult_4 = REDUCE_MULTIPLIER(output_mult[3]); + output_mult += 4; + + uint16_t col_count = (kernel_x * kernel_y) / 2; + int16_t *col_pos = col_buffer_start + row_shift; + const int8_t *row_pos = kernel + row_shift; + row_shift += 4; + + while (col_count) + { + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + int32_t row_a1, row_a2, row_b1, row_b2, col_a, row_c, col_b, col_c; + + /* Read 4 weights */ + row_b1 = arm_nn_read_s8x4(row_pos); + row_a1 = arm_nn_read_s8x4(row_pos + input_ch); + col_a = arm_nn_read_s16x2(col_pos); + col_b = arm_nn_read_s16x2(col_pos + input_ch); + + row_a2 = SXTB16(row_b1); + row_b1 = SXTB16(ROR(row_b1, 8)); + + row_b2 = SXTB16(row_a1); + row_a1 = SXTB16(ROR(row_a1, 8)); + + col_c = PKHBT(col_b, col_a, 16); + col_a = PKHTB(col_b, col_a, 16); + row_c = PKHBT(row_b2, row_a2, 16); + sum_1 = SMLAD(col_c, row_c, sum_1); + + row_c = PKHBT(row_b1, row_a1, 16); + sum_2 = SMLAD(col_a, row_c, sum_2); + + col_a = arm_nn_read_s16x2(col_pos + 2); + col_b = arm_nn_read_s16x2(col_pos + input_ch + 2); + + col_c = PKHBT(col_b, col_a, 16); + col_a = PKHTB(col_b, col_a, 16); + row_c = PKHTB(row_a2, row_b2, 16); + sum_3 = SMLAD(col_c, row_c, sum_3); + + row_c = PKHTB(row_a1, row_b1, 16); + sum_4 = SMLAD(col_a, row_c, sum_4); + + row_pos += input_ch << 1; + col_pos += input_ch << 1; + col_count--; + } + + col_count = (kernel_x * kernel_y) & 0x1; + while (col_count) + { + sum_1 += row_pos[0] * col_pos[0]; + sum_2 += row_pos[1] * col_pos[1]; + sum_3 += row_pos[2] * col_pos[2]; + sum_4 += row_pos[3] * col_pos[3]; + + row_pos += input_ch; + col_pos += input_ch; + + col_count--; + } + + int64_t acc_1 = sum_1; + int64_t acc_2 = sum_2; + int64_t acc_3 = sum_3; + int64_t acc_4 = sum_4; + + if (bias) + { + acc_1 += *bias++; + acc_2 += *bias++; + acc_3 += *bias++; + acc_4 += 
*bias++; + } + + result = arm_nn_requantize_s64(acc_1, output_mult_1, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + + result = arm_nn_requantize_s64(acc_2, output_mult_2, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + + result = arm_nn_requantize_s64(acc_3, output_mult_3, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + + result = arm_nn_requantize_s64(acc_4, output_mult_4, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + + row_count--; + } + + row_count = output_ch & 0x3; + while (row_count) + { + int16_t *col_pos = col_buffer_start + row_shift; + const int8_t *row_pos = kernel + row_shift; + int32_t sum = 0; + const uint16_t col_count = (kernel_x * kernel_y); + row_shift += 1; + + for (int i = 0; i < col_count; i++) + { + sum += row_pos[i * input_ch] * col_pos[i * input_ch]; + } + int64_t acc = sum; + if (bias) + { + acc += *bias++; + } + result = arm_nn_requantize_s64(acc, REDUCE_MULTIPLIER(*output_mult), *output_shift++); + output_mult++; + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + + row_count--; + } + // clear counter and pointers + col_buffer = col_buffer_start; + } + } + + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } + #endif +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + return arm_depthwise_conv_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + kernel, + bias_dims, + bias, + output_dims, + output); +#endif /* ARM_MATH_MVEI | ARM_MATH_DSP */ + + /* Return to application */ + 
return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s16.c new file mode 100644 index 0000000..fb0b8e1 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s16.c @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_get_buffer_sizes_s16.c + * Description: Collection of get buffer size functions for the various s16 convolution layer functions. 
+ * + * $Date: 13 January 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNconv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ +__STATIC_INLINE int32_t arm_depthwise_conv_fast_s16_get_buffer_size_mve(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + /* The + 8 accounts for a worst case out of bounds read of the lhs buffers in the *_nt_t_* function. */ + return 4 * input_dims->c * filter_dims->w * filter_dims->h * sizeof(int16_t) + 8; +} + +__STATIC_INLINE int32_t arm_depthwise_conv_fast_s16_get_buffer_size_dsp(const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + return input_dims->c * filter_dims->w * filter_dims->h * sizeof(int16_t); +} + +int32_t arm_depthwise_conv_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_DSP) + #if defined(ARM_MATH_MVEI) + return arm_depthwise_conv_fast_s16_get_buffer_size_mve(input_dims, filter_dims); + #else // ARM_MATH_DSP + return arm_depthwise_conv_fast_s16_get_buffer_size_dsp(input_dims, filter_dims); + #endif +#else + (void)input_dims; + (void)filter_dims; + return 0; +#endif +} + +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)output_dims; + + int32_t size = 0; + + if (USE_FAST_DW_CONV_S16_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + size = arm_depthwise_conv_fast_s16_get_buffer_size(input_dims, filter_dims); + } + + return size; +} + +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, 
+ const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)output_dims; + + int32_t size = 0; + + if (USE_FAST_DW_CONV_S16_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + size = arm_depthwise_conv_fast_s16_get_buffer_size_mve(input_dims, filter_dims); + } + + return size; +} + +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)output_dims; + + int32_t size = 0; + + if (USE_FAST_DW_CONV_S16_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + size = arm_depthwise_conv_fast_s16_get_buffer_size_dsp(input_dims, filter_dims); + } + + return size; +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s4.c new file mode 100644 index 0000000..141e6bc --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s4.c @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_get_buffer_sizes_s4.c + * Description: Collection of get buffer size functions for the various s4 depthwise convolution layer functions. + * + * $Date: 17 April 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ + +int32_t arm_depthwise_conv_s4_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_depthwise_conv_s8_opt_get_buffer_size_mve(input_dims, filter_dims); +#else + return arm_depthwise_conv_s8_opt_get_buffer_size_dsp(input_dims, filter_dims); +#endif +} + +int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + int32_t size = 0; + + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + size = arm_depthwise_conv_s4_opt_get_buffer_size(input_dims, filter_dims); + } + + return size; +} + +int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + return arm_depthwise_conv_wrapper_s4_get_buffer_size(dw_conv_params, input_dims, filter_dims, output_dims); +} + +int32_t arm_depthwise_conv_wrapper_s4_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const 
cmsis_nn_dims *output_dims) +{ + int32_t size = 0; + + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + size = arm_depthwise_conv_s8_opt_get_buffer_size_mve(input_dims, filter_dims); + } + + return size; +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s8.c new file mode 100644 index 0000000..70d6eb5 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_get_buffer_sizes_s8.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for the various s8 convolution layer functions. 
+ * + * $Date: 1 November 2024 + * $Revision: V.1.3.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ + +__STATIC_INLINE int32_t +arm_deptwise_conv_s8_one_in_ch_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + const cmsis_nn_dims filter_conv_dims = {filter_dims->c, filter_dims->h, filter_dims->w, filter_dims->n}; + const cmsis_nn_conv_params conv_params = {dw_conv_params->input_offset, + dw_conv_params->output_offset, + dw_conv_params->stride, + dw_conv_params->padding, + dw_conv_params->dilation, + dw_conv_params->activation}; + + int32_t size = + arm_convolve_wrapper_s8_get_buffer_size_mve(&conv_params, input_dims, &filter_conv_dims, output_dims); + size += filter_dims->c * filter_dims->h * filter_dims->w * filter_dims->n; + + return size; +} + +int32_t arm_depthwise_conv_s8_opt_get_buffer_size_mve(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ + (void)input_dims; + return (4 * CH_IN_BLOCK_MVE * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int8_t); +} + +int32_t arm_depthwise_conv_s8_opt_get_buffer_size_dsp(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ + return (input_dims->c * filter_dims->w * filter_dims->h) * sizeof(int16_t); +} + +int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_depthwise_conv_s8_opt_get_buffer_size_mve(input_dims, filter_dims); +#elif defined(ARM_MATH_DSP) + return arm_depthwise_conv_s8_opt_get_buffer_size_dsp(input_dims, filter_dims); +#else + (void)input_dims; + 
(void)filter_dims; + return 0; +#endif +} + +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + int32_t size = 0; + +#if defined(ARM_MATH_MVEI) + if (input_dims->c == 1 && output_dims->c > CONVERT_DW_CONV_WITH_ONE_INPUT_CH_AND_OUTPUT_CH_ABOVE_THRESHOLD) + { + return arm_deptwise_conv_s8_one_in_ch_get_buffer_size_mve(dw_conv_params, input_dims, filter_dims, output_dims); + } +#endif + + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { +#if !defined(ARM_MATH_MVEI) + if (filter_dims->w == 3 && filter_dims->h == 3 && dw_conv_params->padding.h <= 1 && + dw_conv_params->padding.w <= 1) + { + return size; + } +#endif + size = arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims); + } + + return size; +} + +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size_dsp(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + int32_t size = 0; + + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + if (filter_dims->w == 3 && filter_dims->h == 3 && dw_conv_params->padding.h <= 1 && + dw_conv_params->padding.w <= 1) + { + return size; + } + size = arm_depthwise_conv_s8_opt_get_buffer_size_dsp(input_dims, filter_dims); + } + + return size; +} + +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size_mve(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + int32_t size = 0; + + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + size = 
arm_depthwise_conv_s8_opt_get_buffer_size_mve(input_dims, filter_dims); + } + + if (input_dims->c == 1 && output_dims->c > CONVERT_DW_CONV_WITH_ONE_INPUT_CH_AND_OUTPUT_CH_ABOVE_THRESHOLD) + { + const int32_t to_conv_size = + arm_deptwise_conv_s8_one_in_ch_get_buffer_size_mve(dw_conv_params, input_dims, filter_dims, output_dims); + + /* Special case since this is compiler dependent. + Note it is recommended to use arm_depthwise_conv_wrapper_s8_get_buffer_size() instead. */ + if (to_conv_size > size) + { + return to_conv_size; + } + } + + return size; +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c new file mode 100644 index 0000000..6587b87 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c @@ -0,0 +1,292 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s16.c + * Description: s16 version of depthwise convolution. 
+ * + * $Date: 26 October 2022 + * $Revision: V.2.0.1 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +static void __attribute__((unused)) depthwise_conv_s16_mult_4_s16(const int16_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int64_t *bias, + int16_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max) +{ + for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + { + for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; + ++in_ch, out_ch += ch_mult) + { + for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) + { + int32_t out_buff32[4] = {REDUCE_MULTIPLIER(output_mult[out_ch + 0 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 1 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 2 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 3 + mult_tile])}; + + int64_t out_buff[4] = {0, 0, 0, 0}; + + if (bias) + { + out_buff[0] = bias[out_ch + 0 + mult_tile]; + out_buff[1] = bias[out_ch + 1 + mult_tile]; + out_buff[2] = bias[out_ch + 2 + mult_tile]; + out_buff[3] = bias[out_ch + 3 + mult_tile]; + } + + for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); 
++ker_h) + { + int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; + int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#pragma clang loop unroll(disable) +#endif + for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); + ++ker_w, ker_idx += output_ch) + { + // TODO: Unroll of 4 with 64 bit accumulator will probably result in too much register + // spills. Try with unroll of 2 when enabling this. + int32_t in_val = input[in_idx + ker_w * input_ch]; + out_buff[0] += in_val * kernel[ker_idx + 0 + mult_tile]; + out_buff[1] += in_val * kernel[ker_idx + 1 + mult_tile]; + out_buff[2] += in_val * kernel[ker_idx + 2 + mult_tile]; + out_buff[3] += in_val * kernel[ker_idx + 3 + mult_tile]; + } + } + + out_buff32[0] = + arm_nn_requantize_s64(out_buff[0], out_buff32[0], output_shift[out_ch + 0 + mult_tile]); + out_buff32[1] = + arm_nn_requantize_s64(out_buff[1], out_buff32[1], output_shift[out_ch + 1 + mult_tile]); + out_buff32[2] = + arm_nn_requantize_s64(out_buff[2], out_buff32[2], output_shift[out_ch + 2 + mult_tile]); + out_buff32[3] = + arm_nn_requantize_s64(out_buff[3], out_buff32[3], output_shift[out_ch + 3 + mult_tile]); + + out_buff32[0] = MIN(MAX(out_buff32[0], output_activation_min), output_activation_max); + out_buff32[1] = MIN(MAX(out_buff32[1], output_activation_min), output_activation_max); + out_buff32[2] = MIN(MAX(out_buff32[2], output_activation_min), output_activation_max); + out_buff32[3] = MIN(MAX(out_buff32[3], output_activation_min), output_activation_max); + + output[out_idx++] = (int16_t)out_buff32[0]; + output[out_idx++] = (int16_t)out_buff32[1]; + output[out_idx++] = (int16_t)out_buff32[2]; + output[out_idx++] = (int16_t)out_buff32[3]; + } + } + } + } +} + +static void depthwise_conv_s16_generic_s16(const int16_t *input, + const uint16_t input_batches, + const uint16_t input_x, + const uint16_t input_y, + 
const uint16_t input_ch, + const int8_t *kernel, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int64_t *bias, + int16_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y) + +{ + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + + const int32_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[idx_out_ch]); + int64_t acc_0 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + } + + for (int 
i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + dilation_y * i_ker_y; + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t idx_x = base_idx_x + dilation_x * i_ker_x; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + + acc_0 += input[idx_0] * kernel[ker_idx_0]; + } + } + + /* Requantize and clamp output to provided range */ + int32_t result = arm_nn_requantize_s64(acc_0, reduced_multiplier, output_shift[idx_out_ch]); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + } + } + } + } + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } +} + +/* + * Basic s16 depthwise convolution function. + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + int16_t *output) +{ + const uint16_t dilation_x = dw_conv_params->dilation.w; + const uint16_t dilation_y = dw_conv_params->dilation.h; + + (void)bias_dims; + (void)ctx; + + depthwise_conv_s16_generic_s16(input, + input_dims->n, + input_dims->w, + input_dims->h, + input_dims->c, + kernel, + dw_conv_params->ch_mult, + filter_dims->w, + filter_dims->h, + dw_conv_params->padding.w, + dw_conv_params->padding.h, + dw_conv_params->stride.w, + dw_conv_params->stride.h, + bias, + output, + quant_params->shift, + quant_params->multiplier, + output_dims->w, + output_dims->h, + dw_conv_params->activation.min, + dw_conv_params->activation.max, + dilation_x, + dilation_y); 
+ + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4.c new file mode 100644 index 0000000..13f2338 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4.c @@ -0,0 +1,541 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s4.c + * Description: s4 version of depthwise convolution. 
+ * + * $Date: 13 February 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +static void depthwise_conv_s4_generic(const int8_t *input, + const int32_t input_batches, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t dilation_x, + const int32_t dilation_y) + +{ + (void)output_ch; + int i_out = 0; + int i_batch; + + const int32_t kernel_index_offset = input_ch >> 1; + if (!(input_ch % 2)) + { + for (i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + int idx_out_ch_s4 = 0; + int get_low_nibble = 1; + + // If ch_mult is 1 we can process 2 outputs at a time by doing 2 input_ch iterations + if (ch_mult == 1) + { + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch += 2, idx_out_ch_s4++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x 
+ dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[i_input_ch]; + acc_1 = bias[i_input_ch + 1]; + } + + int32_t idx_y = base_idx_y + dilation_y * ker_y_start; + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + int32_t idx_x = base_idx_x + dilation_x * ker_x_start; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + + int32_t ker_idx_0 = + (i_ker_y * kernel_x + ker_x_start) * kernel_index_offset + idx_out_ch_s4; + + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + int8_t ker_val0, ker_val1; + + ker_val0 = ((int8_t)(kernel[ker_idx_0] << 4) >> 4); + ker_val1 = (kernel[ker_idx_0] >> 4); + + acc_0 += (input[idx_0] + input_offset) * ker_val0; + acc_1 += (input[idx_0 + 1] + input_offset) * ker_val1; + + idx_0 += dilation_x * input_ch; + idx_x += dilation_x; + ker_idx_0 += kernel_index_offset; + } + idx_y += dilation_y; + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[i_input_ch], output_shift[i_input_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + output[i_out++] = acc_0; + + acc_1 = arm_nn_requantize(acc_1, output_mult[i_input_ch + 1], output_shift[i_input_ch + 1]); + acc_1 += output_offset; + acc_1 = MAX(acc_1, output_activation_min); + acc_1 
= MIN(acc_1, output_activation_max); + output[i_out++] = acc_1; + } + } + // if ch_mult is odd and greater than 1, we need to continue to process 1 output at a time + else if (ch_mult % 2) + { + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + if (idx_out_ch && (idx_out_ch % 2 == 0)) + { + idx_out_ch_s4++; + } + + int32_t acc_0 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + } + + int32_t idx_y = base_idx_y + dilation_y * ker_y_start; + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + int32_t idx_x = base_idx_x + dilation_x * ker_x_start; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + + int32_t ker_idx_0 = + (i_ker_y * kernel_x + ker_x_start) * (kernel_index_offset * ch_mult) + + idx_out_ch_s4; + + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + int8_t ker_val0; + + if (get_low_nibble) + { + ker_val0 = ((int8_t)(kernel[ker_idx_0] << 4) >> 4); + } + else + { + ker_val0 = (kernel[ker_idx_0] >> 4); + } + + acc_0 += (input[idx_0] + input_offset) * ker_val0; + + 
idx_0 += dilation_x * input_ch; + idx_x += dilation_x; + ker_idx_0 += (kernel_index_offset * ch_mult); + } + idx_y += dilation_y; + } + get_low_nibble = !get_low_nibble; + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + output[i_out++] = acc_0; + } + } + } + // if ch_mult is even then we can do 2 outputs at a time by processing 2 ch_mult iterations + else + { + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + // ch_mult is limited to being a multiple of input_ch. + // This means that we can assume ch_mult is a multiple of 2 given that input_ch is even + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult += 2, idx_out_ch_s4++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + acc_1 = bias[idx_out_ch + 1]; + } + + int32_t idx_y = base_idx_y + dilation_y * ker_y_start; + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + 
int32_t idx_x = base_idx_x + dilation_x * ker_x_start; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + + int32_t ker_idx_0 = + (i_ker_y * kernel_x + ker_x_start) * (kernel_index_offset * ch_mult) + + idx_out_ch_s4; + + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + int8_t ker_val0, ker_val1; + + ker_val0 = ((int8_t)(kernel[ker_idx_0] << 4) >> 4); + ker_val1 = (kernel[ker_idx_0] >> 4); + + acc_0 += (input[idx_0] + input_offset) * ker_val0; + acc_1 += (input[idx_0] + input_offset) * ker_val1; + + idx_0 += dilation_x * input_ch; + idx_x += dilation_x; + ker_idx_0 += (kernel_index_offset * ch_mult); + } + idx_y += dilation_y; + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + output[i_out++] = acc_0; + + acc_1 = + arm_nn_requantize(acc_1, output_mult[idx_out_ch + 1], output_shift[idx_out_ch + 1]); + acc_1 += output_offset; + acc_1 = MAX(acc_1, output_activation_min); + acc_1 = MIN(acc_1, output_activation_max); + output[i_out++] = acc_1; + } + } + } + } + } + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } + } + else + { + for (i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + int idx_out_ch_s4 = 0; + int get_low_nibble = 1; + + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + if (idx_out_ch && (idx_out_ch % 2 == 0)) + { + idx_out_ch_s4++; + } + + int16_t kernel_index_offset_uneven = 0; + int32_t acc_0 = 0; 
+ + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + } + int32_t idx_y = base_idx_y + dilation_y * ker_y_start; + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + int32_t idx_x = base_idx_x + dilation_x * ker_x_start; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + + int32_t ker_idx_0 = + (i_ker_y * kernel_x + ker_x_start) * (kernel_index_offset * ch_mult) + + idx_out_ch_s4 + kernel_index_offset_uneven; + + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + int8_t ker_val; + + if (get_low_nibble) + { + get_low_nibble = 0; + ker_val = ((int8_t)(kernel[ker_idx_0] << 4) >> 4); + } + else + { + ker_val = (kernel[ker_idx_0] >> 4); + get_low_nibble = 1; + kernel_index_offset_uneven++; + } + + acc_0 += (input[idx_0] + input_offset) * ker_val; + idx_0 += dilation_x * input_ch; + idx_x += dilation_x; + ker_idx_0 += (kernel_index_offset * ch_mult) + get_low_nibble; + } + idx_y += dilation_y; + } + if ((kernel_x * kernel_y) % 2) + { + get_low_nibble = !get_low_nibble; + } + get_low_nibble = !get_low_nibble; + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], 
output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + + output[i_out++] = acc_0; + } + } + } + } + + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } + } +} + +/* + * Basic s4 depthwise convolution function. + * + * Refer header file for details. + * Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s4(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)bias_dims; + (void)ctx; + + const int32_t dilation_x = dw_conv_params->dilation.w; + const int32_t dilation_y = dw_conv_params->dilation.h; + depthwise_conv_s4_generic(input, + input_dims->n, + input_dims->w, + input_dims->h, + input_dims->c, + kernel, + output_dims->c, + dw_conv_params->ch_mult, + filter_dims->w, + filter_dims->h, + dw_conv_params->padding.w, + dw_conv_params->padding.h, + dw_conv_params->stride.w, + dw_conv_params->stride.h, + bias, + output, + quant_params->shift, + quant_params->multiplier, + output_dims->w, + output_dims->h, + dw_conv_params->output_offset, + dw_conv_params->input_offset, + dw_conv_params->activation.min, + dw_conv_params->activation.max, + dilation_x, + dilation_y); + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4_opt.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4_opt.c new file mode 100644 index 0000000..0ad231a --- /dev/null +++ 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s4_opt.c @@ -0,0 +1,689 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s4_opt.c + * Description: Optimized s4 depthwise separable convolution function for + * channel multiplier of 1. + * + * $Date: 17 April 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s4 depthwise convolution function with constraint that in_channel equals out_channel + * + * Refer prototype header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_depthwise_conv_s4_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)bias_dims; + + const int32_t input_ch = input_dims->c; + const int32_t output_ch = output_dims->c; + + /* Check depth multiplier is 1 */ + if (input_ch != output_ch) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t pad_x = dw_conv_params->padding.w; + const int32_t pad_y = dw_conv_params->padding.h; + const int32_t stride_x = dw_conv_params->stride.w; + const int32_t stride_y = dw_conv_params->stride.h; + const int32_t *output_shift = quant_params->shift; + const int32_t *output_mult = quant_params->multiplier; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_offset = dw_conv_params->output_offset; + const int32_t input_offset = dw_conv_params->input_offset; + const int32_t output_activation_min = dw_conv_params->activation.min; + const int32_t output_activation_max = dw_conv_params->activation.max; + int16_t *buffer_a = (int16_t *)ctx->buf; + +#ifdef ARM_MATH_MVEI + /* Generate two columns from the input tensor */ + int8_t *lhs_buffer = (int8_t *)buffer_a; + int8_t *out = output; + int buffer_count = 0; + const int32_t kernel_size = kernel_x * kernel_y; + + const int32_t ch_loop = (input_ch + (S4_CH_IN_BLOCK_MVE - 1)) / S4_CH_IN_BLOCK_MVE; + int32_t remaining_ch = output_ch; + int32_t active_ch = MIN(S4_CH_IN_BLOCK_MVE, remaining_ch); + 
remaining_ch -= S4_CH_IN_BLOCK_MVE; + + for (int i_ch = 0; i_ch < ch_loop; i_ch++) + { + out = output + i_ch * S4_CH_IN_BLOCK_MVE; + const int8_t *input_slice = input + (i_ch * S4_CH_IN_BLOCK_MVE); + + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + arm_memset_s8(lhs_buffer, (int8_t)-input_offset, (uint32_t)active_ch); + } + else + { + arm_memcpy_s8(lhs_buffer, + input_slice + (i_ker_y * input_x + i_ker_x) * input_ch, + (uint32_t)active_ch); + } + lhs_buffer += S4_CH_IN_BLOCK_MVE; + } + } + buffer_count++; + + if (buffer_count == 4) + { + const int32_t block_offset = i_ch * S4_CH_IN_BLOCK_MVE; + lhs_buffer = (int8_t *)buffer_a; + arm_nn_depthwise_conv_nt_t_s4(lhs_buffer, + kernel + (block_offset >> 1), + input_offset, + active_ch, + input_ch, + output_shift + block_offset, + output_mult + block_offset, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias + block_offset, + out); + + out += (4 * input_ch); + buffer_count = 0; + } + } + } + /* Handle left over buffers */ + lhs_buffer = (int8_t *)buffer_a; + + int8_t *out_base = out; + const uint32x4_t gather_offset = {0, 0, 1, 1}; + const mve_pred16_t lower_nibble_mask = 3855; // 0000111100001111 + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (active_ch + 3) / 4; + int32_t num_ch_to_process = active_ch; + out = out_base + (i_buf * input_ch); + for (int i_loop_cnt = 0, offset = i_ch * S4_CH_IN_BLOCK_MVE; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, i_loop_cnt++) + { + const int8_t *col_0 = lhs_buffer + (kernel_size * S4_CH_IN_BLOCK_MVE 
* i_buf) + (i_loop_cnt * 4); + const int8_t *row_0 = kernel + (offset >> 1); + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(&bias[offset]); + } + + if (input_ch % 2) + { + int get_low_nibble = 1; + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + int32x4_t ker_0; + if (get_low_nibble) + { + ker_0 = vldrbq_gather_offset_s32(row_0, gather_offset); + ker_0 = vrshlq_m_n_s32(ker_0, 28, lower_nibble_mask); + ker_0 = vshrq_m_n_s32(ker_0, ker_0, 24, lower_nibble_mask); + + ker_0 = vshrq_n_s32(ker_0, 4); + } + else + { + int8_t temp[] = {row_0[0] >> 4, + (int8_t)(row_0[1] << 4) >> 4, + row_0[1] >> 4, + (int8_t)(row_0[2] << 4) >> 4}; + ker_0 = vldrbq_s32(temp); + } + + int32x4_t ip_0 = vldrbq_s32(col_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + get_low_nibble = !get_low_nibble; + col_0 += S4_CH_IN_BLOCK_MVE; + row_0 += (input_ch >> 1) + get_low_nibble; + } + } + else + { + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + int32x4_t ker_0 = vldrbq_gather_offset_s32(row_0, gather_offset); + ker_0 = vrshlq_m_n_s32(ker_0, 28, lower_nibble_mask); + ker_0 = vshrq_m_n_s32(ker_0, ker_0, 24, lower_nibble_mask); + + ker_0 = vshrq_n_s32(ker_0, 4); + + int32x4_t ip_0 = vldrbq_s32(col_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + col_0 += S4_CH_IN_BLOCK_MVE; + row_0 += input_ch >> 1; + } + } + + const int32x4_t mult = vldrwq_s32(&output_mult[offset]); + const int32x4_t shift = vldrwq_s32(&output_shift[offset]); + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, output_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); + vstrbq_p_s32(out, out_0, p); + + out += 4; + } + } + buffer_count = 0; + + active_ch = MIN(S4_CH_IN_BLOCK_MVE, remaining_ch); + remaining_ch -= S4_CH_IN_BLOCK_MVE; + } +#else + 
int16_t *const col_buffer_start = buffer_a; + int16_t *col_buffer = col_buffer_start; + const int32_t *const bias_start_pos = bias; + const int32_t *const out_mult_start_pos = output_mult; + const int32_t *const out_shift_start_pos = output_shift; + const uint16_t num_cols = kernel_x * kernel_y; + uint16_t row_count; + uint16_t row_shift = 0; + uint16_t col_shift = 0; + + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + + /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than + along the x axis */ + const int ker_y_start = MAX(0, -base_idx_y); + /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + + int32_t index = 0; + if (ker_y_start != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * ker_y_start * sizeof(int16_t)); + index += (kernel_x * input_ch) * ker_y_start; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + if (idx_x < 0 || idx_x >= input_x) + { + memset(&col_buffer[index], 0, input_ch * sizeof(int16_t)); + } + else + { + arm_q7_to_q15_with_offset((int8_t *)input + (idx_y * input_x + idx_x) * input_ch, + &col_buffer[index], + input_ch, + (int16_t)input_offset); + } + index += input_ch; + } + } + + const int diff = kernel_y - ker_y_end; + if (diff != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(int16_t)); + } + + row_count = output_ch / 4; + row_shift = 0; + col_shift = 0; + bias = bias_start_pos; + output_mult = out_mult_start_pos; + output_shift = out_shift_start_pos; + + if (output_ch % 2) /* Uneven number of 
channels */ + { + int get_low_nibble = 1; + + while (row_count) + { + int32_t sum = 0; + int32_t sum_2 = 0; + int32_t sum_3 = 0; + int32_t sum_4 = 0; + if (bias) + { + sum = *bias++; + sum_2 = *bias++; + sum_3 = *bias++; + sum_4 = *bias++; + } + + uint16_t col_count = num_cols / 2; + int16_t *col_pos = col_buffer_start + col_shift; + const int8_t *row_pos = kernel + row_shift; + + row_shift += 2; + col_shift += 4; + + while (col_count) + { + #ifdef ARM_MATH_DSP + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + /* Note: variable names can be improved here to align with rows and columns. */ + int32_t ip_a1, ip_a2, ip_b1, ip_b2, op_a, op_b, op_c; + + /* Read 4 weights */ + read_and_pad_s4(row_pos, &ip_a2, &ip_b1); + read_and_pad_s4_uneven(row_pos + (input_ch >> 1), &ip_a1, &ip_b2); + + op_a = arm_nn_read_s16x2(col_pos); + op_b = arm_nn_read_s16x2(col_pos + input_ch); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHBT(ip_b2, ip_a2, 16); + sum = SMLAD(op_c, op_b, sum); + + op_b = PKHBT(ip_b1, ip_a1, 16); + + sum_2 = SMLAD(op_a, op_b, sum_2); + + op_a = arm_nn_read_s16x2(col_pos + 2); + op_b = arm_nn_read_s16x2(col_pos + input_ch + 2); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHTB(ip_a2, ip_b2, 16); + sum_3 = SMLAD(op_c, op_b, sum_3); + + op_b = PKHTB(ip_a1, ip_b1, 16); + sum_4 = SMLAD(op_a, op_b, sum_4); + + #else + int8_t ker0, ker1, ker2, ker3, ker00, ker11; + + ker00 = row_pos[0]; + ker11 = row_pos[1]; + ker0 = (int8_t)(ker00 << 4) >> 4; + ker1 = ker00 >> 4; + ker2 = (int8_t)(ker11 << 4) >> 4; + ker3 = ker11 >> 4; + + sum += ker0 * col_pos[0]; + sum_2 += ker1 * col_pos[1]; + sum_3 += ker2 * col_pos[2]; + sum_4 += ker3 * col_pos[3]; + + ker11 = row_pos[1 + (input_ch >> 1)]; + ker0 = row_pos[0 + (input_ch >> 1)] >> 4; + ker1 = (int8_t)(ker11 << 4) >> 4; + ker2 = 
ker11 >> 4; + ker3 = (int8_t)(row_pos[2 + (input_ch >> 1)] << 4) >> 4; + + sum += ker0 * col_pos[0 + input_ch]; + sum_2 += ker1 * col_pos[1 + input_ch]; + sum_3 += ker2 * col_pos[2 + input_ch]; + sum_4 += ker3 * col_pos[3 + input_ch]; + + #endif + row_pos += (input_ch); + col_pos += input_ch << 1; + + col_count--; + } + + col_count = num_cols & 0x1; + + while (col_count) + { + int8_t ker0, ker1, ker2, ker3, ker00, ker11; + + ker00 = row_pos[0]; + ker11 = row_pos[1]; + + ker0 = (int8_t)(ker00 << 4) >> 4; + ker1 = ker00 >> 4; + + ker2 = (int8_t)(ker11 << 4) >> 4; + ker3 = ker11 >> 4; + + sum += ker0 * col_pos[0]; + sum_2 += ker1 * col_pos[1]; + sum_3 += ker2 * col_pos[2]; + sum_4 += ker3 * col_pos[3]; + + row_pos += input_ch >> 1; + col_pos += input_ch; + + col_count--; + } + + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + + sum_2 = arm_nn_requantize(sum_2, *output_mult++, *output_shift++); + sum_2 += output_offset; + sum_2 = MAX(sum_2, output_activation_min); + sum_2 = MIN(sum_2, output_activation_max); + *output++ = (int8_t)sum_2; + sum_3 = arm_nn_requantize(sum_3, *output_mult++, *output_shift++); + sum_3 += output_offset; + sum_3 = MAX(sum_3, output_activation_min); + sum_3 = MIN(sum_3, output_activation_max); + *output++ = (int8_t)sum_3; + + sum_4 = arm_nn_requantize(sum_4, *output_mult++, *output_shift++); + sum_4 += output_offset; + sum_4 = MAX(sum_4, output_activation_min); + sum_4 = MIN(sum_4, output_activation_max); + *output++ = (int8_t)sum_4; + + row_count--; + } + + row_count = output_ch & 0x3; + + while (row_count) + { + const int16_t *col_pos = col_buffer_start + col_shift; + const int8_t *row_pos = kernel + row_shift; + int32_t sum = 0; + int col_index = 0; + + if (bias) + { + sum = *bias++; + } + + col_shift += 1; + + for (int i = 0; i < num_cols; i++) + { + int8_t rhs = row_pos[i * (input_ch >> 1) + 
col_index]; + int8_t rhs0; + int16_t lhs0 = col_pos[i * input_ch]; + + if (get_low_nibble) + { + rhs0 = (int8_t)(rhs << 4) >> 4; + get_low_nibble = 0; + } + else + { + rhs0 = rhs >> 4; + get_low_nibble = 1; + col_index++; + } + + sum += rhs0 * lhs0; + } + + if (num_cols % 2 == 0) + { + get_low_nibble = !get_low_nibble; + } + + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + + row_count--; + + /* Last row */ + if (row_count == 1) + { + row_shift += 1; + } + } + } + else /* Even number of channels */ + { + while (row_count) + { + int32_t sum = 0; + int32_t sum_2 = 0; + int32_t sum_3 = 0; + int32_t sum_4 = 0; + if (bias) + { + sum = *bias++; + sum_2 = *bias++; + sum_3 = *bias++; + sum_4 = *bias++; + } + + uint16_t col_count = num_cols / 2; + int16_t *col_pos = col_buffer_start + col_shift; + const int8_t *row_pos = kernel + row_shift; + + row_shift += 2; + col_shift += 4; + + #ifdef ARM_MATH_DSP + while (col_count) + { + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + /* Note: variable names can be improved here to align with rows and columns. 
*/ + int32_t ip_a1, ip_a2, ip_b1, ip_b2, op_a, op_b, op_c; + + /* Read 4 weights */ + read_and_pad_s4(row_pos, &ip_a2, &ip_b1); + read_and_pad_s4(row_pos + (input_ch >> 1), &ip_b2, &ip_a1); + + op_a = arm_nn_read_s16x2(col_pos); + op_b = arm_nn_read_s16x2(col_pos + input_ch); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHBT(ip_b2, ip_a2, 16); + sum = SMLAD(op_c, op_b, sum); + + op_b = PKHBT(ip_b1, ip_a1, 16); + + sum_2 = SMLAD(op_a, op_b, sum_2); + + op_a = arm_nn_read_s16x2(col_pos + 2); + op_b = arm_nn_read_s16x2(col_pos + input_ch + 2); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHTB(ip_a2, ip_b2, 16); + sum_3 = SMLAD(op_c, op_b, sum_3); + + op_b = PKHTB(ip_a1, ip_b1, 16); + sum_4 = SMLAD(op_a, op_b, sum_4); + + row_pos += (input_ch); + col_pos += input_ch << 1; + + col_count--; + } + + col_count = num_cols & 0x1; + #else + col_count = num_cols; + #endif + while (col_count) + { + int8_t ker0, ker1, ker2, ker3, ker00, ker11; + + ker00 = row_pos[0]; + ker11 = row_pos[1]; + + ker0 = (int8_t)(ker00 << 4) >> 4; + ker1 = ker00 >> 4; + + ker2 = (int8_t)(ker11 << 4) >> 4; + ker3 = ker11 >> 4; + + sum += ker0 * col_pos[0]; + sum_2 += ker1 * col_pos[1]; + sum_3 += ker2 * col_pos[2]; + sum_4 += ker3 * col_pos[3]; + + row_pos += input_ch >> 1; + col_pos += input_ch; + + col_count--; + } + + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + + sum_2 = arm_nn_requantize(sum_2, *output_mult++, *output_shift++); + sum_2 += output_offset; + sum_2 = MAX(sum_2, output_activation_min); + sum_2 = MIN(sum_2, output_activation_max); + *output++ = (int8_t)sum_2; + sum_3 = arm_nn_requantize(sum_3, *output_mult++, *output_shift++); + sum_3 += output_offset; + sum_3 = MAX(sum_3, output_activation_min); + sum_3 = MIN(sum_3, output_activation_max); + *output++ = (int8_t)sum_3; + + 
sum_4 = arm_nn_requantize(sum_4, *output_mult++, *output_shift++); + sum_4 += output_offset; + sum_4 = MAX(sum_4, output_activation_min); + sum_4 = MIN(sum_4, output_activation_max); + *output++ = (int8_t)sum_4; + + row_count--; + } + + if (output_ch & 0x2) + { + const int16_t *col_pos = col_buffer_start + col_shift; + const int16_t *col_pos_2 = col_buffer_start + col_shift + 1; + const int8_t *row_pos = kernel + row_shift; + int32_t sum = 0; + int32_t sum2 = 0; + + if (bias) + { + sum = *bias++; + sum2 = *bias++; + } + + for (int i = 0; i < num_cols; i++) + { + int8_t rhs = row_pos[i * (input_ch >> 1)]; + + int8_t rhs_low = (int8_t)(rhs << 4) >> 4; + int8_t rhs_high = rhs >> 4; + + int16_t lhs0 = col_pos[i * input_ch]; + int16_t lhs1 = col_pos_2[i * input_ch]; + + sum += rhs_low * lhs0; + sum2 += rhs_high * lhs1; + } + + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + sum2 = arm_nn_requantize(sum2, *output_mult++, *output_shift++); + sum2 += output_offset; + sum2 = MAX(sum2, output_activation_min); + sum2 = MIN(sum2, output_activation_max); + *output++ = (int8_t)sum2; + } + } + + /* Clear counter and pointers */ + col_buffer = col_buffer_start; + } + } +#endif // ARM_MATH_MVEI + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c new file mode 100644 index 0000000..0c67079 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c @@ -0,0 +1,354 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s8.c + * Description: s8 version of depthwise convolution. + * + * $Date: 26 October 2022 + * $Revision: V.3.0.4 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +#if !defined(__ARMCC_VERSION) +__attribute__((optimize("no-unroll-loops"))) +#endif +static void +depthwise_conv_s8_mult_4(const int8_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max) +{ + const int32_t *bias_base = bias; + const int32_t *mult_base = output_mult; + const int32_t *shift_base = output_shift; + const int8_t *kernel_base = kernel; + + for (int32_t in_h = -pad_y, out_h = 0; out_h < 
output_y; in_h += stride_y, ++out_h) + { + for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + bias = bias_base; + output_mult = mult_base; + output_shift = shift_base; + for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; + ++in_ch, out_ch += ch_mult) + { + for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) + { + int32_t out_buff[4] = {0, 0, 0, 0}; + if (bias) + { + out_buff[0] = *bias++; + out_buff[1] = *bias++; + out_buff[2] = *bias++; + out_buff[3] = *bias++; + } + + for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) + { + int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; + kernel = kernel_base + mult_tile + ker_idx; + int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#pragma clang loop unroll(disable) +#endif + for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); + ++ker_w, kernel += output_ch) + { + int32_t in_val = input[in_idx + ker_w * input_ch] + input_offset; + out_buff[0] += in_val * kernel[0]; + out_buff[1] += in_val * kernel[1]; + out_buff[2] += in_val * kernel[2]; + out_buff[3] += in_val * kernel[3]; + } + } +#if defined(ARM_MATH_MVEI) + int32x4_t res = vldrwq_s32(out_buff); + res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift)); + output_mult += 4; + output_shift += 4; + res = vaddq_n_s32(res, output_offset); + + res = vmaxq_s32(res, vdupq_n_s32(output_activation_min)); + res = vminq_s32(res, vdupq_n_s32(output_activation_max)); + vstrbq_s32(output, res); + output += 4; +#else + out_buff[0] = arm_nn_requantize(out_buff[0], *output_mult++, *output_shift++); + out_buff[1] = arm_nn_requantize(out_buff[1], *output_mult++, *output_shift++); + out_buff[2] = arm_nn_requantize(out_buff[2], *output_mult++, *output_shift++); + out_buff[3] = 
arm_nn_requantize(out_buff[3], *output_mult++, *output_shift++); + + out_buff[0] += output_offset; + out_buff[1] += output_offset; + out_buff[2] += output_offset; + out_buff[3] += output_offset; + + out_buff[0] = MIN(MAX(out_buff[0], output_activation_min), output_activation_max); + out_buff[1] = MIN(MAX(out_buff[1], output_activation_min), output_activation_max); + out_buff[2] = MIN(MAX(out_buff[2], output_activation_min), output_activation_max); + out_buff[3] = MIN(MAX(out_buff[3], output_activation_min), output_activation_max); + + *output++ = (int8_t)out_buff[0]; + *output++ = (int8_t)out_buff[1]; + *output++ = (int8_t)out_buff[2]; + *output++ = (int8_t)out_buff[3]; + +#endif + } + } + } + } +} + +static void depthwise_conv_s8_generic(const int8_t *input, + const uint16_t input_batches, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const int8_t *kernel, + const uint16_t output_ch, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y) + +{ + (void)output_ch; + int i_out = 0; + int i_batch; + + for (i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + 
i_input_ch * ch_mult; + int32_t acc_0 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + dilation_y * i_ker_y; + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t idx_x = base_idx_x + dilation_x * i_ker_x; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + + acc_0 += (input[idx_0] + input_offset) * kernel[ker_idx_0]; + } + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + + output[i_out++] = acc_0; + } + } + } + } + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } +} + +/* + * Basic s8 depthwise convolution function. + * + * Refer header file for details. + * Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. 
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + const uint16_t dilation_x = dw_conv_params->dilation.w; + const uint16_t dilation_y = dw_conv_params->dilation.h; + + (void)bias_dims; + (void)ctx; + + if (dw_conv_params->ch_mult % 4 == 0 && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + depthwise_conv_s8_mult_4(input, + input_dims->w, + input_dims->h, + input_dims->c, + kernel, + output_dims->c, + dw_conv_params->ch_mult, + filter_dims->w, + filter_dims->h, + dw_conv_params->padding.w, + dw_conv_params->padding.h, + dw_conv_params->stride.w, + dw_conv_params->stride.h, + bias, + output, + quant_params->shift, + quant_params->multiplier, + output_dims->w, + output_dims->h, + dw_conv_params->output_offset, + dw_conv_params->input_offset, + dw_conv_params->activation.min, + dw_conv_params->activation.max); + } + else + { + depthwise_conv_s8_generic(input, + input_dims->n, + input_dims->w, + input_dims->h, + input_dims->c, + kernel, + output_dims->c, + dw_conv_params->ch_mult, + filter_dims->w, + filter_dims->h, + dw_conv_params->padding.w, + dw_conv_params->padding.h, + dw_conv_params->stride.w, + dw_conv_params->stride.h, + bias, + output, + quant_params->shift, + quant_params->multiplier, + output_dims->w, + output_dims->h, + dw_conv_params->output_offset, + dw_conv_params->input_offset, + dw_conv_params->activation.min, + dw_conv_params->activation.max, + dilation_x, + dilation_y); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git 
a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c new file mode 100644 index 0000000..4e6fe7a --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c @@ -0,0 +1,426 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s8_opt.c + * Description: Optimized s8 depthwise separable convolution function for + * channel multiplier of 1. + * + * $Date: 22 March 2023 + * $Revision: V.3.5.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel + * + * Refer prototype header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + const int32_t input_ch = input_dims->c; + const int32_t output_ch = output_dims->c; + + /* Check depth multiplier is 1 */ + if (input_ch != output_ch) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } +#ifdef ARM_MATH_DSP + (void)bias_dims; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t pad_x = dw_conv_params->padding.w; + const int32_t pad_y = dw_conv_params->padding.h; + const int32_t stride_x = dw_conv_params->stride.w; + const int32_t stride_y = dw_conv_params->stride.h; + const int32_t *output_shift = quant_params->shift; + const int32_t *output_mult = quant_params->multiplier; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_offset = dw_conv_params->output_offset; + const int32_t input_offset = dw_conv_params->input_offset; + const int32_t output_activation_min = dw_conv_params->activation.min; + const int32_t output_activation_max = dw_conv_params->activation.max; + int16_t *buffer_a = (int16_t *)ctx->buf; + + #ifdef ARM_MATH_MVEI + /* Generate two columns from the input tensor */ + int8_t *lhs_buffer = (int8_t *)buffer_a; + int8_t *out = output; + int buffer_count = 0; + const int32_t kernel_size = kernel_x * kernel_y; + + const int32_t ch_loop = (input_ch + (CH_IN_BLOCK_MVE - 1)) / CH_IN_BLOCK_MVE; + int32_t 
remaining_ch = output_ch; + int32_t active_ch = MIN(CH_IN_BLOCK_MVE, remaining_ch); + remaining_ch -= CH_IN_BLOCK_MVE; + + for (int i_ch = 0; i_ch < ch_loop; i_ch++) + { + out = output + i_ch * CH_IN_BLOCK_MVE; + const int8_t *input_slice = input + (i_ch * CH_IN_BLOCK_MVE); + + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + arm_memset_s8(lhs_buffer, (int8_t)-input_offset, (uint32_t)active_ch); + } + else + { + arm_memcpy_s8(lhs_buffer, + input_slice + (i_ker_y * input_x + i_ker_x) * input_ch, + (uint32_t)active_ch); + } + lhs_buffer += CH_IN_BLOCK_MVE; + } + } + buffer_count++; + + if (buffer_count == 4) + { + const int32_t block_offset = i_ch * CH_IN_BLOCK_MVE; + lhs_buffer = (int8_t *)buffer_a; + + arm_nn_depthwise_conv_nt_t_s8(lhs_buffer, + kernel + block_offset, + input_offset, + active_ch, + input_ch, + output_shift + block_offset, + output_mult + block_offset, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias + block_offset, + out); + + out += (4 * input_ch); + buffer_count = 0; + } + } + } + /* Handle left over buffers */ + lhs_buffer = (int8_t *)buffer_a; + + int8_t *out_base = out; + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (active_ch + 3) / 4; + int32_t num_ch_to_process = active_ch; + out = out_base + (i_buf * input_ch); + for (int i_loop_cnt = 0, offset = i_ch * CH_IN_BLOCK_MVE; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, i_loop_cnt++) + { + const int8_t *col_0 = lhs_buffer + (kernel_size * CH_IN_BLOCK_MVE * i_buf) + (i_loop_cnt * 4); + const int8_t *row_0 = 
kernel + offset; + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(&bias[offset]); + } + + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + const int32x4_t ker_0 = vldrbq_s32(row_0); + int32x4_t ip_0 = vldrbq_s32(col_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + col_0 += CH_IN_BLOCK_MVE; + row_0 += input_ch; + } + + const int32x4_t mult = vldrwq_s32(&output_mult[offset]); + const int32x4_t shift = vldrwq_s32(&output_shift[offset]); + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, output_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); + vstrbq_p_s32(out, out_0, p); + + out += 4; + } + } + buffer_count = 0; + + active_ch = MIN(CH_IN_BLOCK_MVE, remaining_ch); + remaining_ch -= CH_IN_BLOCK_MVE; + } + + #else // ARM_MATH_DSP + /* Run the following code in cores using DSP extension */ + int16_t *const col_buffer_start = buffer_a; + int16_t *col_buffer = col_buffer_start; + const int32_t *const bias_start_pos = bias; + const int32_t *const out_mult_start_pos = output_mult; + const int32_t *const out_shift_start_pos = output_shift; + uint16_t row_count; + uint16_t row_shift; + + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + + /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than + along the x axis */ + const int ker_y_start = MAX(0, -base_idx_y); + /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + + int32_t index = 0; + if (ker_y_start != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * 
ker_y_start * sizeof(int16_t)); + index += (kernel_x * input_ch) * ker_y_start; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + if (idx_x < 0 || idx_x >= input_x) + { + memset(&col_buffer[index], 0, input_ch * sizeof(int16_t)); + } + else + { + arm_q7_to_q15_with_offset((int8_t *)input + (idx_y * input_x + idx_x) * input_ch, + &col_buffer[index], + input_ch, + (int16_t)input_offset); + } + index += input_ch; + } + } + + const int diff = kernel_y - ker_y_end; + if (diff != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(int16_t)); + } + + row_count = output_ch / 4; + row_shift = 0; + bias = bias_start_pos; + output_mult = out_mult_start_pos; + output_shift = out_shift_start_pos; + + while (row_count) + { + int32_t sum = 0; + int32_t sum_2 = 0; + int32_t sum_3 = 0; + int32_t sum_4 = 0; + if (bias) + { + sum = *bias++; + sum_2 = *bias++; + sum_3 = *bias++; + sum_4 = *bias++; + } + + uint16_t col_count = (kernel_x * kernel_y) / 2; + int16_t *col_pos = col_buffer_start + row_shift; + const int8_t *row_pos = kernel + row_shift; + row_shift += 4; + + while (col_count) + { + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + /* Note: variable names can be improved here to align with rows and columns. 
*/ + int32_t ip_a1, ip_a2, ip_b1, ip_b2, op_a, op_b, op_c; + /* Read 4 weights */ + ip_b1 = arm_nn_read_s8x4(row_pos); + ip_a1 = arm_nn_read_s8x4(row_pos + input_ch); + op_a = arm_nn_read_s16x2(col_pos); + op_b = arm_nn_read_s16x2(col_pos + input_ch); + + ip_a2 = SXTB16(ip_b1); + ip_b1 = SXTB16(ROR(ip_b1, 8)); + + ip_b2 = SXTB16(ip_a1); + ip_a1 = SXTB16(ROR(ip_a1, 8)); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHBT(ip_b2, ip_a2, 16); + sum = SMLAD(op_c, op_b, sum); + + op_b = PKHBT(ip_b1, ip_a1, 16); + sum_2 = SMLAD(op_a, op_b, sum_2); + + op_a = arm_nn_read_s16x2(col_pos + 2); + op_b = arm_nn_read_s16x2(col_pos + input_ch + 2); + + op_c = PKHBT(op_b, op_a, 16); + op_a = PKHTB(op_b, op_a, 16); + op_b = PKHTB(ip_a2, ip_b2, 16); + sum_3 = SMLAD(op_c, op_b, sum_3); + + op_b = PKHTB(ip_a1, ip_b1, 16); + sum_4 = SMLAD(op_a, op_b, sum_4); + + row_pos += input_ch << 1; + col_pos += input_ch << 1; + col_count--; + } + + col_count = (kernel_x * kernel_y) & 0x1; + while (col_count) + { + sum += row_pos[0] * col_pos[0]; + sum_2 += row_pos[1] * col_pos[1]; + sum_3 += row_pos[2] * col_pos[2]; + sum_4 += row_pos[3] * col_pos[3]; + + row_pos += input_ch; + col_pos += input_ch; + + col_count--; + } + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + + sum_2 = arm_nn_requantize(sum_2, *output_mult++, *output_shift++); + sum_2 += output_offset; + sum_2 = MAX(sum_2, output_activation_min); + sum_2 = MIN(sum_2, output_activation_max); + *output++ = (int8_t)sum_2; + sum_3 = arm_nn_requantize(sum_3, *output_mult++, *output_shift++); + sum_3 += output_offset; + sum_3 = MAX(sum_3, output_activation_min); + sum_3 = MIN(sum_3, output_activation_max); + *output++ = (int8_t)sum_3; + + sum_4 = arm_nn_requantize(sum_4, *output_mult++, *output_shift++); + sum_4 += output_offset; + sum_4 = MAX(sum_4, 
output_activation_min); + sum_4 = MIN(sum_4, output_activation_max); + *output++ = (int8_t)sum_4; + + row_count--; + } + + row_count = output_ch & 0x3; + while (row_count) + { + int16_t *col_pos = col_buffer_start + row_shift; + const int8_t *row_pos = kernel + row_shift; + int32_t sum = 0; + if (bias) + { + sum = *bias++; + } + const uint16_t col_count = (kernel_x * kernel_y); + row_shift += 1; + + for (int i = 0; i < col_count; i++) + { + sum += row_pos[i * input_ch] * col_pos[i * input_ch]; + } + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (int8_t)sum; + + row_count--; + } + + // clear counter and pointers + col_buffer = col_buffer_start; + } + } + #endif +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + return arm_depthwise_conv_s8(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + kernel, + bias_dims, + bias, + output_dims, + output); +#endif /* ARM_MATH_MVEI | ARM_MATH_DSP */ + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c new file mode 100644 index 0000000..8a2ff21 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_wrapper_s16.c + * Description: Wrapper API to select appropriate depthwise conv API based + * on dimensions. + * + * $Date: 20 January 2023 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * s16 Depthwise conv wrapper function + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *filter, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + int16_t *output) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + + if (USE_FAST_DW_CONV_S16_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + status = arm_depthwise_conv_fast_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + else + { + status = arm_depthwise_conv_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + + /* Return to application */ + return status; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s4.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s4.c new file mode 100644 index 0000000..72305cb --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s4.c @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_wrapper_s4.c + * Description: Wrapper API to select appropriate depthwise conv s4 API based + * on dimensions. + * + * $Date: 30 October 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * s4 Depthwise conv wrapper function + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s4(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *filter, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + if (1 == dw_conv_params->ch_mult && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { + status = arm_depthwise_conv_s4_opt(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + else + { + status = arm_depthwise_conv_s4(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + + /* Return to application */ + return status; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c new file mode 100644 index 0000000..6423c16 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c @@ -0,0 +1,182 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_wrapper_s8.c + * Description: Wrapper API to select appropriate depthwise conv API based + * on dimensions. 
+ * + * $Date: 04 November 2024 + * $Revision: V.2.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +#if defined(ARM_MATH_MVEI) +static arm_cmsis_nn_status arm_depthwise_conv_to_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *filter, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + const cmsis_nn_conv_params conv_params = {dw_conv_params->input_offset, + dw_conv_params->output_offset, + dw_conv_params->stride, + dw_conv_params->padding, + dw_conv_params->dilation, + dw_conv_params->activation}; + const cmsis_nn_dims filter_output_dims = {filter_dims->c, filter_dims->h, filter_dims->w, filter_dims->n}; + int8_t *w_buf = + ctx->buf + arm_convolve_wrapper_s8_get_buffer_size(&conv_params, input_dims, &filter_output_dims, output_dims); + const uint32_t perm[4] = {3, 1, 2, 0}; + const cmsis_nn_transpose_params transpose_params = {4, perm}; + + arm_cmsis_nn_status status = arm_transpose_s8(filter, w_buf, filter_dims, &filter_output_dims, &transpose_params); + + if (status == ARM_CMSIS_NN_SUCCESS) + { + status = arm_convolve_wrapper_s8(ctx, + &conv_params, + quant_params, + input_dims, + input, + &filter_output_dims, + (const int8_t *)w_buf, + bias_dims, + bias, + output_dims, + output); + } + return status; +} +#endif + +/* + * s8 Depthwise conv wrapper function + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *filter, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + +#if defined(ARM_MATH_MVEI) + if (input_dims->c == 1 && output_dims->c > CONVERT_DW_CONV_WITH_ONE_INPUT_CH_AND_OUTPUT_CH_ABOVE_THRESHOLD) + { + return arm_depthwise_conv_to_conv_s8(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } +#endif + + if (1 == dw_conv_params->ch_mult && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) + { +#if !defined(ARM_MATH_MVEI) + if (filter_dims->w == 3 && filter_dims->h == 3 && dw_conv_params->padding.h <= 1 && + dw_conv_params->padding.w <= 1) + { + status = arm_depthwise_conv_3x3_s8(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + else +#endif + { + status = arm_depthwise_conv_s8_opt(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + } + else + { + status = arm_depthwise_conv_s8(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + + /* Return to application */ + return status; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c similarity index 84% rename from 
src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c rename to src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c index 97511ff..341f157 100644 --- a/src/third_party/cmsis/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,34 +21,33 @@ * Title: arm_nn_depthwise_conv_s8_core.c * Description: Depthwise convolution on im2col buffers. * - * $Date: May 29, 2020 - * $Revision: V.1.0.3 + * $Date: 26 October 2022 + * $Revision: V.1.0.5 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ -#include "arm_math.h" -#include "arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" /* - * Depthwise conv on an im2col buffer where the input channel equals - * output channel. - * - * Refer header file for details. - * - */ - -q7_t *arm_nn_depthwise_conv_s8_core(const q7_t *row, - const q15_t *col, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t kernel_size, - const int32_t *const output_bias, - q7_t *out) + * Depthwise conv on an im2col buffer where the input channel equals + * output channel. + * + * Refer header file for details. 
+ * + */ + +int8_t *arm_nn_depthwise_conv_s8_core(const int8_t *row, + const int16_t *col, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t kernel_size, + const int32_t *const output_bias, + int8_t *out) { #if defined(ARM_MATH_MVEI) int32_t ch_per_loop = num_ch / 4; diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_row_offset_s8_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_row_offset_s8_s16.c new file mode 100644 index 0000000..d96c474 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_row_offset_s8_s16.c @@ -0,0 +1,253 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_row_offset_s8_s16.c + * Description: Matrix-multiplication function for grouped convolution + * + * $Date: 04 January 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/* + * Matrix-multiplication function for convolution with per-channel requantization, supporting an address offset between + * rows. + * + * Refer header file for details. + * + */ + +int8_t *arm_nn_mat_mult_kernel_row_offset_s8_s16(const int8_t *input_a, + const int16_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const int32_t num_col_a, + const int32_t aligned_num_col_a, + const int32_t *const output_bias, + const int32_t row_address_offset, + int8_t *out_0) +{ + +#if !defined(ARM_MATH_MVEI) + /* set up the second output pointers */ + + int8_t *out_1 = out_0 + row_address_offset; + const int32_t *bias = output_bias; + + uint16_t row_count = output_ch / 2; + const int8_t *ip_a0 = input_a; + /* this loop over rows in A */ + while (row_count) + { + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + aligned_num_col_a; + + /* align the second pointer for A */ + const int8_t *ip_a1 = ip_a0 + num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + int32_t ch_1_out_0 = 0; + int32_t ch_1_out_1 = 0; + /* Init accumulator with bias for channel N and N + 1 */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + ch_1_out_0 = *bias; + ch_1_out_1 = *bias++; + } + + #if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a / 4; + /* 
accumulate over the vector */ + while (col_count) + { + int32_t a01, a02, a11, a12; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + ip_a1 = read_and_pad_reordered(ip_a1, &a11, &a12); + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + + col_count = num_col_a & 0x3; + + #else + int32_t col_count = num_col_a; + #endif + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int8_t a1 = *ip_a1++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + ch_1_out_0 += a1 * b0; + ch_1_out_1 += a1 * b1; + col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int8_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int8_t)ch_0_out_1; + out_mult++; + out_shift++; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (int8_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, 
activation_max); + *out_1++ = (int8_t)ch_1_out_1; + out_mult++; + out_shift++; + + /* skip row */ + ip_a0 += num_col_a; + row_count--; + } + + /* compute the last odd numbered row if any */ + if (output_ch & 0x1) + { + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + aligned_num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + + /* load the bias */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + } + + #if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a >> 2; + while (col_count) + { + int32_t a01, a02; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = num_col_a & 0x3; + + #else + int32_t col_count = num_col_a; + #endif + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + col_count--; + } + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int8_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int8_t)ch_0_out_1; + out_mult++; + out_shift++; + } + + out_0 += 2 * row_address_offset - output_ch; + + /* return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + 
(void)activation_min; + (void)activation_max; + (void)aligned_num_col_a, (void)num_col_a; + (void)output_bias; + (void)row_address_offset; + (void)out_0; + return NULL; +#endif +} diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s16.c new file mode 100644 index 0000000..dd88b69 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s16.c @@ -0,0 +1,369 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s16.c + * Description: Matrix-multiplication function for 16 bits convolution + * + * $Date: 12 April 2024 + * $Revision: V.3.0.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Matrix-multiplication function for convolution with per-channel requantization. + * + * Refer header file for details. 
+ * + */ +int16_t *arm_nn_mat_mult_kernel_s16(const int8_t *input_a, + const int16_t *input_b, + const int32_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + const int32_t num_col_a, + const cmsis_nn_bias_data *const bias_data, + int16_t *out_0) +{ +#if !defined(ARM_MATH_MVEI) + const int64_t *bias_s64 = (const int64_t *)bias_data->data; + const int32_t *bias_s32 = (const int32_t *)bias_data->data; + const bool is_int32_bias = bias_data->is_int32_bias; + + const int32_t num_col_a_fast = is_int32_bias ? num_col_a : (num_col_a > MAX_COL_COUNT ? MAX_COL_COUNT : num_col_a); + const int32_t num_col_a_slow = num_col_a - MAX_COL_COUNT; + + int16_t *out_1 = out_0 + output_ch; + int32_t row_count = output_ch / 2; + const int8_t *ip_a0 = input_a; + + /* This loop over rows in A */ + while (row_count) + { + /* Setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + num_col_a; + + /* Align the second pointer for A */ + const int8_t *ip_a1 = ip_a0 + num_col_a; + + /* Init accumulator for channel N and N + 1 */ + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + int32_t ch_1_out_0 = 0; + int32_t ch_1_out_1 = 0; + + #if defined(ARM_MATH_DSP) + uint16_t col_count = num_col_a_fast / 4; + + /* Accumulate over the vector */ + while (col_count) + { + int32_t a01, a02, a11, a12; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + ip_a1 = read_and_pad(ip_a1, &a11, &a12); + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a12, b1, 
ch_1_out_1); + + col_count--; + } + col_count = num_col_a_fast & 0x3; + #else + int32_t col_count = num_col_a_fast; + #endif + + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int8_t a1 = *ip_a1++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + ch_1_out_0 += a1 * b0; + ch_1_out_1 += a1 * b1; + col_count--; + } + + if (is_int32_bias) + { + if (bias_s32) + { + ch_0_out_0 += *bias_s32; + ch_0_out_1 += *bias_s32++; + ch_1_out_0 += *bias_s32; + ch_1_out_1 += *bias_s32++; + } + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + out_mult++; + out_shift++; + + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int16_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int16_t)ch_0_out_1; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + out_mult++; + out_shift++; + + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (int16_t)ch_1_out_0; + + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (int16_t)ch_1_out_1; + } + else + { + int64_t ch_0_out_0_s64 = ch_0_out_0; + int64_t ch_0_out_1_s64 = ch_0_out_1; + int64_t ch_1_out_0_s64 = ch_1_out_0; + int64_t ch_1_out_1_s64 = ch_1_out_1; + + if (num_col_a > MAX_COL_COUNT) + { + col_count = num_col_a_slow; + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int8_t a1 = *ip_a1++; + int16_t b1 = *ip_b1++; + + ch_0_out_0_s64 += a0 * b0; + ch_0_out_1_s64 += a0 * b1; + ch_1_out_0_s64 += a1 * b0; + ch_1_out_1_s64 += a1 * b1; + col_count--; + } + } + + if (bias_s64) + { + ch_0_out_0_s64 += *bias_s64; + ch_0_out_1_s64 += *bias_s64++; + 
ch_1_out_0_s64 += *bias_s64; + ch_1_out_1_s64 += *bias_s64++; + } + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + ch_0_out_0 = arm_nn_requantize_s64(ch_0_out_0_s64, reduced_multiplier, *out_shift); + ch_0_out_1 = arm_nn_requantize_s64(ch_0_out_1_s64, reduced_multiplier, *out_shift); + out_mult++; + out_shift++; + + reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + ch_1_out_0 = arm_nn_requantize_s64(ch_1_out_0_s64, reduced_multiplier, *out_shift); + ch_1_out_1 = arm_nn_requantize_s64(ch_1_out_1_s64, reduced_multiplier, *out_shift); + + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int16_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int16_t)ch_0_out_1; + + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (int16_t)ch_1_out_0; + + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (int16_t)ch_1_out_1; + + out_mult++; + out_shift++; + } + + /* Skip row */ + ip_a0 += num_col_a; + row_count--; + } + + /* Compute the last odd numbered row if any */ + if (output_ch & 0x1) + { + /* Setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + + #if defined(ARM_MATH_DSP) + uint16_t col_count = num_col_a_fast >> 2; + while (col_count) + { + int32_t a01, a02; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = num_col_a & 0x3; + #else + int32_t 
col_count = num_col_a_fast; + #endif + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + col_count--; + } + + if (is_int32_bias) + { + if (bias_s32) + { + ch_0_out_0 += *bias_s32; + ch_0_out_1 += *bias_s32++; + } + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + out_mult++; + out_shift++; + + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int16_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int16_t)ch_0_out_1; + } + else + { + int64_t ch_0_out_0_s64 = ch_0_out_0; + int64_t ch_0_out_1_s64 = ch_0_out_1; + + if (num_col_a > MAX_COL_COUNT) + { + col_count = num_col_a_slow; + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0_s64 += a0 * b0; + ch_0_out_1_s64 += a0 * b1; + col_count--; + } + } + + if (bias_s64) + { + ch_0_out_0_s64 += *bias_s64; + ch_0_out_1_s64 += *bias_s64++; + } + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + ch_0_out_0 = arm_nn_requantize_s64(ch_0_out_0_s64, reduced_multiplier, *out_shift); + ch_0_out_1 = arm_nn_requantize_s64(ch_0_out_1_s64, reduced_multiplier, *out_shift); + + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int16_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int16_t)ch_0_out_1; + out_mult++; + out_shift++; + } + } + + out_0 += output_ch; + + /* Return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)activation_min; + (void)activation_max; + (void)num_col_a; + (void)bias_data; + 
(void)out_0; + + return NULL; +#endif +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s4_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s4_s16.c new file mode 100644 index 0000000..685939a --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s4_s16.c @@ -0,0 +1,439 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s4_s16.c + * Description: Matrix-multiplication function for convolution + * + * $Date: 28 October 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/* + * Matrix-multiplication function for convolution with per-channel requantization and 4bit weights. + * + * Refer header file for details. 
+ * + */ + +int8_t *arm_nn_mat_mult_kernel_s4_s16(const int8_t *packed_input_a, + const int16_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t num_col_a, + const int32_t *const output_bias, + int8_t *OPTIONAL_RESTRICT_KEYWORD out_0) +{ + + /* set up the second output pointers */ + int8_t *out_1 = out_0 + output_ch; + const int32_t *bias = output_bias; + + uint16_t row_count = output_ch / 4; + const int8_t *packed_ip_a0 = packed_input_a; + /* this loop over rows in A */ + while (row_count) + { + int8_t spillover0 = 0; + int8_t spillover1 = 0; + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + num_col_a; + + /* Align the second pointer for A. + * This will skip a row so that we can ensure the that spilled rows + * don't offset the symmetry. + */ + const int8_t *packed_ip_a1 = packed_ip_a0 + num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + int32_t ch_1_out_0 = 0; + int32_t ch_1_out_1 = 0; + /* Init accumulator with bias for channel N and N + 1 */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias; + bias += 2; + ch_1_out_0 = *bias; + ch_1_out_1 = *bias--; + } + +#if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a / 4; + /* accumulate over the vector */ + + while (col_count) + { + int32_t a01, a02, a11, a12; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + read_and_pad_s4_ordered(packed_ip_a0, &a01, &a02); + read_and_pad_s4_ordered(packed_ip_a1, &a11, &a12); + packed_ip_a0 += 2; + packed_ip_a1 += 2; + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + 
ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + col_count = (num_col_a & 0x3) >> 1; +#else + int32_t col_count = num_col_a >> 1; +#endif + while (col_count) + { + int8_t lower_a0 = (int8_t)(packed_ip_a0[0] << 4) >> 4; + int8_t higher_a0 = packed_ip_a0[0] >> 4; + int16_t b0 = *ip_b0++; + + int8_t lower_a1 = (int8_t)(packed_ip_a1[0] << 4) >> 4; + int8_t higher_a1 = packed_ip_a1[0] >> 4; + int16_t b1 = *ip_b1++; + + packed_ip_a0++; + packed_ip_a1++; + + ch_0_out_0 += lower_a0 * b0; + ch_0_out_1 += lower_a0 * b1; + ch_1_out_0 += lower_a1 * b0; + ch_1_out_1 += lower_a1 * b1; + + b0 = *ip_b0++; + b1 = *ip_b1++; + + ch_0_out_0 += higher_a0 * b0; + ch_0_out_1 += higher_a0 * b1; + ch_1_out_0 += higher_a1 * b0; + ch_1_out_1 += higher_a1 * b1; + + col_count--; + } /* while over col_count */ + /* left over column */ + if (num_col_a % 2) + { + int8_t lower_a0 = (int8_t)(packed_ip_a0[0] << 4) >> 4; + spillover0 = packed_ip_a0[0] >> 4; + int16_t b0 = *ip_b0++; + + int8_t lower_a1 = (int8_t)(packed_ip_a1[0] << 4) >> 4; + spillover1 = packed_ip_a1[0] >> 4; + int16_t b1 = *ip_b1++; + + packed_ip_a0++; + packed_ip_a1++; + + ch_0_out_0 += lower_a0 * b0; + ch_0_out_1 += lower_a0 * b1; + ch_1_out_0 += lower_a1 * b0; + ch_1_out_1 += lower_a1 * b1; + } + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0 = (int8_t)ch_0_out_0; + out_0 += 2; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1 = (int8_t)ch_0_out_1; + out_1 += 2; + out_mult += 2; + out_shift += 2; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + 
ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0-- = (int8_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1-- = (int8_t)ch_1_out_1; + out_mult--; + out_shift--; + + /* setup pointers for B */ + ip_b0 = input_b; + ip_b1 = ip_b0 + num_col_a; + + /* Align the second pointer for A. + * This will skip a row so that we can ensure the that spilled rows + * don't offset the symmetry. + */ + packed_ip_a1 = packed_ip_a0 + num_col_a; + + ch_0_out_0 = 0; + ch_0_out_1 = 0; + ch_1_out_0 = 0; + ch_1_out_1 = 0; + /* Init accumulator with bias for channel N and N + 1 */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias; + bias += 2; + ch_1_out_0 = *bias; + ch_1_out_1 = *bias++; + } + + if (num_col_a % 2) + { + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += spillover0 * b0; + ch_0_out_1 += spillover0 * b1; + ch_1_out_0 += spillover1 * b0; + ch_1_out_1 += spillover1 * b1; + } + +#if defined(ARM_MATH_DSP) + col_count = num_col_a / 4; + /* accumulate over the vector */ + while (col_count) + { + int32_t a01, a02, a11, a12; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + read_and_pad_s4_ordered(packed_ip_a0, &a01, &a02); + read_and_pad_s4_ordered(packed_ip_a1, &a11, &a12); + packed_ip_a0 += 2; + packed_ip_a1 += 2; + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + col_count = 
(num_col_a & 0x3) >> 1; +#else + col_count = num_col_a >> 1; +#endif + while (col_count) + { + int8_t lower_a0 = (int8_t)(packed_ip_a0[0] << 4) >> 4; + int8_t higher_a0 = packed_ip_a0[0] >> 4; + int16_t b0 = *ip_b0++; + + int8_t lower_a1 = (int8_t)(packed_ip_a1[0] << 4) >> 4; + int8_t higher_a1 = packed_ip_a1[0] >> 4; + int16_t b1 = *ip_b1++; + + packed_ip_a0++; + packed_ip_a1++; + + ch_0_out_0 += lower_a0 * b0; + ch_0_out_1 += lower_a0 * b1; + ch_1_out_0 += lower_a1 * b0; + ch_1_out_1 += lower_a1 * b1; + + b0 = *ip_b0++; + b1 = *ip_b1++; + + ch_0_out_0 += higher_a0 * b0; + ch_0_out_1 += higher_a0 * b1; + ch_1_out_0 += higher_a1 * b0; + ch_1_out_1 += higher_a1 * b1; + + col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0 = (int8_t)ch_0_out_0; + out_0 += 2; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1 = (int8_t)ch_0_out_1; + out_1 += 2; + out_mult += 2; + out_shift += 2; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (int8_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (int8_t)ch_1_out_1; + out_mult++; + out_shift++; + + /* skip 2 rows */ + packed_ip_a0 += num_col_a; + row_count--; + } + + /* compute the 0 - 3 rows if any */ + int16_t left_over_rows = 0; + while (left_over_rows < output_ch % 4) + { + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = 
ip_b0 + num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + + /* load the bias */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + } + + if (left_over_rows == 1 && num_col_a % 2) + { + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + int8_t spilled_column = packed_ip_a0[0] >> 4; + + ++packed_ip_a0; + + ch_0_out_0 += spilled_column * b0; + ch_0_out_1 += spilled_column * b1; + } + +#if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a / 4; + while (col_count) + { + int32_t a01, a02; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + read_and_pad_s4_ordered(packed_ip_a0, &a01, &a02); + packed_ip_a0 += 2; + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = (num_col_a & 0x3) >> 1; + +#else + int32_t col_count = num_col_a >> 1; +#endif + + while (col_count) + { + int8_t a0 = (int8_t)(packed_ip_a0[0] << 4) >> 4; + int8_t a1 = packed_ip_a0[0] >> 4; + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ++packed_ip_a0; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + + b0 = *ip_b0++; + b1 = *ip_b1++; + + ch_0_out_0 += a1 * b0; + ch_0_out_1 += a1 * b1; + + col_count--; + } + if (num_col_a % 2 && left_over_rows != 1) + { + int8_t a0 = (int8_t)(packed_ip_a0[0] << 4) >> 4; + + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + } + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int8_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = 
MIN(ch_0_out_1, activation_max); + *out_1++ = (int8_t)ch_0_out_1; + out_mult++; + out_shift++; + + ++left_over_rows; + } + + out_0 += output_ch; + + /* return the new output pointer with offset */ + return out_0; +} diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c new file mode 100644 index 0000000..ebc17a6 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c @@ -0,0 +1,246 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s8_s16.c + * Description: Matrix-multiplication function for convolution + * + * $Date: 28 October 2024 + * $Revision: V.2.1.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/* + * Matrix-multiplication function for convolution with per-channel requantization. + * + * Refer header file for details. 
+ * + */ + +int8_t *arm_nn_mat_mult_kernel_s8_s16(const int8_t *input_a, + const int16_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const int32_t num_col_a, + const int32_t aligned_num_col_a, + const int32_t *const output_bias, + int8_t *OPTIONAL_RESTRICT_KEYWORD out_0) +{ +#if !defined(ARM_MATH_MVEI) + /* set up the second output pointers */ + int8_t *out_1 = out_0 + output_ch; + const int32_t *bias = output_bias; + + uint16_t row_count = output_ch / 2; + const int8_t *ip_a0 = input_a; + /* this loop over rows in A */ + while (row_count) + { + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + aligned_num_col_a; + + /* align the second pointer for A */ + const int8_t *ip_a1 = ip_a0 + num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + int32_t ch_1_out_0 = 0; + int32_t ch_1_out_1 = 0; + /* Init accumulator with bias for channel N and N + 1 */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + ch_1_out_0 = *bias; + ch_1_out_1 = *bias++; + } + + #if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a / 4; + /* accumulate over the vector */ + while (col_count) + { + int32_t a01, a02, a11, a12; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + ip_a1 = read_and_pad_reordered(ip_a1, &a11, &a12); + + ch_0_out_0 = SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + 
col_count = num_col_a & 0x3; + #else + int32_t col_count = num_col_a; + #endif + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int8_t a1 = *ip_a1++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + ch_1_out_0 += a1 * b0; + ch_1_out_1 += a1 * b1; + col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int8_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int8_t)ch_0_out_1; + out_mult++; + out_shift++; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (int8_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (int8_t)ch_1_out_1; + out_mult++; + out_shift++; + + /* skip row */ + ip_a0 += num_col_a; + row_count--; + } + + /* compute the last odd numbered row if any */ + if (output_ch & 0x1) + { + /* setup pointers for B */ + const int16_t *ip_b0 = input_b; + const int16_t *ip_b1 = ip_b0 + aligned_num_col_a; + + int32_t ch_0_out_0 = 0; + int32_t ch_0_out_1 = 0; + + /* load the bias */ + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + } + + #if defined(ARM_MATH_DSP) + int32_t col_count = num_col_a >> 2; + while (col_count) + { + int32_t a01, a02; + int32_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + int32_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + + ch_0_out_0 = 
SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = num_col_a & 0x3; + #else + int32_t col_count = num_col_a; + #endif + while (col_count) + { + int8_t a0 = *ip_a0++; + int16_t b0 = *ip_b0++; + int16_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + col_count--; + } + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (int8_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (int8_t)ch_0_out_1; + out_mult++; + out_shift++; + } + + out_0 += output_ch; + + /* return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)aligned_num_col_a, (void)num_col_a; + (void)output_bias; + (void)out_0; + /* To be completed */ + return NULL; +#endif +} diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c new file mode 100644 index 0000000..06b89a9 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c @@ -0,0 +1,180 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_s8.c + * Description: General Matrix-multiplication function + * + * $Date: 26 October 2022 + * $Revision: V.2.0.8 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/* + * s8 General matrix multiplication function with per-channel requantization for upto 4 column batches. + * + * Refer header file for details. 
+ * + */ + +int8_t *arm_nn_mat_mult_s8(const int8_t *input_row, + const int8_t *input_col, + const uint16_t output_ch, + const uint16_t col_batches, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t col_offset, + const int32_t row_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t row_len, + const int32_t *const bias, + int8_t *out) +{ +#if defined(ARM_MATH_MVEI) + (void)row_offset; + if (col_batches == 4) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t row_len_tmp = row_len; + const int8_t *ip_r0 = input_row + (i_out_ch * row_len); + const int8_t *ip_c0 = input_col; + const int8_t *ip_c1 = input_col + row_len; + const int8_t *ip_c2 = input_col + (2 * row_len); + const int8_t *ip_c3 = input_col + (3 * row_len); + + int32_t acc_0 = 0; + int32_t acc_1 = 0; + int32_t acc_2 = 0; + int32_t acc_3 = 0; + const int32_t row_loop_cnt = (row_len + 7) / 8; + + for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) + { + mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); + const int16x8_t offset = vdupq_x_n_s16(col_offset, p); + row_len_tmp -= 8; + + int16x8_t c0 = vldrbq_s16(ip_c0); + ip_c0 += 8; + c0 = vaddq_s16(c0, offset); + + int16x8_t c1 = vldrbq_s16(ip_c1); + ip_c1 += 8; + c1 = vaddq_s16(c1, offset); + + int16x8_t c2 = vldrbq_s16(ip_c2); + ip_c2 += 8; + c2 = vaddq_s16(c2, offset); + + int16x8_t c3 = vldrbq_s16(ip_c3); + ip_c3 += 8; + c3 = vaddq_s16(c3, offset); + + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; + + acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); + acc_1 = vmladavaq_p_s16(acc_1, r0, c1, p); + acc_2 = vmladavaq_p_s16(acc_2, r0, c2, p); + acc_3 = vmladavaq_p_s16(acc_3, r0, c3, p); + } + + int32x4_t res = {acc_0, acc_1, acc_2, acc_3}; + if (bias) + { + res = vaddq_n_s32(res, bias[i_out_ch]); + } + res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); + res = vaddq_n_s32(res, out_offset); + + res = 
vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; + vstrbq_scatter_offset_s32(&out[i_out_ch], scatter_offset, res); + } + out += 4 * output_ch; + } + else + { + for (int i_col_batch = (col_batches & ~0x3); i_col_batch < (col_batches & 0x3); i_col_batch++) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t row_len_tmp = row_len; + + const int8_t *ip_r0 = input_row + (i_out_ch * row_len); + const int8_t *ip_c0 = input_col + (i_col_batch * row_len); + int32_t acc_0 = 0; + const int32_t row_loop_cnt = (row_len + 7) / 8; + + for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) + { + const mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); + const int16x8_t offset = vdupq_x_n_s16(col_offset, p); + row_len_tmp -= 8; + + int16x8_t c0 = vldrbq_s16(ip_c0); + ip_c0 += 8; + c0 = vaddq_s16(c0, offset); + + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; + acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); + } + + if (bias) + { + acc_0 += bias[i_out_ch]; + } + acc_0 = arm_nn_requantize(acc_0, output_mult[i_out_ch], output_shift[i_out_ch]); + acc_0 += out_offset; + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + out[i_out_ch] = (int8_t)acc_0; + } + out += output_ch; + } + } + return out; + +#else + (void)input_row; + (void)input_col; + (void)output_ch; + (void)col_batches; + (void)output_shift; + (void)output_mult; + (void)out_offset; + (void)col_offset; + (void)row_offset; + (void)activation_min; + (void)activation_max; + (void)row_len; + (void)bias; + (void)out; + return NULL; +#endif +} diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_get_buffer_sizes_s8.c new file mode 100644 index 0000000..76325dc --- /dev/null +++ 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_get_buffer_sizes_s8.c @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_transpose_conv_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for the transpose convolution layer functions. + * + * $Date: 29 October 2024 + * $Revision: V.2.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/Internal/arm_nn_compiler.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup NNConv + */ + +/** + * @addtogroup GetBufferSizeNNConv + * @{ + */ + +/* + * Get the required buffer size for arm_transpose_conv_s8. This is the recommended transpose conv s8 get buffer size + * function. + * + * Refer to header file for details. 
+ * + */ +int32_t arm_transpose_conv_s8_get_buffer_size(const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *out_dims) +{ + + const bool reverse_conv_possible = + ((transpose_conv_params->stride.w <= 2) && (transpose_conv_params->stride.h <= 2)); + const bool reverse_conv_efficient = (input_dims->c > REVERSE_TCOL_EFFICIENT_THRESHOLD); + + if (reverse_conv_possible && reverse_conv_efficient) + { + const cmsis_nn_dims reverse_conv_input_dims = {input_dims->n, + input_dims->h * transpose_conv_params->stride.h, + input_dims->w * transpose_conv_params->stride.w, + input_dims->c}; + return arm_convolve_s8_get_buffer_size(&reverse_conv_input_dims, filter_dims); + } + else + { + const int32_t buf_x = ((input_dims->w - 1) * transpose_conv_params->stride.w + + MAX(filter_dims->w, transpose_conv_params->stride.h)) * + out_dims->c; + const int32_t buf_y = MAX(filter_dims->h, transpose_conv_params->stride.h); + return buf_x * buf_y * sizeof(int32_t); + } +} + +int32_t arm_transpose_conv_s8_get_reverse_conv_buffer_size(const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims) +{ + const bool reverse_conv_possible = + ((transpose_conv_params->stride.w <= 2) && (transpose_conv_params->stride.h <= 2)); + const bool reverse_conv_efficient = (input_dims->c > REVERSE_TCOL_EFFICIENT_THRESHOLD); + + if (reverse_conv_possible && reverse_conv_efficient) + { + return input_dims->c * filter_dims->w * filter_dims->h * filter_dims->n; + } + else + { + return 0; + } +} + +/** + * @} end of GetBufferSizeNNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_s8.c new file mode 100644 index 0000000..731fd24 --- /dev/null +++ 
b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_s8.c @@ -0,0 +1,280 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_transpose_conv_s8.c + * Description: s8 version of transposed convolution using symmetric quantization. + * + * $Date: 29 October 2024 + * $Revision: V.2.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s8 transpose convolution function. + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_transpose_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)bias_dims; + (void)output_ctx; + + const int32_t activation_min = transpose_conv_params->activation.min; + const int32_t activation_max = transpose_conv_params->activation.max; + + const int32_t input_ch = input_dims->c; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + + const int32_t output_ch = output_dims->c; + + const int32_t filter_x = filter_dims->w; + const int32_t filter_y = filter_dims->h; + + const int32_t pad_x = transpose_conv_params->padding.w; + const int32_t pad_y = transpose_conv_params->padding.h; + + const int32_t stride_x = transpose_conv_params->stride.w; + const int32_t stride_y = transpose_conv_params->stride.h; + + const int32_t *output_multiplier = quant_params->multiplier; + const int32_t *output_shift = quant_params->shift; + + const int32_t out_offset = transpose_conv_params->output_offset; + const int32_t input_offset = transpose_conv_params->input_offset; + + const int32_t buf_x_elements = ((input_x - 1) * stride_x + MAX(filter_x, stride_x)); + const int32_t buf_x = buf_x_elements * output_ch; + const int32_t buf_y = MAX(filter_y, stride_y); + const int32_t buf_size = buf_y * buf_x; + int32_t *buf = ctx->buf; + int32_t batch_cnt = input_dims->n; + + const int8_t *filter = filter_data; + const int8_t *input = input_data; + int8_t *output = output_data; + + while (batch_cnt) + { + // Reset buf + if (bias_data) + { + for (int x = 
0; x < buf_x_elements * buf_y; x++) + { + arm_memcpy_s8((int8_t *)(buf + x * output_ch), (const int8_t *)bias_data, output_ch * sizeof(int32_t)); + } + } + else + { + arm_memset_s8((int8_t *)buf, 0, buf_size * sizeof(int32_t)); + } + + int32_t buf_row = 0; + for (int j = 0; j < input_y; j++) + { + int skip_rows_top = MAX(0, pad_y - j * stride_y); + int skip_rows_bottom = MAX(0, (j * stride_y + filter_y) - (pad_y + output_y) - 1); + + // Compute output for one row of input + arm_nn_transpose_conv_row_s8_s32(input, + filter, + buf, + buf_row, + buf_size, + filter_y, + filter_x, + input_ch, + output_ch, + input_offset, + buf_x, + input_x, + stride_x, + skip_rows_top, + skip_rows_bottom); + input += input_ch * input_x; + + if (skip_rows_top == 0) + { + for (int y = 0; y < stride_y; y++) + { + int32_t *buf_out = buf + buf_row; + buf_out += output_ch * pad_x; + +#if defined(ARM_MATH_MVEI) + for (int x = 0; x < output_x; x++) + { + const int32_t *mult_ptr = output_multiplier; + const int32_t *shift_ptr = output_shift; + + int channel_count = output_ch; + for (; channel_count > 0; channel_count -= 4) + { + mve_pred16_t p = vctp32q((uint32_t)channel_count); + + int32x4_t result = vldrwq_z_s32(buf_out, p); + buf_out += 4; + result = + arm_requantize_mve_32x4(result, vldrwq_z_s32(mult_ptr, p), vldrwq_z_s32(shift_ptr, p)); + mult_ptr += 4; + shift_ptr += 4; + result = vaddq_n_s32(result, out_offset); + result = vmaxq_s32(result, vdupq_n_s32(activation_min)); + result = vminq_s32(result, vdupq_n_s32(activation_max)); + vstrbq_p_s32(output, result, p); + output += 4; + } + + // Correct pointer overshoot due to predication + buf_out += channel_count; + output += channel_count; + } +#else + + for (int x = 0; x < output_x; x++) + { + const int32_t *output_multiplier_ptr = output_multiplier; + const int32_t *output_shift_ptr = output_shift; + for (int z = 0; z < output_ch; z++) + { + int32_t result = *buf_out++; + result = arm_nn_requantize(result, *output_multiplier_ptr++, 
*output_shift_ptr++); + result += out_offset; + result = MAX(result, activation_min); + result = MIN(result, activation_max); + *output++ = result; + } + } +#endif + + // Reset the buffer which was just written + if (bias_data) + { + for (int x = 0; x < buf_x_elements; x++) + { + arm_memcpy_s8((int8_t *)(buf + buf_row + x * output_ch), + (const int8_t *)bias_data, + output_ch * sizeof(int32_t)); + } + } + else + { + arm_memset_s8((int8_t *)(buf + buf_row), 0, buf_x * sizeof(int32_t)); + } + + // Next row in the rolling buffer + buf_row = (buf_row + buf_x) % buf_size; + } + } + } + + // Write leftover rows + for (int y = 0; y < filter_y - stride_y; y++) + { + int32_t *buf_out = buf + buf_row; + if ((input_y * stride_y + y >= pad_y) && (input_y * stride_y + y < pad_y + output_y)) + { + buf_out += output_ch * pad_x; +#if defined(ARM_MATH_MVEI) + for (int x = 0; x < output_x; x++) + { + const int32_t *mult_ptr = output_multiplier; + const int32_t *shift_ptr = output_shift; + + int channel_count = output_ch; + for (; channel_count > 0; channel_count -= 4) + { + mve_pred16_t p = vctp32q((uint32_t)channel_count); + + int32x4_t result = vldrwq_z_s32(buf_out, p); + buf_out += 4; + result = arm_requantize_mve_32x4(result, vldrwq_z_s32(mult_ptr, p), vldrwq_z_s32(shift_ptr, p)); + mult_ptr += 4; + shift_ptr += 4; + result = vaddq_n_s32(result, out_offset); + result = vmaxq_s32(result, vdupq_n_s32(activation_min)); + result = vminq_s32(result, vdupq_n_s32(activation_max)); + vstrbq_p_s32(output, result, p); + output += 4; + } + + // Correct pointer overshoot due to predication + buf_out += channel_count; + output += channel_count; + } +#else + for (int x = 0; x < output_x; x++) + { + const int32_t *output_multiplier_ptr = output_multiplier; + const int32_t *output_shift_ptr = output_shift; + + for (int z = 0; z < output_ch; z++) + { + int32_t result = *buf_out++; + + result = arm_nn_requantize(result, *output_multiplier_ptr++, *output_shift_ptr++); + result += out_offset; + 
result = MAX(result, activation_min); + result = MIN(result, activation_max); + *output++ = result; + } + } +#endif + } + buf_row = (buf_row + buf_x) % buf_size; + } + + batch_cnt--; + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_wrapper_s8.c b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_wrapper_s8.c new file mode 100644 index 0000000..e7fb5e3 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/ConvolutionFunctions/arm_transpose_conv_wrapper_s8.c @@ -0,0 +1,161 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_transpose_conv_wrapper_s8.c + * Description: Wrapper API to select appropriate transpose conv API based + * on dimensions. 
+ * + * $Date: 16 October 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * s8 Transpose conv wrapper function + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_transpose_conv_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_context *reverse_conv_ctx, + const cmsis_nn_transpose_conv_params *transpose_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const bool reverse_conv_possible = + ((transpose_conv_params->stride.w <= 2) && (transpose_conv_params->stride.h <= 2)); + const bool reverse_conv_efficient = (input_dims->c > REVERSE_TCOL_EFFICIENT_THRESHOLD); + + if (reverse_conv_possible && reverse_conv_efficient) + { + + if (reverse_conv_ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + const int32_t stride_w = transpose_conv_params->stride.w; + const int32_t stride_h = transpose_conv_params->stride.h; + const int32_t filter_h = filter_dims->h; + const int32_t filter_w = filter_dims->w; + const int32_t output_c = output_dims->c; + const int32_t input_n = input_dims->n; + const int32_t input_h = input_dims->h; + const int32_t input_w = input_dims->w; + const int32_t input_c = input_dims->c; + const int32_t padding_w = transpose_conv_params->padding.w; + const int32_t padding_h = transpose_conv_params->padding.h; + + cmsis_nn_conv_params conv_params; + conv_params.padding.h = 
filter_h - 1 - padding_h; + conv_params.padding.w = filter_w - 1 - padding_w; + conv_params.input_offset = transpose_conv_params->input_offset; + conv_params.output_offset = transpose_conv_params->output_offset; + conv_params.stride.h = 1; + conv_params.stride.w = 1; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + conv_params.activation = transpose_conv_params->activation; + + const cmsis_nn_dims transposed_input_dims = {input_n, input_h * stride_h, input_w * stride_w, input_c}; + const cmsis_nn_dims upscale_dims = {0, stride_h, stride_w, 0}; + + // Reverse filter in x and y-dimensions + int8_t *reversed_filter = reverse_conv_ctx->buf; + const int8_t *in_ptr = filter_data; + int8_t *out_ptr = reversed_filter; + const int32_t filter_size = filter_h * filter_w * input_c; + + out_ptr += filter_size; + for (int32_t i = 0; i < output_c; i++) + { + for (int32_t y = 0; y < filter_h; y++) + { + for (int32_t x = 0; x < filter_w; x++) + { + out_ptr -= input_c; + arm_memcpy_s8(out_ptr, in_ptr, input_c * sizeof(int8_t)); + in_ptr += input_c; + } + } + out_ptr += 2 * filter_size; + } + + return arm_convolve_s8(ctx, + &conv_params, + quant_params, + &transposed_input_dims, + input_data, + filter_dims, + reversed_filter, + bias_dims, + bias_data, + &upscale_dims, + output_dims, + output_data); + } + else + { + + return arm_transpose_conv_s8(ctx, + reverse_conv_ctx, + transpose_conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +} + +/** + * @} end of NNconv group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s16.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s16.c new file mode 100644 index 0000000..296688d --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s16.c @@ -0,0 +1,105 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its 
affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_batch_matmul_s16.c + * Description: Batch matrix multiplication. Does not perform transposes, see header file for details. + * + * $Date: 19 June 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * s16 batch matrix multiplication + * Refer to header file for details. 
+ */ +arm_cmsis_nn_status arm_batch_matmul_s16(const cmsis_nn_context *ctx, + const cmsis_nn_bmm_params *bmm_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_lhs_dims, + const int16_t *input_lhs, + const cmsis_nn_dims *input_rhs_dims, + const int16_t *input_rhs, + const cmsis_nn_dims *output_dims, + int16_t *output) +{ + (void)ctx; + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t lhs_rows = input_lhs_dims->w; + const int32_t rhs_rows = input_rhs_dims->w; + const int32_t rhs_cols = input_rhs_dims->c; + + const int32_t inner_lhs_diff = input_lhs_dims->h >= input_rhs_dims->h ? 0 : lhs_rows * rhs_cols; + const int32_t inner_rhs_diff = input_rhs_dims->h >= input_lhs_dims->h ? rhs_rows * rhs_cols : 0; + const int32_t outer_lhs_diff = input_lhs_dims->n >= input_rhs_dims->n + ? inner_lhs_diff + : -((lhs_rows * rhs_cols) - inner_lhs_diff) * input_lhs_dims->h; + const int32_t outer_rhs_diff = input_rhs_dims->n >= input_lhs_dims->n ? 
(rhs_rows * rhs_cols) - inner_rhs_diff + : -inner_rhs_diff * input_rhs_dims->h; + + const int32_t reduced_multiplier = REDUCE_MULTIPLIER(quant_params->multiplier); + + for (int i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + for (int i_out_height = 0; i_out_height < output_height; i_out_height++) + { + + for (int j = 0; j < lhs_rows; j++) + { + arm_nn_vec_mat_mult_t_s16_s16(input_lhs, + input_rhs, + NULL, + output, + reduced_multiplier, + quant_params->shift, + rhs_cols, + rhs_rows, + bmm_params->fc_params.activation.min, + bmm_params->fc_params.activation.max); + input_lhs += rhs_cols; + output += rhs_rows; + } + input_lhs -= inner_lhs_diff; + input_rhs += inner_rhs_diff; + } + input_lhs += outer_lhs_diff; + input_rhs += outer_rhs_diff; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s8.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s8.c new file mode 100644 index 0000000..2f29e82 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_batch_matmul_s8.c @@ -0,0 +1,129 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_batch_matmul_s8.c + * Description: Batch matrix multiplication. Does not perform transposes, see header file for details. + * + * $Date: 5 Sep 2024 + * $Revision: V.1.0.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * s8 batchmatrix multiplication + * Refer to header file for details. + */ +arm_cmsis_nn_status arm_batch_matmul_s8(const cmsis_nn_context *ctx, + const cmsis_nn_bmm_params *bmm_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_lhs_dims, + const int8_t *input_lhs, + const cmsis_nn_dims *input_rhs_dims, + const int8_t *input_rhs, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)ctx; +#if defined(ARM_MATH_MVEI) + if (ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + int32_t *vector_sum_buf = (int32_t *)ctx->buf; +#endif + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t lhs_rows = input_lhs_dims->w; + const int32_t rhs_rows = input_rhs_dims->w; + const int32_t rhs_cols = input_rhs_dims->c; + + const int32_t inner_lhs_diff = input_lhs_dims->h >= input_rhs_dims->h ? 0 : lhs_rows * rhs_cols; + const int32_t inner_rhs_diff = input_rhs_dims->h >= input_lhs_dims->h ? rhs_rows * rhs_cols : 0; + const int32_t outer_lhs_diff = input_lhs_dims->n >= input_rhs_dims->n + ? inner_lhs_diff + : -((lhs_rows * rhs_cols) - inner_lhs_diff) * input_lhs_dims->h; + const int32_t outer_rhs_diff = input_rhs_dims->n >= input_lhs_dims->n ? 
(rhs_rows * rhs_cols) - inner_rhs_diff + : -inner_rhs_diff * input_rhs_dims->h; + + for (int i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + for (int i_out_height = 0; i_out_height < output_height; i_out_height++) + { + +#if defined(ARM_MATH_MVEI) + arm_vector_sum_s8(vector_sum_buf, + rhs_cols, + rhs_rows, + input_rhs, + bmm_params->fc_params.input_offset, + bmm_params->fc_params.filter_offset, + NULL); +#endif + for (int i_lhs_rows = 0; i_lhs_rows < lhs_rows; i_lhs_rows++) + { + arm_nn_vec_mat_mult_t_s8(input_lhs, + input_rhs, +#if defined(ARM_MATH_MVEI) + vector_sum_buf, +#else + NULL, +#endif + NULL, + output, + bmm_params->fc_params.input_offset, + bmm_params->fc_params.output_offset, + quant_params->multiplier, + quant_params->shift, + rhs_cols, + rhs_rows, + bmm_params->fc_params.activation.min, + bmm_params->fc_params.activation.max, + 1, + bmm_params->fc_params.filter_offset); + + input_lhs += rhs_cols; + output += rhs_rows; + } + input_lhs -= inner_lhs_diff; + input_rhs += inner_rhs_diff; + } + input_lhs += outer_lhs_diff; + input_rhs += outer_rhs_diff; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s16.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s16.c new file mode 100644 index 0000000..44baf6b --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s16.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_get_buffer_sizes_s16.c + * Description: Collection of get buffer size functions for fully connected s16 layer function. + * + * $Date: 30 January 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup FC + */ + +/** + * @addtogroup GetBufferSizeFC + * @{ + */ + +int32_t arm_fully_connected_s16_get_buffer_size(const cmsis_nn_dims *filter_dims) +{ + (void)filter_dims; + return 0; +} + +int32_t arm_fully_connected_s16_get_buffer_size_dsp(const cmsis_nn_dims *filter_dims) +{ + return arm_fully_connected_s16_get_buffer_size(filter_dims); +} + +int32_t arm_fully_connected_s16_get_buffer_size_mve(const cmsis_nn_dims *filter_dims) +{ + return arm_fully_connected_s16_get_buffer_size(filter_dims); +} + +/** + * @} end of GetBufferSizeFC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s8.c new file mode 100644 index 0000000..930462e --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_get_buffer_sizes_s8.c @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: 
Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for fully connected s8 layer function. + * + * $Date: 15 August 2023 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup FC + */ + +/** + * @addtogroup GetBufferSizeFC + * @{ + */ + +int32_t arm_fully_connected_s8_get_buffer_size_dsp(const cmsis_nn_dims *filter_dims) +{ + (void)filter_dims; + return 0; +} + +int32_t arm_fully_connected_s8_get_buffer_size_mve(const cmsis_nn_dims *filter_dims) +{ + return filter_dims->c * sizeof(int32_t); +} + +int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_fully_connected_s8_get_buffer_size_mve(filter_dims); +#else + return arm_fully_connected_s8_get_buffer_size_dsp(filter_dims); +#endif +} + +/** + * @} end of GetBufferSizeFC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_per_channel_s8.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_per_channel_s8.c new file mode 100644 index 0000000..4a798ee --- /dev/null +++ 
b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_per_channel_s8.c @@ -0,0 +1,102 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_per_channel_s8 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 15 Aug 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 basic fully-connected and matrix multiplication layer function using per-channel quantization for TensorFlow Lite + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_fully_connected_per_channel_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + (void)bias_dims; + + int32_t batch_cnt = input_dims->n; + +#if defined(ARM_MATH_MVEI) + if (ctx->buf == NULL) + { + return (ARM_CMSIS_NN_ARG_ERROR); + } +#endif + + const int32_t *kernel_sum = (const int32_t *)ctx->buf; + + while (batch_cnt) + { + + arm_nn_vec_mat_mult_t_per_ch_s8(input_data, + kernel, + kernel_sum, + bias_data, + output_data, + fc_params->input_offset, + fc_params->output_offset, + quant_params->multiplier, + quant_params->shift, + filter_dims->n, /* col_dim or accum_depth */ + output_dims->c, /* row_dim or output_depth */ + fc_params->activation.min, + fc_params->activation.max, + 1L, + fc_params->filter_offset); + + input_data += filter_dims->n; + output_data += output_dims->c; + batch_cnt--; + } + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s16.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s16.c new file mode 100644 index 0000000..f67efc5 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s16.c @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s16 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 13 January 2023 + * $Revision: V.2.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S16 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_fully_connected_s16(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int16_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + int16_t *output) +{ + (void)bias_dims; + (void)ctx; + (void)fc_params->filter_offset; + + int32_t batch_cnt = input_dims->n; + + const int32_t reduced_multiplier = REDUCE_MULTIPLIER(quant_params->multiplier); + + while (batch_cnt) + { + arm_nn_vec_mat_mult_t_s16(input, + kernel, + bias, + output, + reduced_multiplier, + quant_params->shift, + filter_dims->n, /* col_dim or accum_depth */ + output_dims->c, /* row_dim or output_depth */ + fc_params->activation.min, + fc_params->activation.max); + input += filter_dims->n; + output += output_dims->c; + batch_cnt--; + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s4.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s4.c new file mode 100644 index 0000000..253a63f --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s4.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s4 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 22 April 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S4 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_fully_connected_s4(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)bias_dims; + (void)ctx; + (void)fc_params->filter_offset; + + int32_t batch_cnt = input_dims->n; + + while (batch_cnt) + { + + arm_nn_vec_mat_mult_t_s4(input, + kernel, + bias, + output, + fc_params->input_offset, + fc_params->output_offset, + quant_params->multiplier, + quant_params->shift, + filter_dims->n, /* col_dim or accum_depth */ + output_dims->c, /* row_dim or output_depth */ + fc_params->activation.min, + fc_params->activation.max); + + input += filter_dims->n; + output += output_dims->c; + batch_cnt--; + } + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s8.c 
b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s8.c new file mode 100644 index 0000000..ed3dd36 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_s8.c @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s8 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 6 February 2024 + * $Revision: V.5.3.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_fully_connected_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input, + const cmsis_nn_dims *filter_dims, + const int8_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + int8_t *output) +{ + (void)bias_dims; + + int32_t batch_cnt = input_dims->n; + +#if defined(ARM_MATH_MVEI) + if (ctx->buf == NULL) + { + return (ARM_CMSIS_NN_ARG_ERROR); + } +#endif + + const int32_t *kernel_sum = (const int32_t *)ctx->buf; + + while (batch_cnt) + { + + arm_nn_vec_mat_mult_t_s8(input, + kernel, + kernel_sum, + bias, + output, + fc_params->input_offset, + fc_params->output_offset, + quant_params->multiplier, + quant_params->shift, + filter_dims->n, /* col_dim or accum_depth */ + output_dims->c, /* row_dim or output_depth */ + fc_params->activation.min, + fc_params->activation.max, + 1L, + fc_params->filter_offset); + + input += filter_dims->n; + output += output_dims->c; + batch_cnt--; + } + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_wrapper_s8.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_wrapper_s8.c new file mode 100644 index 0000000..8c7c1b4 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_fully_connected_wrapper_s8.c @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s8 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 19 Aug 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_fully_connected_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const int8_t *input_data, + const cmsis_nn_dims *filter_dims, + const int8_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + int8_t *output_data) +{ + + if (quant_params->is_per_channel) + { + const cmsis_nn_per_channel_quant_params per_channel_quant_params = {quant_params->multiplier, + quant_params->shift}; + + return arm_fully_connected_per_channel_s8(ctx, + fc_params, + &per_channel_quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + else + { + const cmsis_nn_per_tensor_quant_params per_tensor_quant_params = {*quant_params->multiplier, + *quant_params->shift}; + return arm_fully_connected_s8(ctx, + fc_params, + &per_tensor_quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8.c new file mode 100644 index 0000000..7608ade --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8.c @@ -0,0 +1,175 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_vector_sum_s8 + * Description: Generic function for calculating vector sums + * + * $Date: 05 Sep 2024 + * $Revision: V.3.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 vector sum fuction in preparation for e.g. kernel sums in fully connected and matrix multiplication layer function + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_vector_sum_s8(int32_t *vector_sum_buf, + const int32_t vector_cols, + const int32_t vector_rows, + const int8_t *vector_data, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t *bias_data) +{ + + if (bias_data) + { + memcpy(vector_sum_buf, bias_data, vector_rows * sizeof(int32_t)); + } + else + { + memset(vector_sum_buf, 0, vector_rows * sizeof(int32_t)); + } + + if (lhs_offset) + { +#if defined(ARM_MATH_MVEI) + + const int32_t row_loop_cnt = vector_rows / 5; + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int32_t col_loop_cnt = (vector_cols + 15) / 16; + const int8_t *vector_0 = vector_data; + const int8_t *vector_1 = vector_data + vector_cols; + const int8_t *vector_2 = vector_data + 2 * vector_cols; + const int8_t *vector_3 = vector_data + 3 * vector_cols; + const int8_t *vector_4 = vector_data + 4 * vector_cols; + int32_t vector_sum_0 = 0; + int32_t vector_sum_1 = 0; + int32_t vector_sum_2 = 0; + int32_t vector_sum_3 = 0; + int32_t vector_sum_4 = 0; + uint32_t col_cnt = (uint32_t)vector_cols; + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t ker_0 = vldrbq_z_s8(vector_0, p); + vector_sum_0 = vaddvaq_s8(vector_sum_0, ker_0); + const int8x16_t ker_1 = vldrbq_z_s8(vector_1, p); + vector_sum_1 = vaddvaq_s8(vector_sum_1, ker_1); + const int8x16_t ker_2 = vldrbq_z_s8(vector_2, p); + vector_sum_2 = vaddvaq_s8(vector_sum_2, ker_2); + const int8x16_t ker_3 = vldrbq_z_s8(vector_3, p); + vector_sum_3 = vaddvaq_s8(vector_sum_3, ker_3); + const int8x16_t ker_4 = vldrbq_z_s8(vector_4, p); + vector_sum_4 = vaddvaq_s8(vector_sum_4, ker_4); + vector_0 += 16; + vector_1 += 16; + vector_2 += 16; + vector_3 += 16; + vector_4 += 16; + } + vector_data += 5 * vector_cols; + + if (rhs_offset) + { + vector_sum_0 += vector_cols * rhs_offset; + vector_sum_1 += vector_cols * rhs_offset; + vector_sum_2 += vector_cols * 
rhs_offset; + vector_sum_3 += vector_cols * rhs_offset; + vector_sum_4 += vector_cols * rhs_offset; + } + + vector_sum_0 *= lhs_offset; + vector_sum_1 *= lhs_offset; + vector_sum_2 *= lhs_offset; + vector_sum_3 *= lhs_offset; + vector_sum_4 *= lhs_offset; + + vector_sum_buf[0] += vector_sum_0; + vector_sum_buf[1] += vector_sum_1; + vector_sum_buf[2] += vector_sum_2; + vector_sum_buf[3] += vector_sum_3; + vector_sum_buf[4] += vector_sum_4; + vector_sum_buf += 5; + } + const int32_t loop_cnt = vector_rows % 5; + for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + const int32_t col_loop_cnt = (vector_cols + 15) / 16; + const int8_t *vector_0 = vector_data; + int32_t vector_sum_0 = 0; + uint32_t col_cnt = (uint32_t)vector_cols; + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t ker_0 = vldrbq_z_s8(vector_0, p); + vector_sum_0 = vaddvaq_s8(vector_sum_0, ker_0); + vector_0 += 16; + } + vector_data += vector_cols; + if (rhs_offset) + { + vector_sum_0 += vector_cols * rhs_offset; + } + vector_sum_0 *= lhs_offset; + + vector_sum_buf[i_row_loop_cnt] += vector_sum_0; + } +#else + for (int i = 0; i < vector_rows; i++) + { + int32_t sum = 0; + for (int j = 0; j < vector_cols; j++) + { + sum += *vector_data++; + } + if (rhs_offset) + { + sum += vector_cols * rhs_offset; + } + *vector_sum_buf++ += sum * lhs_offset; + } +#endif + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8_s64.c b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8_s64.c new file mode 100644 index 0000000..b9f1ef9 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/FullyConnectedFunctions/arm_vector_sum_s8_s64.c @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the 
Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_vector_sum_s8_s64 + * Description: Generic function for calculating vector sums + * + * $Date: 26 March 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 vector sum fuction in preparation for e.g. kernel sums in fully connected and matrix multiplication layer function + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_vector_sum_s8_s64(int64_t *vector_sum_buf, + const int32_t vector_cols, + const int32_t vector_rows, + const int8_t *vector_data, + const int32_t lhs_offset, + const int64_t *bias_data) +{ + + if (bias_data) + { + memcpy(vector_sum_buf, bias_data, vector_rows * sizeof(int64_t)); + } + else + { + memset(vector_sum_buf, 0, vector_rows * sizeof(int64_t)); + } + if (lhs_offset) + { +#if defined(ARM_MATH_MVEI) + + const int32_t row_loop_cnt = vector_rows / 5; + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int32_t col_loop_cnt = (vector_cols + 15) / 16; + const int8_t *vector_0 = vector_data; + const int8_t *vector_1 = vector_data + vector_cols; + const int8_t *vector_2 = vector_data + 2 * vector_cols; + const int8_t *vector_3 = vector_data + 3 * vector_cols; + const int8_t *vector_4 = vector_data + 4 * vector_cols; + int32_t vector_sum_0 = 0; + int32_t vector_sum_1 = 0; + int32_t vector_sum_2 = 0; + int32_t vector_sum_3 = 0; + int32_t vector_sum_4 = 0; + uint32_t col_cnt = (uint32_t)vector_cols; + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t ker_0 = vldrbq_z_s8(vector_0, p); + vector_sum_0 = vaddvaq_s8(vector_sum_0, ker_0); + const int8x16_t ker_1 = vldrbq_z_s8(vector_1, p); + vector_sum_1 = vaddvaq_s8(vector_sum_1, ker_1); + const int8x16_t ker_2 = vldrbq_z_s8(vector_2, p); + vector_sum_2 = vaddvaq_s8(vector_sum_2, ker_2); + const int8x16_t ker_3 = vldrbq_z_s8(vector_3, p); + vector_sum_3 = vaddvaq_s8(vector_sum_3, ker_3); + const int8x16_t ker_4 = vldrbq_z_s8(vector_4, p); + vector_sum_4 = vaddvaq_s8(vector_sum_4, ker_4); + vector_0 += 16; + vector_1 += 16; + vector_2 += 16; + vector_3 += 16; + vector_4 += 16; + } + vector_data += 5 * vector_cols; + + vector_sum_0 *= lhs_offset; + vector_sum_1 *= lhs_offset; + vector_sum_2 *= lhs_offset; + vector_sum_3 *= lhs_offset; + vector_sum_4 *= lhs_offset; + + vector_sum_buf[0] += 
vector_sum_0; + vector_sum_buf[1] += vector_sum_1; + vector_sum_buf[2] += vector_sum_2; + vector_sum_buf[3] += vector_sum_3; + vector_sum_buf[4] += vector_sum_4; + vector_sum_buf += 5; + } + const int32_t loop_cnt = vector_rows % 5; + for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + const int32_t col_loop_cnt = (vector_cols + 15) / 16; + const int8_t *vector_0 = vector_data; + int32_t vector_sum_0 = 0; + uint32_t col_cnt = (uint32_t)vector_cols; + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t ker_0 = vldrbq_z_s8(vector_0, p); + vector_sum_0 = vaddvaq_s8(vector_sum_0, ker_0); + vector_0 += 16; + } + vector_data += vector_cols; + vector_sum_0 *= lhs_offset; + + vector_sum_buf[i_row_loop_cnt] += vector_sum_0; + } +#else + for (int i = 0; i < vector_rows; i++) + { + int64_t sum = 0; + for (int j = 0; j < vector_cols; j++) + { + sum += *vector_data++; + } + *vector_sum_buf++ += sum * (int64_t)lhs_offset; + } +#endif + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of FC group + */ diff --git a/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s16.c b/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s16.c new file mode 100644 index 0000000..9bd5cc7 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s16.c @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024, Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_lstm_unidirectional_s16.c + * Description: S16 LSTM function with S16 gate output + * + * $Date: 26 March 2024 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M processors + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup LSTM + * @{ + */ + +/* + * S16 LSTM function for TensorFlow Lite with S16 gate output + * + * Refer to header file for details. + * + */ + +arm_cmsis_nn_status arm_lstm_unidirectional_s16(const int16_t *input, + int16_t *output, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers) +{ + + int16_t *hidden_in = NULL; + memset(buffers->cell_state, 0, params->batch_size * params->hidden_size * sizeof(int16_t)); + if (params->time_major) + { + // First dimension is time, input/output for each time step is stored continously in memory + for (int t = 0; t < params->time_steps; t++) + { + const int16_t *data_in = input + (t * params->batch_size * params->input_size); + int16_t *hidden_out = output + (t * params->batch_size * params->hidden_size); + arm_cmsis_nn_status status = arm_nn_lstm_step_s16(data_in, hidden_in, hidden_out, params, buffers, 1); + if (status != ARM_CMSIS_NN_SUCCESS) + { + return status; + } + // Output is used as recurrent input/hidden state for the next timestep. 
+ hidden_in = &hidden_out[0]; + } + } + else + { + // First dimension is time, add batch_offset to jump in memory for each batch + for (int t = 0; t < params->time_steps; t++) + { + const int16_t *data_in = input + (t * params->input_size); + int16_t *hidden_out = output + (t * params->hidden_size); + arm_cmsis_nn_status status = + arm_nn_lstm_step_s16(data_in, hidden_in, hidden_out, params, buffers, params->time_steps); + if (status != ARM_CMSIS_NN_SUCCESS) + { + return status; + } + // Output is used as recurrent input/hidden state for the next timestep. + hidden_in = &hidden_out[0]; + } + } + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of LSTM group + */ diff --git a/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s8.c b/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s8.c new file mode 100644 index 0000000..a90ecbb --- /dev/null +++ b/src/third_party/cmsis_nn/Source/LSTMFunctions/arm_lstm_unidirectional_s8.c @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024, Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_lstm_unidirectional_s8.c + * Description: S8 LSTM function with S16 gate output + * + * $Date: 08 February 2024 + * $Revision: V.1.1.0 + * + * Target Processor: Cortex-M processors + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup LSTM + * @{ + */ + +/* + * S8 LSTM function for TensorFlow Lite with S16 gate output + * + * Refer to header file for details. + * + */ + +arm_cmsis_nn_status arm_lstm_unidirectional_s8(const int8_t *input, + int8_t *output, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers) +{ + + int8_t *hidden_in = NULL; + memset(buffers->cell_state, 0, params->batch_size * params->hidden_size * sizeof(int16_t)); + if (params->time_major) + { + // First dimension is time, input/output for each time step is stored continously in memory + for (int t = 0; t < params->time_steps; t++) + { + const int8_t *data_in = input + (t * params->batch_size * params->input_size); + int8_t *hidden_out = output + (t * params->batch_size * params->hidden_size); + arm_cmsis_nn_status status = arm_nn_lstm_step_s8(data_in, hidden_in, hidden_out, params, buffers, 1); + if (status != ARM_CMSIS_NN_SUCCESS) + { + return status; + } + // Output is used as recurrent input/hidden state for the next timestep. 
+ hidden_in = &hidden_out[0]; + } + } + else + { + // First dimension is time, add batch_offset to jump in memory for each batch + for (int t = 0; t < params->time_steps; t++) + { + const int8_t *data_in = input + (t * params->input_size); + int8_t *hidden_out = output + (t * params->hidden_size); + arm_cmsis_nn_status status = + arm_nn_lstm_step_s8(data_in, hidden_in, hidden_out, params, buffers, params->time_steps); + if (status != ARM_CMSIS_NN_SUCCESS) + { + return status; + } + // Output is used as recurrent input/hidden state for the next timestep. + hidden_in = &hidden_out[0]; + } + } + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of LSTM group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c new file mode 100644 index 0000000..c75694b --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c @@ -0,0 +1,176 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_padded_s8.c + * Description: Depthwise convolution with padded matrices. 
+ * + * $Date: 26 October 2022 + * $Revision: V.2.0.1 + * + * Target Processor: Cortex-M processors with MVE extension + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @defgroup supportConvolution Convolution + * + * Support functions for Convolution and DW Convolution + * + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Depthwise convolution of transposed rhs matrix with 4 lhs matrices. One or more of the rhs matrices are padded. + * Dimensions are the same for lhs and rhs. + * + * Refer header file for details. + * + */ + +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_padded_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t input_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + int8_t *out) +{ +#if defined(ARM_MATH_MVEI) + int32_t loop_count = (active_ch + 3) / 4; + const int32_t *bias = output_bias; + uint32_t num_ch_to_process = active_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, out += 4, offset += 4, i_loop_cnt++) + { + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(bias); + bias += 4; + } + int32x4_t out_1 = out_0; + int32x4_t out_2 = out_0; + int32x4_t out_3 = out_0; + + const int8_t *rhs_0 = rhs + offset; + const int8_t *lhs_0 = lhs + offset; + const int8_t *lhs_1 = lhs + row_x_col * CH_IN_BLOCK_MVE + offset; + const int8_t *lhs_2 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 3) + offset; + + for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) + { + const int32x4_t ker_0 = vldrbq_s32(rhs_0); + + int32x4_t ip_0 = 
vldrbq_s32(lhs_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + int32x4_t ip_1 = vldrbq_s32(lhs_1); + ip_1 = vaddq_n_s32(ip_1, input_offset); + out_1 += vmulq_s32(ip_1, ker_0); + + int32x4_t ip_2 = vldrbq_s32(lhs_2); + ip_2 = vaddq_n_s32(ip_2, input_offset); + out_2 += vmulq_s32(ip_2, ker_0); + + int32x4_t ip_3 = vldrbq_s32(lhs_3); + ip_3 = vaddq_n_s32(ip_3, input_offset); + + out_3 += vmulq_s32(ip_3, ker_0); + + lhs_0 += CH_IN_BLOCK_MVE; + lhs_1 += CH_IN_BLOCK_MVE; + lhs_2 += CH_IN_BLOCK_MVE; + lhs_3 += CH_IN_BLOCK_MVE; + + rhs_0 += total_ch; + } + + const int32x4_t mult = vldrwq_s32(out_mult); + const int32x4_t shift = vldrwq_s32(out_shift); + out_mult += 4; + out_shift += 4; + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, out_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + mve_pred16_t p = vctp32q(num_ch_to_process); + vstrbq_p_s32(out, out_0, p); + + out_1 = arm_requantize_mve_32x4(out_1, mult, shift); + out_1 = vaddq_n_s32(out_1, out_offset); + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + total_ch, out_1, p); + + out_2 = arm_requantize_mve_32x4(out_2, mult, shift); + out_2 = vaddq_n_s32(out_2, out_offset); + out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); + out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 2 * total_ch, out_2, p); + + out_3 = arm_requantize_mve_32x4(out_3, mult, shift); + out_3 = vaddq_n_s32(out_3, out_offset); + out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); + out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 3 * total_ch, out_3, p); + } + + return ARM_CMSIS_NN_SUCCESS; + +#else + (void)lhs; + (void)rhs; + (void)input_offset; + (void)active_ch; + (void)total_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; 
+ (void)activation_max; + (void)row_x_col; + (void)output_bias; + (void)out; + return ARM_CMSIS_NN_NO_IMPL_ERROR; +#endif +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c new file mode 100644 index 0000000..b623b89 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c @@ -0,0 +1,171 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_s16.c + * Description: Depthwise convolution on matrices with no padding. + * + * $Date: 26 October 2022 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M processors with MVE extension + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs. + * + * Refer header file for details. 
/*
 * Depthwise convolution of an rhs matrix with 4 lhs matrices, no padding.
 * Dimensions are the same for lhs and rhs.
 *
 * Refer to header file for details.
 */
int16_t *arm_nn_depthwise_conv_nt_t_s16(const int16_t *lhs,
                                        const int8_t *rhs,
                                        const uint16_t num_ch,
                                        const int32_t *out_shift,
                                        const int32_t *out_mult,
                                        const int32_t activation_min,
                                        const int32_t activation_max,
                                        const uint16_t row_x_col,
                                        const int64_t *const output_bias,
                                        int16_t *out)
{
#if defined(ARM_MATH_MVEI)

    const int64_t *bias = output_bias;
    const int32_t ch_block_cnt = (num_ch + 3) / 4;
    uint32_t ch_left = num_ch;

    /* Four channels per iteration; a partial final block is handled by the
       store predicate below. */
    for (int blk = 0, offset = 0; blk < ch_block_cnt; ch_left -= 4, offset += 4, out += 4, blk++)
    {
        const int8_t *ker = rhs + offset;
        const int16_t *in_0 = lhs + offset;
        const int16_t *in_1 = lhs + row_x_col * num_ch + offset;
        const int16_t *in_2 = lhs + (row_x_col * num_ch * 2) + offset;
        const int16_t *in_3 = lhs + (row_x_col * num_ch * 3) + offset;

        int32x4_t acc_0 = vdupq_n_s32(0);
        int32x4_t acc_1 = vdupq_n_s32(0);
        int32x4_t acc_2 = vdupq_n_s32(0);
        int32x4_t acc_3 = vdupq_n_s32(0);

        /* MAC over the kernel footprint. */
        for (int i = 0; i < row_x_col; i++)
        {
            const int32x4_t w = vldrbq_s32(ker);

            acc_0 += vmulq_s32(vldrhq_s32(in_0), w);
            acc_1 += vmulq_s32(vldrhq_s32(in_1), w);
            acc_2 += vmulq_s32(vldrhq_s32(in_2), w);
            acc_3 += vmulq_s32(vldrhq_s32(in_3), w);

            in_0 += num_ch;
            in_1 += num_ch;
            in_2 += num_ch;
            in_3 += num_ch;
            ker += num_ch;
        }

        /* Requantize lane by lane: the (optional) 64-bit bias is combined with
           the accumulator at 64 bits before the reduced-multiplier requantize. */
        for (int lane = 0; lane < 4; lane++)
        {
            const int32_t reduced_multiplier = REDUCE_MULTIPLIER(out_mult[lane]);
            const int32_t shift = out_shift[lane];
            int64_t q_0 = (int64_t)acc_0[lane];
            int64_t q_1 = (int64_t)acc_1[lane];
            int64_t q_2 = (int64_t)acc_2[lane];
            int64_t q_3 = (int64_t)acc_3[lane];

            if (bias)
            {
                q_0 += *bias;
                q_1 += *bias;
                q_2 += *bias;
                q_3 += *bias;
                bias++;
            }

            acc_0[lane] = arm_nn_requantize_s64(q_0, reduced_multiplier, shift);
            acc_1[lane] = arm_nn_requantize_s64(q_1, reduced_multiplier, shift);
            acc_2[lane] = arm_nn_requantize_s64(q_2, reduced_multiplier, shift);
            acc_3[lane] = arm_nn_requantize_s64(q_3, reduced_multiplier, shift);
        }

        const mve_pred16_t p = vctp32q(ch_left);

        acc_0 = vmaxq_s32(acc_0, vdupq_n_s32(activation_min));
        acc_0 = vminq_s32(acc_0, vdupq_n_s32(activation_max));
        vstrhq_p_s32(out, acc_0, p);

        acc_1 = vmaxq_s32(acc_1, vdupq_n_s32(activation_min));
        acc_1 = vminq_s32(acc_1, vdupq_n_s32(activation_max));
        vstrhq_p_s32(out + num_ch, acc_1, p);

        acc_2 = vmaxq_s32(acc_2, vdupq_n_s32(activation_min));
        acc_2 = vminq_s32(acc_2, vdupq_n_s32(activation_max));
        vstrhq_p_s32(out + 2 * num_ch, acc_2, p);

        acc_3 = vmaxq_s32(acc_3, vdupq_n_s32(activation_min));
        acc_3 = vminq_s32(acc_3, vdupq_n_s32(activation_max));
        vstrhq_p_s32(out + 3 * num_ch, acc_3, p);

        out_mult += 4;
        out_shift += 4;
    }

    /* If the last block was partial, step back so the returned pointer sits
       right after the last channel actually written. */
    const int tail_ch = num_ch & 0x3;
    if (tail_ch != 0)
    {
        out -= (4 - tail_ch);
    }

    return out + (3 * num_ch);
#else
    (void)lhs;
    (void)rhs;
    (void)num_ch;
    (void)out_shift;
    (void)out_mult;
    (void)activation_min;
    (void)activation_max;
    (void)row_x_col;
    (void)output_bias;
    (void)out;
    return NULL;
#endif
}

/**
 * @} end of Doxygen group
 */
* + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_s4.c + * Description: Depthwise convolution on matrices with no padding and packed int4 weights. + * + * $Date: 05 April 2024 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M processors with MVE extension. + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding and packed int4 weights. + * Dimensions are the same for lhs and rhs. + * + * Refer header file for details. 
/*
 * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding and packed int4 weights.
 * Dimensions are the same for lhs and rhs.
 *
 * Refer to header file for details.
 */
arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s4(const int8_t *lhs,
                                                  const int8_t *rhs,
                                                  const int32_t input_offset,
                                                  const int32_t active_ch,
                                                  const int32_t total_ch,
                                                  const int32_t *out_shift,
                                                  const int32_t *out_mult,
                                                  const int32_t out_offset,
                                                  const int32_t activation_min,
                                                  const int32_t activation_max,
                                                  const uint16_t row_x_col,
                                                  const int32_t *const output_bias,
                                                  int8_t *out)
{
#if defined(ARM_MATH_MVEI)
    const int32_t *bias = output_bias;
    int32_t loop_count = (active_ch + 3) / 4;
    uint32_t num_ch_to_process = active_ch;
    /* Gather offsets: lanes 0/1 read packed byte 0, lanes 2/3 read byte 1
       (two int4 channels per byte). */
    const uint32x4_t gather_offset = {0, 0, 1, 1};
    /* 0x0F0F: predicate selecting lanes 0 and 2, i.e. the lanes whose channel
       value is the low nibble of the gathered byte. */
    const mve_pred16_t lower_nibble_mask = 3855; // 0000111100001111

    /* Four channels per outer iteration; 'num_ch_to_process' drives the store
       predicate so a partial last block writes only the existing channels. */
    for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count;
         num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++)
    {
        /* Seed all four row accumulators with the (optional) per-channel bias. */
        int32x4_t out_0 = vdupq_n_s32(0);
        if (bias)
        {
            out_0 = vldrwq_s32(bias);
            bias += 4;
        }
        int32x4_t out_1 = out_0;
        int32x4_t out_2 = out_0;
        int32x4_t out_3 = out_0;

        /* Packed int4 weights: two channels per byte, hence offset >> 1. */
        const int8_t *rhs_0 = rhs + (offset >> 1);
        const int8_t *lhs_0 = lhs + offset;
        const int8_t *lhs_1 = lhs + row_x_col * S4_CH_IN_BLOCK_MVE + offset;
        const int8_t *lhs_2 = lhs + (row_x_col * S4_CH_IN_BLOCK_MVE * 2) + offset;
        const int8_t *lhs_3 = lhs + (row_x_col * S4_CH_IN_BLOCK_MVE * 3) + offset;
        /* Kernel sum, used below to fold the input offset in once. */
        int32x4_t ker_sum = vdupq_n_s32(0);

        if (total_ch % 2)
        {
            /* Odd channel count: the nibble alignment alternates every kernel
               position, tracked by 'get_low_nibble'. */
            int get_low_nibble = 1;
            for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++)
            {
                int32x4_t ker_0;
                if (get_low_nibble)
                {
                    /* Aligned phase: isolate and sign-extend the low nibble in
                       the masked lanes via shifts; the final >> 4 leaves the
                       other lanes with the sign-extended high nibble. */
                    ker_0 = vldrbq_gather_offset_s32(rhs_0, gather_offset);

                    ker_0 = vrshlq_m_n_s32(ker_0, 28, lower_nibble_mask);
                    ker_0 = vshrq_m_n_s32(ker_0, ker_0, 24, lower_nibble_mask);

                    ker_0 = vshrq_n_s32(ker_0, 4);
                }
                else
                {
                    /* Misaligned phase: unpack the four nibbles scalar-wise,
                       sign-extending each via the << 4 then >> 4 trick. */
                    int8_t temp[] = {
                        rhs_0[0] >> 4, (int8_t)(rhs_0[1] << 4) >> 4, rhs_0[1] >> 4, (int8_t)(rhs_0[2] << 4) >> 4};
                    ker_0 = vldrbq_s32(temp);
                }

                ker_sum = vaddq_s32(ker_sum, ker_0);

                int32x4_t ip_0 = vldrbq_s32(lhs_0);
                out_0 += vmulq_s32(ip_0, ker_0);

                int32x4_t ip_1 = vldrbq_s32(lhs_1);
                out_1 += vmulq_s32(ip_1, ker_0);

                int32x4_t ip_2 = vldrbq_s32(lhs_2);
                out_2 += vmulq_s32(ip_2, ker_0);

                int32x4_t ip_3 = vldrbq_s32(lhs_3);
                out_3 += vmulq_s32(ip_3, ker_0);

                lhs_0 += S4_CH_IN_BLOCK_MVE;
                lhs_1 += S4_CH_IN_BLOCK_MVE;
                lhs_2 += S4_CH_IN_BLOCK_MVE;
                lhs_3 += S4_CH_IN_BLOCK_MVE;

                get_low_nibble = !get_low_nibble;
                /* Advance by total_ch nibbles; the extra byte is consumed when
                   returning to the aligned phase. */
                rhs_0 += (total_ch >> 1) + get_low_nibble;
            }
        }
        else
        {
            /* Even channel count: nibble alignment is fixed across kernel rows. */
            for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++)
            {
                int32x4_t ker_0 = vldrbq_gather_offset_s32(rhs_0, gather_offset);

                ker_0 = vrshlq_m_n_s32(ker_0, 28, lower_nibble_mask);
                ker_0 = vshrq_m_n_s32(ker_0, ker_0, 24, lower_nibble_mask);

                ker_0 = vshrq_n_s32(ker_0, 4);

                ker_sum = vaddq_s32(ker_sum, ker_0);

                int32x4_t ip_0 = vldrbq_s32(lhs_0);
                out_0 += vmulq_s32(ip_0, ker_0);

                int32x4_t ip_1 = vldrbq_s32(lhs_1);
                out_1 += vmulq_s32(ip_1, ker_0);

                int32x4_t ip_2 = vldrbq_s32(lhs_2);
                out_2 += vmulq_s32(ip_2, ker_0);

                int32x4_t ip_3 = vldrbq_s32(lhs_3);
                out_3 += vmulq_s32(ip_3, ker_0);

                lhs_0 += S4_CH_IN_BLOCK_MVE;
                lhs_1 += S4_CH_IN_BLOCK_MVE;
                lhs_2 += S4_CH_IN_BLOCK_MVE;
                lhs_3 += S4_CH_IN_BLOCK_MVE;

                rhs_0 += total_ch >> 1;
            }
        }

        /* Add input_offset * sum(kernel) once instead of per input element. */
        ker_sum = vmulq_n_s32(ker_sum, input_offset);
        out_0 = ker_sum + out_0;
        out_1 = ker_sum + out_1;
        out_2 = ker_sum + out_2;
        out_3 = ker_sum + out_3;

        /* Requantize, add output offset, clamp and store with a predicate. */
        const int32x4_t mult = vldrwq_s32(out_mult);
        const int32x4_t shift = vldrwq_s32(out_shift);
        out_mult += 4;
        out_shift += 4;
        mve_pred16_t p = vctp32q(num_ch_to_process);

        out_0 = arm_requantize_mve_32x4(out_0, mult, shift);
        out_0 = vaddq_n_s32(out_0, out_offset);
        out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min));
        out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max));
        vstrbq_p_s32(out, out_0, p);

        out_1 = arm_requantize_mve_32x4(out_1, mult, shift);
        out_1 = vaddq_n_s32(out_1, out_offset);
        out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min));
        out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max));
        vstrbq_p_s32(out + total_ch, out_1, p);

        out_2 = arm_requantize_mve_32x4(out_2, mult, shift);
        out_2 = vaddq_n_s32(out_2, out_offset);
        out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min));
        out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max));
        vstrbq_p_s32(out + 2 * total_ch, out_2, p);

        out_3 = arm_requantize_mve_32x4(out_3, mult, shift);
        out_3 = vaddq_n_s32(out_3, out_offset);
        out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min));
        out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max));
        vstrbq_p_s32(out + 3 * total_ch, out_3, p);
    }

    return ARM_CMSIS_NN_SUCCESS;
#else
    (void)lhs;
    (void)rhs;
    (void)input_offset;
    (void)active_ch;
    (void)total_ch;
    (void)out_shift;
    (void)out_mult;
    (void)out_offset;
    (void)activation_min;
    (void)activation_max;
    (void)row_x_col;
    (void)output_bias;
    (void)out;
    return ARM_CMSIS_NN_NO_IMPL_ERROR;
#endif
}

/**
 * @} end of Doxygen group
 */
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_s8.c + * Description: Depthwise convolution on matrices with no padding. + * + * $Date: 26 October 2022 + * $Revision: V.2.0.1 + * + * Target Processor: Cortex-M processors with MVE extension. + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs. + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t input_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + int8_t *out) +{ +#if defined(ARM_MATH_MVEI) + const int32_t *bias = output_bias; + int32_t loop_count = (active_ch + 3) / 4; + uint32_t num_ch_to_process = active_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++) + { + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(bias); + bias += 4; + } + int32x4_t out_1 = out_0; + int32x4_t out_2 = out_0; + int32x4_t out_3 = out_0; + + const int8_t *rhs_0 = rhs + offset; + const int8_t *lhs_0 = lhs + offset; + const int8_t *lhs_1 = lhs + row_x_col * CH_IN_BLOCK_MVE + offset; + const int8_t *lhs_2 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * 
CH_IN_BLOCK_MVE * 3) + offset; + int32x4_t ker_sum = vdupq_n_s32(0); + + for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) + { + const int32x4_t ker_0 = vldrbq_s32(rhs_0); + ker_sum = vaddq_s32(ker_sum, ker_0); + + int32x4_t ip_0 = vldrbq_s32(lhs_0); + out_0 += vmulq_s32(ip_0, ker_0); + + int32x4_t ip_1 = vldrbq_s32(lhs_1); + out_1 += vmulq_s32(ip_1, ker_0); + + int32x4_t ip_2 = vldrbq_s32(lhs_2); + out_2 += vmulq_s32(ip_2, ker_0); + + int32x4_t ip_3 = vldrbq_s32(lhs_3); + out_3 += vmulq_s32(ip_3, ker_0); + + lhs_0 += CH_IN_BLOCK_MVE; + lhs_1 += CH_IN_BLOCK_MVE; + lhs_2 += CH_IN_BLOCK_MVE; + lhs_3 += CH_IN_BLOCK_MVE; + + rhs_0 += total_ch; + } + + ker_sum = vmulq_n_s32(ker_sum, input_offset); + out_0 = ker_sum + out_0; + out_1 = ker_sum + out_1; + out_2 = ker_sum + out_2; + out_3 = ker_sum + out_3; + + const int32x4_t mult = vldrwq_s32(out_mult); + const int32x4_t shift = vldrwq_s32(out_shift); + out_mult += 4; + out_shift += 4; + mve_pred16_t p = vctp32q(num_ch_to_process); + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, out_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out, out_0, p); + + out_1 = arm_requantize_mve_32x4(out_1, mult, shift); + out_1 = vaddq_n_s32(out_1, out_offset); + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + total_ch, out_1, p); + + out_2 = arm_requantize_mve_32x4(out_2, mult, shift); + out_2 = vaddq_n_s32(out_2, out_offset); + out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); + out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 2 * total_ch, out_2, p); + + out_3 = arm_requantize_mve_32x4(out_3, mult, shift); + out_3 = vaddq_n_s32(out_3, out_offset); + out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); + out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 3 
* total_ch, out_3, p); + } + + return ARM_CMSIS_NN_SUCCESS; +#else + (void)lhs; + (void)rhs; + (void)input_offset; + (void)active_ch; + (void)total_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)row_x_col; + (void)output_bias; + (void)out; + return ARM_CMSIS_NN_NO_IMPL_ERROR; +#endif +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s16.c new file mode 100644 index 0000000..85429cf --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s16.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022, 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_lstm_calculate_gate_s16.c + * Description: Update single gate for an incremental step of LSTM function. 
+ * + * $Date: 26 March 2024 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nn_tables.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportLSTM + * @{ + */ + +/* + * Calculates a single LSTM gate, int16x8_16 version. + * Refer to header file for details + */ +arm_cmsis_nn_status arm_nn_lstm_calculate_gate_s16(const int16_t *data_in, + const int16_t *hidden_in, + const cmsis_nn_lstm_gate *gate, + const cmsis_nn_lstm_params *params, + int16_t *output, + const int32_t batch_offset) +{ + + memset(output, 0, params->hidden_size * params->batch_size * sizeof(int16_t)); + + arm_nn_vec_mat_mul_result_acc_s16(data_in, + gate->input_weights, + gate->input_effective_bias, + output, + gate->input_multiplier, + gate->input_shift, + params->input_size, + params->hidden_size, + params->batch_size, + batch_offset); + + if (hidden_in) + { + + arm_nn_vec_mat_mul_result_acc_s16(hidden_in, + gate->hidden_weights, + gate->hidden_effective_bias, + output, + gate->hidden_multiplier, + gate->hidden_shift, + params->hidden_size, + params->hidden_size, + params->batch_size, + batch_offset); + } + + arm_nn_activation_s16(output, output, params->hidden_size * params->batch_size, 0, gate->activation_type); + + return ARM_CMSIS_NN_SUCCESS; +} +/** + * @} end of supportLSTM group + */ \ No newline at end of file diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s8_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s8_s16.c new file mode 100644 index 0000000..29a385e --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_calculate_gate_s8_s16.c @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022, 2024 
Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_lstm_calculate_gate_s8_s16.c + * Description: Update single gate for an incremental step of LSTM function. + * + * $Date: 19 January 2024 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nn_tables.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @defgroup supportLSTM LSTM + * + * Support functions for LSTM + * + */ + +/** + * @addtogroup supportLSTM + * @{ + */ + +/* + * Calculates a single LSTM gate, int8x8_16 version. 
+ * Refer to header file for details + */ +arm_cmsis_nn_status arm_nn_lstm_calculate_gate_s8_s16(const int8_t *data_in, + const int8_t *hidden_in, + const cmsis_nn_lstm_gate *gate, + const cmsis_nn_lstm_params *params, + int16_t *output, + const int32_t batch_offset) +{ + + memset(output, 0, params->hidden_size * params->batch_size * sizeof(int16_t)); + + arm_nn_vec_mat_mul_result_acc_s8_s16(data_in, + gate->input_weights, + gate->input_effective_bias, + output, + gate->input_multiplier, + gate->input_shift, + params->input_size, + params->hidden_size, + params->batch_size, + batch_offset); + + if (hidden_in) + { + arm_nn_vec_mat_mul_result_acc_s8_s16(hidden_in, + gate->hidden_weights, + gate->hidden_effective_bias, + output, + gate->hidden_multiplier, + gate->hidden_shift, + params->hidden_size, + params->hidden_size, + params->batch_size, + batch_offset); + } + + arm_nn_activation_s16(output, output, params->hidden_size * params->batch_size, 0, gate->activation_type); + + return ARM_CMSIS_NN_SUCCESS; +} +/** + * @} end of supportLSTM group + */ \ No newline at end of file diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s16.c new file mode 100644 index 0000000..0040e28 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s16.c @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_lstm_step_s16.c + * Description: Update LSTM function for a single iteration step. + * + * $Date: 26 March 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportLSTM + * @{ + */ + +/* + * Calculate the output state tensor of an LSTM step, s16 input/output/weights and s16 internal buffers version. + * Refer to header file for details. + */ +arm_cmsis_nn_status arm_nn_lstm_step_s16(const int16_t *data_in, + const int16_t *hidden_in, + int16_t *hidden_out, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers, + const int32_t batch_offset) +{ + int16_t *forget_gate = buffers->temp1; + int16_t *input_gate = buffers->temp1; + int16_t *cell_gate = buffers->temp2; + int16_t *output_gate = buffers->temp1; + int16_t *hidden_temp = buffers->temp2; + + int16_t *cell_state = buffers->cell_state; + + arm_nn_lstm_calculate_gate_s16(data_in, hidden_in, ¶ms->forget_gate, params, forget_gate, batch_offset); + + // Calculate first term of cell state in place early to maximise reuse of scratch-buffers + arm_elementwise_mul_s16(forget_gate, + cell_state, + 0, + 0, + cell_state, + 0, + params->forget_to_cell_multiplier, + params->forget_to_cell_shift, + NN_Q15_MIN, + NN_Q15_MAX, + params->hidden_size * params->batch_size); + + arm_nn_lstm_calculate_gate_s16(data_in, hidden_in, ¶ms->input_gate, params, input_gate, batch_offset); + + arm_nn_lstm_calculate_gate_s16(data_in, hidden_in, ¶ms->cell_gate, params, cell_gate, batch_offset); + + // Reminder 
of cell state calculation, multiply and add to previous result. + arm_elementwise_mul_acc_s16(forget_gate, + cell_gate, + 0, + 0, + cell_state, + 0, + params->input_to_cell_multiplier, + params->input_to_cell_shift, + -params->cell_clip, + params->cell_clip, + params->hidden_size * params->batch_size); + + arm_nn_lstm_calculate_gate_s16(data_in, hidden_in, ¶ms->output_gate, params, output_gate, batch_offset); + + // Calculate hidden state directly to output. + arm_nn_activation_s16( + cell_state, hidden_temp, params->hidden_size * params->batch_size, params->cell_scale_power + 12, ARM_TANH); + arm_elementwise_mul_s16_batch_offset(output_gate, + hidden_temp, + hidden_out, + params->output_offset, + params->output_multiplier, + params->output_shift, + params->hidden_size, + params->batch_size, + batch_offset); + + return ARM_CMSIS_NN_SUCCESS; +} +/** + * @} end of supportLSTM group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s8.c new file mode 100644 index 0000000..1176758 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_lstm_step_s8.c @@ -0,0 +1,110 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_lstm_step_s8.c + * Description: Update LSTM function for a single iteration step. + * + * $Date: 19 January 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportLSTM + * @{ + */ + +/* + * Calculate the output state tensor of an LSTM step, s8 input/output/weights and s16 internal buffers version. + * Refer to header file for details. + */ +arm_cmsis_nn_status arm_nn_lstm_step_s8(const int8_t *data_in, + const int8_t *hidden_in, + int8_t *hidden_out, + const cmsis_nn_lstm_params *params, + cmsis_nn_lstm_context *buffers, + const int32_t batch_offset) +{ + int16_t *forget_gate = buffers->temp1; + int16_t *input_gate = buffers->temp1; + int16_t *cell_gate = buffers->temp2; + int16_t *output_gate = buffers->temp1; + int16_t *hidden_temp = buffers->temp2; + + int16_t *cell_state = buffers->cell_state; + + arm_nn_lstm_calculate_gate_s8_s16(data_in, hidden_in, ¶ms->forget_gate, params, forget_gate, batch_offset); + + // Calculate first term of cell state in place early to maximise reuse of scratch-buffers + arm_elementwise_mul_s16(forget_gate, + cell_state, + 0, + 0, + cell_state, + 0, + params->forget_to_cell_multiplier, + params->forget_to_cell_shift, + NN_Q15_MIN, + NN_Q15_MAX, + params->hidden_size * params->batch_size); + + arm_nn_lstm_calculate_gate_s8_s16(data_in, hidden_in, ¶ms->input_gate, params, input_gate, batch_offset); + arm_nn_lstm_calculate_gate_s8_s16(data_in, hidden_in, ¶ms->cell_gate, params, cell_gate, batch_offset); + + // Reminder of cell state calculation, multiply and add to previous result. 
+ arm_elementwise_mul_acc_s16(forget_gate, + cell_gate, + 0, + 0, + cell_state, + 0, + params->input_to_cell_multiplier, + params->input_to_cell_shift, + -params->cell_clip, + params->cell_clip, + params->hidden_size * params->batch_size); + + arm_nn_lstm_calculate_gate_s8_s16(data_in, hidden_in, ¶ms->output_gate, params, output_gate, batch_offset); + + // Calculate hidden state directly to output. + arm_nn_activation_s16( + cell_state, hidden_temp, params->hidden_size * params->batch_size, params->cell_scale_power + 12, ARM_TANH); + arm_elementwise_mul_s16_s8(output_gate, + hidden_temp, + hidden_out, + params->output_offset, + params->output_multiplier, + params->output_shift, + params->hidden_size, + params->batch_size, + batch_offset); + + return ARM_CMSIS_NN_SUCCESS; +} +/** + * @} end of supportLSTM group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s4.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s4.c new file mode 100644 index 0000000..0925e12 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s4.c @@ -0,0 +1,149 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mul_core_1x_s4.c + * Description: General Matrix-multiplication function + * + * $Date: 10 April 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s4 matrix multiplication to process 1 row + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s4(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output) +{ +#if defined(ARM_MATH_MVEI) + const int8_t *col_base = col_base_ref; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + + const uint8x16_t gather_offset = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}; + const mve_pred16_t lower_nibble_mask = 21845; // 0101010101010101 + + int32_t acc[4]; + for (int i = 0; i < out_ch; i++) + { + int32_t acc_n0 = 0; + const int8_t *row_base = row_base_ref; + + int32_t sum_tmp = 0; + for (int j = row_elements; j > 0; j -= 16) + { + mve_pred16_t rmdr_mask = vctp8q((uint32_t)j); + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += 8; + col_vec = vrshlq_m_n_s8(col_vec, 4, (lower_nibble_mask & rmdr_mask)); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); 
+ + int8x16_t lhs_vec = vldrbq_z_s8(row_base, rmdr_mask); + row_base += 16; + + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + } + + sum_tmp *= conv_params->input_offset; + acc_n0 += sum_tmp; + + const int32_t index = i & 0x3; + acc[index] = acc_n0; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + if (bias) + { + res = vaddq_s32(res, vldrwq_s32(bias)); + bias += 4; + } + res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift)); + output_mult += 4; + output_shift += 4; + res = vaddq_n_s32(res, out_offset); + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + vstrbq_s32(output, res); + output += 4; + } + int val = (i + 1) * ((row_elements + skipped_row_elements)); + col_base = col_base_ref + (val >> 1); + } + // Handle left over elements + for (int i = 0; i < (out_ch & 0x3); i++) + { + int32_t acc_n0 = acc[i]; + if (bias) + { + acc_n0 += bias[i]; + } + acc_n0 = arm_nn_requantize(acc_n0, output_mult[i], output_shift[i]); + acc_n0 += conv_params->output_offset; + acc_n0 = MAX(acc_n0, conv_params->activation.min); + acc_n0 = MIN(acc_n0, conv_params->activation.max); + *output++ = (int8_t)acc_n0; + } + return ARM_CMSIS_NN_SUCCESS; + +#else + (void)row_elements; + (void)skipped_row_elements; + (void)row_base_ref; + (void)col_base_ref; + (void)out_ch; + (void)conv_params; + (void)quant_params; + (void)bias; + (void)output; + return ARM_CMSIS_NN_NO_IMPL_ERROR; +#endif +} + +/** + * @} end of supportConvolution group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c new file mode 100644 index 0000000..03dc7ab --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c @@ -0,0 +1,153 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * 
SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mul_core_1x_s8.c + * Description: General Matrix-multiplication function + * + * $Date: 20 January 2023 + * $Revision: V.3.1.3 + * + * Target : Arm(R) M-Profile Architecture + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s8 matrix multiplication to process 1 row + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output) +{ +#if defined(ARM_MATH_MVEI) + const int8_t *col_base = col_base_ref; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + + int32_t acc[4]; + for (int i = 0; i < out_ch; i++) + { + int32_t acc_n0 = 0; + const int8_t *row_base = row_base_ref; + + int32_t sum_tmp = 0; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < row_elements; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += row_base[j] * col; + } + #else + __ASM volatile(" .p2align 2 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), [sum] "+Te"(sum_tmp), [row0] "+r"(row_base), [out0] "+Te"(acc_n0) + : [cnt] "r"(row_elements) + : "q0", "q1", "memory", "r14"); + #endif + + sum_tmp *= conv_params->input_offset; + acc_n0 += sum_tmp; + + const int32_t index = i & 0x3; + acc[index] = acc_n0; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + if (bias) + { + res = vaddq_s32(res, vldrwq_s32(bias)); + bias += 4; + } + res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift)); + output_mult += 4; + output_shift += 4; + res = vaddq_n_s32(res, out_offset); + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + 
vstrbq_s32(output, res); + output += 4; + } + col_base = col_base_ref + (i + 1) * (row_elements + skipped_row_elements); + } + // Handle left over elements + for (int i = 0; i < (out_ch & 0x3); i++) + { + int32_t acc_n0 = acc[i]; + if (bias) + { + acc_n0 += bias[i]; + } + acc_n0 = arm_nn_requantize(acc_n0, output_mult[i], output_shift[i]); + acc_n0 += conv_params->output_offset; + acc_n0 = MAX(acc_n0, conv_params->activation.min); + acc_n0 = MIN(acc_n0, conv_params->activation.max); + *output++ = (int8_t)acc_n0; + } + return ARM_CMSIS_NN_SUCCESS; + +#else + (void)row_elements; + (void)skipped_row_elements; + (void)row_base_ref; + (void)col_base_ref; + (void)out_ch; + (void)conv_params; + (void)quant_params; + (void)bias; + (void)output; + return ARM_CMSIS_NN_NO_IMPL_ERROR; +#endif +} + +/** + * @} end of supportConvolution group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c new file mode 100644 index 0000000..643a9c7 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mul_core_4x_s8.c + * Description: General matrix multiplication function for MVE extension + * + * $Date: 13 December 2022 + * $Revision: V.3.1.1 + * + * Target Processor: Cortex-M processors + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nn_types.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s8 matrix multiplication to process 4 rows and one column + * + * Refer header file for details. + * + */ + +int8_t *arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output) +{ + +#if defined(ARM_MATH_MVEI) + for (int i = 0; i < out_ch; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + const int8_t *ip_row_0 = row_base; + const int8_t *ip_row_1 = row_base + offset; + const int8_t *ip_row_2 = row_base + (2 * offset); + const int8_t *ip_row_3 = row_base + (3 * offset); + const int8_t *col_base = col_base_ref + i * row_elements; + int32_t sum_tmp = 0; + +#if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < row_elements; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += ip_row_0[j] * col; + acc_n1 += ip_row_1[j] * col; + acc_n2 += ip_row_2[j] * col; + acc_n3 += ip_row_3[j] * col; + } +#else + __ASM volatile(" .p2align 2 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q2, [%[row1]], #16 \n" + " vmladava.s8 
%[out1], q0, q2 \n" + " vldrb.8 q3, [%[row2]], #16 \n" + " vmladava.s8 %[out2], q0, q3 \n" + " vldrb.8 q4, [%[row3]], #16 \n" + " vmladava.s8 %[out3], q0, q4 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), + [sum] "+Te"(sum_tmp), + [row0] "+r"(ip_row_0), + [row1] "+r"(ip_row_1), + [row2] "+r"(ip_row_2), + [row3] "+r"(ip_row_3), + [out0] "+Te"(acc_n0), + [out1] "+Te"(acc_n1), + [out2] "+Te"(acc_n2), + [out3] "+Te"(acc_n3) + : [cnt] "r"(row_elements) + : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); +#endif + + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + sum_tmp *= conv_params->input_offset; + if (bias) + { + sum_tmp += bias[i]; + } + res = vaddq_n_s32(res, sum_tmp); + + res = arm_requantize_mve(res, quant_params->multiplier[i], quant_params->shift[i]); + res = vaddq_n_s32(res, conv_params->output_offset); + + res = vmaxq_s32(res, vdupq_n_s32(conv_params->activation.min)); + res = vminq_s32(res, vdupq_n_s32(conv_params->activation.max)); + + const uint32x4_t scatter_offset = {0, (uint32_t)out_ch, (uint32_t)out_ch * 2, (uint32_t)out_ch * 3}; + vstrbq_scatter_offset_s32(output, scatter_offset, res); + output++; + } + + return output + (3 * out_ch); +#else + (void)row_elements; + (void)offset; + (void)row_base; + (void)col_base_ref; + (void)out_ch; + (void)conv_params; + (void)quant_params; + (void)bias; + (void)output; + return NULL; +#endif +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_interleaved_t_even_s4.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_interleaved_t_even_s4.c new file mode 100644 index 0000000..9d2a8da --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_interleaved_t_even_s4.c @@ -0,0 +1,1879 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the 
Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_nt_interleaved_t_even_s4 + * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed, and 4 bit rhs. + * + * $Date: 04 Jun 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s4 matrix multiplication with the left-hand side interleaved and the right-hand-side matrix transposed and columns + * even + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_interleaved_t_even_s4(const int8_t *lhs, + const int8_t *packed_rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset) +{ +#if defined(ARM_MATH_MVEI) + + const int rhs_cols_offset = rhs_cols % 16; + const int32_t blk_cnt = rhs_cols >> 5; + const mve_pred16_t lower_nibble_mask = 21845; // 0101010101010101 + const uint8x16_t gather_offset = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}; + const uint32x4_t scatter_offset = {0, (uint32_t)rhs_rows, (uint32_t)rhs_rows * 2, (uint32_t)rhs_rows * 3}; + const int I6_elements_spill = rhs_cols & 0x10; + + int i_items = 0; + + for (; i_items <= (lhs_rows - 4); i_items += 4) + { + int8_t const *col_base = packed_rhs; + + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + int32_t sum_tmp = 0; + + int8_t const *ip_row_0 = lhs; + int8_t const *ip_row_1 = lhs + lhs_cols_offset; + int8_t const *ip_row_2 = lhs + (2 * lhs_cols_offset); + int8_t const *ip_row_3 = lhs + (3 * lhs_cols_offset); + + const mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16_t lhs_high, lhs_low; + + lhs_high = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + lhs_low = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_low); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_1); + 
ip_row_1 += 16; + lhs_low = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, ker_low, lhs_low); + acc_n1 = vmladavaq_s8(acc_n1, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + lhs_low = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, ker_low, lhs_low); + acc_n2 = vmladavaq_s8(acc_n2, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_3); + ip_row_3 += 16; + lhs_low = vldrbq_s8(ip_row_3); + ip_row_3 += 16; + acc_n3 = vmladavaq_s8(acc_n3, ker_low, lhs_low); + acc_n3 = vmladavaq_s8(acc_n3, ker_high, lhs_high); + + col_base += 16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_3); + ip_row_3 += 16; + acc_n3 = vmladavaq_s8(acc_n3, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_2, rmdr_mask); + acc_n2 = vmladavaq_p_s8(acc_n2, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_3, rmdr_mask); + 
acc_n3 = vmladavaq_p_s8(acc_n3, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + res = vaddq_n_s32(res, sum_tmp); + + res = arm_requantize_mve(res, dst_multipliers[i], dst_shifts[i]); + res = vaddq_n_s32(res, dst_offset); + + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, scatter_offset, res); + dst++; + } + lhs += 4 * lhs_cols_offset; + dst += (3 * rhs_rows); + } + + if (lhs_rows % 4 == 3) + { + int8_t const *col_base = packed_rhs; + const mve_pred16_t requant_mask = vctp32q(3); + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + + int8_t const *ip_row_0 = lhs; + int8_t const *ip_row_1 = lhs + lhs_cols_offset; + int8_t const *ip_row_2 = lhs + (2 * lhs_cols_offset); + int32_t sum_tmp = 0; + + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16_t lhs_high, lhs_low; + + lhs_high = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + lhs_low = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_low); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + lhs_low = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, ker_low, lhs_low); + acc_n1 = vmladavaq_s8(acc_n1, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + lhs_low = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, ker_low, lhs_low); + acc_n2 = vmladavaq_s8(acc_n2, ker_high, lhs_high); + + col_base += 
16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, (lower_nibble_mask & rmdr_mask)); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_2, rmdr_mask); + acc_n2 = vmladavaq_p_s8(acc_n2, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, acc_n2, 0}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + + res = vaddq_x_n_s32(res, sum_tmp, requant_mask); + + res = arm_requantize_mve_pred(res, dst_multipliers[i], dst_shifts[i], requant_mask); + res = vaddq_x_n_s32(res, dst_offset, requant_mask); + + res = vmaxq_x_s32(res, vdupq_n_s32(activation_min), requant_mask); + res = vminq_x_s32(res, vdupq_n_s32(activation_max), requant_mask); + + vstrbq_scatter_offset_p_s32(dst, scatter_offset, res, requant_mask); + dst++; + } + lhs += 3 * lhs_cols_offset; + dst += (2 * rhs_rows); + } + + if (lhs_rows % 4 == 2) + { + int8_t const *col_base = packed_rhs; + const mve_pred16_t requant_mask = vctp32q(2); + + for (int i = 0; i < 
rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + + int8_t const *ip_row_0 = lhs; + int8_t const *ip_row_1 = lhs + lhs_cols_offset; + int32_t sum_tmp = 0; + + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16_t lhs_high, lhs_low; + + lhs_high = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + lhs_low = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_low); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_high); + + lhs_high = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + lhs_low = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + + acc_n1 = vmladavaq_s8(acc_n1, ker_low, lhs_low); + acc_n1 = vmladavaq_s8(acc_n1, ker_high, lhs_high); + + col_base += 16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, (lower_nibble_mask & rmdr_mask)); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = 
vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, 0, 0}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + + res = vaddq_x_n_s32(res, sum_tmp, requant_mask); + + res = arm_requantize_mve_pred(res, dst_multipliers[i], dst_shifts[i], requant_mask); + res = vaddq_x_n_s32(res, dst_offset, requant_mask); + + res = vmaxq_x_s32(res, vdupq_n_s32(activation_min), requant_mask); + res = vminq_x_s32(res, vdupq_n_s32(activation_max), requant_mask); + + vstrbq_scatter_offset_p_s32(dst, scatter_offset, res, requant_mask); + dst++; + } + lhs += 2 * lhs_cols_offset; + dst += (2 * rhs_rows); + } + + if (lhs_rows % 4 == 1) + { + int32_t acc[4]; + const int32_t *multipliers = dst_multipliers; + const int32_t *shifts = dst_shifts; + const int8_t *col_base = packed_rhs; + int col_inc = rhs_cols_offset >> 1; + + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + const int8_t *ip_row_0 = lhs; + int32_t sum_tmp = 0; + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16_t lhs_high, lhs_low; + + lhs_high = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + lhs_low = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_low); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_high); + + col_base += 16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + } + + if 
(rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += col_inc; + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + } + + sum_tmp *= lhs_offset; + sum_tmp += acc_n0; + if (bias) + { + sum_tmp += bias[i]; + } + const int32_t index = i & 0x3; + acc[index] = sum_tmp; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts)); + multipliers += 4; + shifts += 4; + res = vaddq_n_s32(res, dst_offset); + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + vstrbq_s32(dst, res); + dst += 4; + } + } + lhs += lhs_cols_offset; + const int32_t tail_rows = rhs_rows & 0x3; + for (int i = 0; i < tail_rows; i++) + { + int32_t acc_n0 = acc[i]; + acc_n0 = arm_nn_requantize(acc_n0, multipliers[i], shifts[i]); + acc_n0 += dst_offset; + acc_n0 = MAX(acc_n0, activation_min); + acc_n0 = MIN(acc_n0, activation_max); + *dst++ = (int8_t)acc_n0; + } + } + +#elif defined(ARM_MATH_DSP) + const int32_t lhs_cols_off1 = lhs_cols_offset - 4; + const int16_t i16_lhs_offset = (int16_t)lhs_offset; + const uint32_t ui32_lhs_offset_i16x2 = PKHBT(i16_lhs_offset, i16_lhs_offset, 16); + const int32_t rhs_cols_int4 = rhs_cols >> 1; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 4); rhs_rows_idx += 4) + { + + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + while (lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res10 = 0; + int32_t res11 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + int32_t spillover10 = 0; + int32_t spillover11 = 0; + + 
if (bias) + { + res00 = bias[rhs_rows_idx]; + res01 = bias[rhs_rows_idx + 2]; + res10 = bias[rhs_rows_idx]; + res11 = bias[rhs_rows_idx + 2]; + spillover00 = bias[rhs_rows_idx + 1]; + spillover01 = bias[rhs_rows_idx + 3]; + spillover10 = bias[rhs_rows_idx + 1]; + spillover11 = bias[rhs_rows_idx + 3]; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high, rhs_low1, rhs_high1; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, 
res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = 
SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; 
+ res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[2] = (int8_t)res11; + dst_ptr -= rhs_rows; + + res00 = spillover00; + res01 = spillover01; + res10 = spillover10; + res11 = spillover11; + + rhs_cols_idx = 0; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, 
lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, 
lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = 
SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[1] = (int8_t)res10; + dst_ptr[3] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + 
lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + + if (bias) + { + res00 = bias[rhs_rows_idx]; + spillover00 = bias[rhs_rows_idx + 1]; + res01 = bias[rhs_rows_idx + 2]; + spillover01 = bias[rhs_rows_idx + 3]; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high, rhs_low1, rhs_high1; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 
8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + + // Quantize down + res00 = arm_nn_requantize(res00, 
dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + + res00 = spillover00; + res01 = spillover01; + + rhs_cols_idx = 0; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = 
SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = 
arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + } + + packed_rhs += 2 * rhs_cols; + dst += 4; + } + + const int32_t rhs_rows_finished = rhs_rows - (rhs_rows % 4); + // Left over rhs rows will be in the range 0 -> 3 + for (int rhs_rows_idx = 0; rhs_rows_idx < rhs_rows % 4; ++rhs_rows_idx) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + while (lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res10 = 0; + + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + res10 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + 
lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { 
+ read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + } + + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + res10 = arm_nn_requantize( + res10, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + res10 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr += rhs_rows; + + lhs_rows_idx--; + } + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = 
&packed_rhs[0]; + + int32_t res00 = 0; + + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = 
SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + } + + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + } + + packed_rhs += rhs_cols_int4; + + ++dst; + } +#else + + const int32_t rhs_cols_int4 = rhs_cols >> 1; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 4); rhs_rows_idx += 4) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = (lhs_rows >> 1); lhs_rows_idx > 0; --lhs_rows_idx) + { + // To avoid the issue of packed values leaking into the next rhs row + // we instead evaluate the rhs rows in pairs like so: + // rhs[0] and rhs[2], rhs[1] and rhs[3] + + // Start processing rhs_row[0] and rhs_row[2] + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res10 = 0; + int32_t res11 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + int32_t spillover10 = 0; + int32_t spillover11 = 0; + + if (bias) + { + res00 = bias[rhs_rows_idx]; + res01 = bias[rhs_rows_idx + 2]; + res10 = bias[rhs_rows_idx]; + res11 = bias[rhs_rows_idx + 2]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] 
<< 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[2] = (int8_t)res11; + dst_ptr -= rhs_rows; + + // Start processing rhs_row[1] and rhs_row[3] + res00 = spillover00; + res01 = spillover01; + res10 = spillover10; + res11 = spillover11; + if (bias) + { + res00 += bias[rhs_rows_idx + 1]; + res01 += bias[rhs_rows_idx + 3]; + res10 += 
bias[rhs_rows_idx + 1]; + res11 += bias[rhs_rows_idx + 3]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[1] = (int8_t)res10; + dst_ptr[3] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + 
} + + // Left-over row + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + + if (bias) + { + res00 += bias[rhs_rows_idx]; + res01 += bias[rhs_rows_idx + 2]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + + res00 = spillover00; + res01 = spillover01; + + if (bias) + { + res00 += bias[rhs_rows_idx + 1]; + res01 += bias[rhs_rows_idx + 3]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low 
* rhs_low0; + res00 += lhs_high * rhs_high0; + + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + } + + packed_rhs += 2 * rhs_cols; + dst += 4; + } + + const int32_t rhs_rows_finished = rhs_rows - (rhs_rows % 4); + // Left over rhs rows will be in the range 0 -> 3 + for (int rhs_rows_idx = 0; rhs_rows_idx < rhs_rows % 4; ++rhs_rows_idx) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = (lhs_rows >> 1); lhs_rows_idx > 0; --lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + int32_t res00 = 0; + int32_t res10 = 0; + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + res10 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high = packed_rhs_ptr[0] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low; + res00 += lhs_high * rhs_high; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low; + res10 += lhs_high * rhs_high; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize( + res00, 
dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + res10 = arm_nn_requantize( + res10, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + res10 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr += rhs_rows; + } + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + int32_t res00 = 0; + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high = packed_rhs_ptr[0] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low; + res00 += lhs_high * rhs_high; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + } + + packed_rhs += rhs_cols_int4; + + ++dst; + } + +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s16.c new file mode 100644 index 0000000..cc11227 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s16.c @@ -0,0 +1,360 @@ +/* + * SPDX-FileCopyrightText: Copyright 
2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_nt_t_s16 + * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed + * + * $Date: 11 April 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s16 matrix multiplication with the right-hand-side matrix transposed + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s16(const int16_t *lhs, + const int8_t *rhs, + const cmsis_nn_bias_data *bias_data, + int16_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t activation_min, + const int32_t activation_max) +{ +#if defined(ARM_MATH_MVEI) + + const uint32_t rhs_rows_offset = (uint32_t)rhs_rows * sizeof(int16_t); + const uint32x4_t scatter_offset = { + 0, (uint32_t)rhs_rows_offset, (uint32_t)rhs_rows_offset * 2, (uint32_t)rhs_rows_offset * 3}; + + const int64_t *bias_s64 = (const int64_t *)bias_data->data; + const int32_t *bias_s32 = (const int32_t *)bias_data->data; + const bool is_int32_bias = bias_data->is_int32_bias; + + const int32_t rhs_cols_fast = is_int32_bias ? rhs_cols : (rhs_cols > MAX_COL_COUNT ? MAX_COL_COUNT : rhs_cols); + const int32_t rhs_cols_slow = rhs_cols - MAX_COL_COUNT; + + int i_items = 0; + for (; i_items <= (lhs_rows - 4); i_items += 4) + { + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + const int16_t *ip_row_0 = lhs; + const int16_t *ip_row_1 = lhs + rhs_cols; + const int16_t *ip_row_2 = lhs + (2 * rhs_cols); + const int16_t *ip_row_3 = lhs + (3 * rhs_cols); + const int8_t *col_base = rhs + i * rhs_cols; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < rhs_cols_fast; j++) + { + int8_t col = col_base[j]; + acc_n0 += ip_row_0[j] * col; + acc_n1 += ip_row_1[j] * col; + acc_n2 += ip_row_2[j] * col; + acc_n3 += ip_row_3[j] * col; + } + #else + // Note: If operand initialization is moved around, use '&' constraint to + // specify earlyclobber operands. 
+ __ASM volatile(" .p2align 2 \n" + " wlstp.16 lr, %[cnt], 1f \n" + " mov %[out0], 0 \n" + " mov %[out1], 0 \n" + " mov %[out2], 0 \n" + " mov %[out3], 0 \n" + " vldrb.s16 q0, [%[col]], #8 \n" + "2: \n" + " vldrh.u16 q1, [%[row0]], #16 \n" + " vmlava.s16 %[out0], q0, q1 \n" + " vldrh.u16 q2, [%[row1]], #16 \n" + " vmlava.s16 %[out1], q0, q2 \n" + " vldrh.u16 q3, [%[row2]], #16 \n" + " vmlava.s16 %[out2], q0, q3 \n" + " vldrh.u16 q4, [%[row3]], #16 \n" + " vmlava.s16 %[out3], q0, q4 \n" + " vldrb.s16 q0, [%[col]], #8 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+l"(col_base), + [row0] "+l"(ip_row_0), + [row1] "+l"(ip_row_1), + [row2] "+l"(ip_row_2), + [row3] "+l"(ip_row_3), + [out0] "=Te"(acc_n0), + [out1] "=Te"(acc_n1), + [out2] "=Te"(acc_n2), + [out3] "=Te"(acc_n3) + : [cnt] "r"(rhs_cols_fast) + : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); + #endif + + if (is_int32_bias) + { + int32x4_t result; + + if (bias_s32) + { + acc_n0 += bias_s32[i]; + acc_n1 += bias_s32[i]; + acc_n2 += bias_s32[i]; + acc_n3 += bias_s32[i]; + } + + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + + result = arm_requantize_mve(res, dst_multipliers[i], dst_shifts[i]); + + result = vmaxq_s32(result, vdupq_n_s32(activation_min)); + result = vminq_s32(result, vdupq_n_s32(activation_max)); + + vstrhq_scatter_offset_s32(dst, scatter_offset, result); + } + else + { + int64_t acc_n0_s64 = acc_n0; + int64_t acc_n1_s64 = acc_n1; + int64_t acc_n2_s64 = acc_n2; + int64_t acc_n3_s64 = acc_n3; + + if (rhs_cols > MAX_COL_COUNT) + { + ip_row_0 = lhs + MAX_COL_COUNT; + ip_row_1 = lhs + rhs_cols + MAX_COL_COUNT; + ip_row_2 = lhs + (2 * rhs_cols) + MAX_COL_COUNT; + ip_row_3 = lhs + (3 * rhs_cols) + MAX_COL_COUNT; + col_base = rhs + i * rhs_cols + MAX_COL_COUNT; + + for (int j = 0; j < rhs_cols_slow; j++) + { + int8_t col = col_base[j]; + acc_n0_s64 += ip_row_0[j] * col; + acc_n1_s64 += ip_row_1[j] * col; + acc_n2_s64 += ip_row_2[j] * col; + acc_n3_s64 += ip_row_3[j] * col; + } + } + + if (bias_s64) + { 
+ acc_n0_s64 += bias_s64[i]; + acc_n1_s64 += bias_s64[i]; + acc_n2_s64 += bias_s64[i]; + acc_n3_s64 += bias_s64[i]; + } + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(dst_multipliers[i]); + int32_t shift = dst_shifts[i]; + + acc_n0 = arm_nn_requantize_s64(acc_n0_s64, reduced_multiplier, shift); + acc_n1 = arm_nn_requantize_s64(acc_n1_s64, reduced_multiplier, shift); + acc_n2 = arm_nn_requantize_s64(acc_n2_s64, reduced_multiplier, shift); + acc_n3 = arm_nn_requantize_s64(acc_n3_s64, reduced_multiplier, shift); + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + vstrhq_scatter_offset_s32(dst, scatter_offset, res); + } + dst++; + } + + lhs += 4 * rhs_cols; + dst += (3 * rhs_rows); + } + + if (is_int32_bias) + { + + for (; i_items < lhs_rows; i_items++) + { + int32_t acc[4]; + const int32_t *multipliers = dst_multipliers; + const int32_t *shifts = dst_shifts; + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + const int16_t *ip_row_0 = lhs; + const int8_t *col_base = rhs + i * rhs_cols; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < rhs_cols; j++) + { + int32_t col = col_base[j]; + acc_n0 += ip_row_0[j] * col; + } + #else + __ASM volatile(" .p2align 2 \n" + " wlstp.32 lr, %[cnt], 1f \n" + " mov %[out0], 0 \n" + "2: \n" + " vldrb.s32 q0, [%[col]], #4 \n" + " vldrh.s32 q1, [%[row0]], #8 \n" + " vmlava.s32 %[out0], q0, q1 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+l"(col_base), [row0] "+l"(ip_row_0), [out0] "=Te"(acc_n0) + : [cnt] "r"(rhs_cols) + : "q0", "q1", "memory", "r14"); + #endif + if (bias_s32) + { + acc_n0 += bias_s32[i]; + } + + const int32_t index = i & 0x3; + acc[index] = acc_n0; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts)); + multipliers += 4; + shifts += 4; + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = 
vminq_s32(res, vdupq_n_s32(activation_max)); + vstrhq_s32(dst, res); + dst += 4; + } + } + lhs += rhs_cols; + + const int32_t tail_rows = rhs_rows & 0x3; + for (int i = 0; i < tail_rows; i++) + { + int32_t acc_n0 = acc[i]; + acc_n0 = arm_nn_requantize(acc_n0, multipliers[i], shifts[i]); + acc_n0 = MAX(acc_n0, activation_min); + acc_n0 = MIN(acc_n0, activation_max); + *dst++ = (int16_t)acc_n0; + } + } + } + else + { + + for (; i_items < lhs_rows; i_items++) + { + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int64_t acc_n0_s64 = 0; + const int16_t *ip_row_0 = lhs; + const int8_t *col_base = rhs + i * rhs_cols; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < rhs_cols_fast; j++) + { + int8_t col = col_base[j]; + acc_n0 += ip_row_0[j] * col; + } + #else + __ASM volatile(" .p2align 2 \n" + " wlstp.32 lr, %[cnt], 1f \n" + " mov %[out0], 0 \n" + "2: \n" + " vldrb.s32 q0, [%[col]], #4 \n" + " vldrh.s32 q1, [%[row0]], #8 \n" + " vmlava.s32 %[out0], q0, q1 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+l"(col_base), [row0] "+l"(ip_row_0), [out0] "=Te"(acc_n0) + : [cnt] "r"(rhs_cols_fast) + : "q0", "q1", "memory", "r14"); + #endif + + acc_n0_s64 = acc_n0; + + if (rhs_cols > MAX_COL_COUNT) + { + ip_row_0 = lhs + MAX_COL_COUNT; + col_base = rhs + i * rhs_cols + MAX_COL_COUNT; + + for (int j = 0; j < rhs_cols_slow; j++) + { + int8_t col = col_base[j]; + acc_n0_s64 += ip_row_0[j] * col; + } + } + + if (bias_s64) + { + acc_n0_s64 += bias_s64[i]; + } + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(dst_multipliers[i]); + int32_t shift = dst_shifts[i]; + + acc_n0 = arm_nn_requantize_s64(acc_n0_s64, reduced_multiplier, shift); + acc_n0 = MAX(acc_n0, activation_min); + acc_n0 = MIN(acc_n0, activation_max); + *dst++ = (int16_t)acc_n0; + } + lhs += rhs_cols; + } + } + +#else + (void)lhs; + (void)rhs; + (void)dst_multipliers; + (void)dst_shifts; + (void)dst; + (void)activation_min; + (void)activation_max; + (void)bias_data; + (void)lhs_rows; + 
(void)lhs_rows; + (void)rhs_rows; + (void)rhs_cols; + + return ARM_CMSIS_NN_NO_IMPL_ERROR; +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s4.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s4.c new file mode 100644 index 0000000..b400f96 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s4.c @@ -0,0 +1,2056 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_nt_t_s4 + * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed, and 4 bit rhs. + * + * $Date: 27 May 2024 + * $Revision: V.1.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s4 matrix multiplication with the right-hand-side matrix transposed + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s4(const int8_t *lhs, + const int8_t *packed_rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset) +{ +#if defined(ARM_MATH_MVEI) + int i_items = 0; + const int rhs_cols_offset = rhs_cols % 16; + const mve_pred16_t lower_nibble_mask = 21845; // 0101010101010101 + const uint8x16_t gather_offset = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}; + const uint32x4_t scatter_offset = {0, (uint32_t)rhs_rows, (uint32_t)rhs_rows * 2, (uint32_t)rhs_rows * 3}; + const int I6_elements_spill = rhs_cols & 0x10; + + for (; i_items <= (lhs_rows - 4); i_items += 4) + { + const int32_t blk_cnt = rhs_cols >> 4; + int8_t const *col_base = packed_rhs; + + for (int i = 0; i < rhs_rows; i++) + { + + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + int8_t const *ip_row_0 = lhs; + int8_t const *ip_row_1 = lhs + lhs_cols_offset; + int8_t const *ip_row_2 = lhs + (2 * lhs_cols_offset); + int8_t const *ip_row_3 = lhs + (3 * lhs_cols_offset); + int32_t sum_tmp = 0; + + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + if ((rhs_cols & 0x1) & (i & 0x1)) + { + rmdr_mask >>= 1; + int32_t col = col_base[0] >> 4; + sum_tmp = col; + acc_n0 += ip_row_0[0] * col; + acc_n1 += ip_row_1[0] * col; + acc_n2 += ip_row_2[0] * col; + acc_n3 += ip_row_3[0] * col; + + ++col_base; + ++ip_row_0; + ++ip_row_1; + ++ip_row_2; + ++ip_row_3; + } + + for (int j = blk_cnt; j > 0; --j) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = 
vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_3); + ip_row_3 += 16; + acc_n3 = vmladavaq_s8(acc_n3, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_2, rmdr_mask); + acc_n2 = vmladavaq_p_s8(acc_n2, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_3, rmdr_mask); + acc_n3 = vmladavaq_p_s8(acc_n3, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + res = vaddq_n_s32(res, sum_tmp); + + res = arm_requantize_mve(res, dst_multipliers[i], dst_shifts[i]); + res = vaddq_n_s32(res, dst_offset); + + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, scatter_offset, res); + dst++; + } + lhs += 4 * lhs_cols_offset; + dst += (3 * rhs_rows); + } + + for (; i_items <= (lhs_rows - 3); i_items += 3) + { + int8_t const *col_base = packed_rhs; + const mve_pred16_t requant_mask = vctp32q(3); + const int32_t blk_cnt = rhs_cols >> 4; + + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + + int8_t const *ip_row_0 = lhs; + int8_t 
const *ip_row_1 = lhs + lhs_cols_offset; + int8_t const *ip_row_2 = lhs + (2 * lhs_cols_offset); + int32_t sum_tmp = 0; + + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + if ((rhs_cols & 0x1) & (i & 0x1)) + { + rmdr_mask >>= 1; + int32_t col = col_base[0] >> 4; + sum_tmp = col; + acc_n0 += ip_row_0[0] * col; + acc_n1 += ip_row_1[0] * col; + acc_n2 += ip_row_2[0] * col; + + ++col_base; + ++ip_row_0; + ++ip_row_1; + ++ip_row_2; + } + + for (int j = blk_cnt; j > 0; --j) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_2); + ip_row_2 += 16; + acc_n2 = vmladavaq_s8(acc_n2, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, (lower_nibble_mask & rmdr_mask)); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_2, rmdr_mask); + acc_n2 = vmladavaq_p_s8(acc_n2, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, acc_n2, 0}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + + res = vaddq_x_n_s32(res, sum_tmp, requant_mask); + + res = arm_requantize_mve_pred(res, dst_multipliers[i], dst_shifts[i], requant_mask); + res = vaddq_x_n_s32(res, dst_offset, 
requant_mask); + + res = vmaxq_x_s32(res, vdupq_n_s32(activation_min), requant_mask); + res = vminq_x_s32(res, vdupq_n_s32(activation_max), requant_mask); + + vstrbq_scatter_offset_p_s32(dst, scatter_offset, res, requant_mask); + dst++; + } + lhs += 3 * lhs_cols_offset; + dst += (2 * rhs_rows); + } + + for (; i_items <= (lhs_rows - 2); i_items += 2) + { + int8_t const *col_base = packed_rhs; + const mve_pred16_t requant_mask = vctp32q(2); + const int32_t blk_cnt = rhs_cols >> 5; + + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + + int8_t const *ip_row_0 = lhs; + int8_t const *ip_row_1 = lhs + lhs_cols_offset; + + int32_t sum_tmp = 0; + + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + if ((rhs_cols & 0x1) & (i & 0x1)) + { + rmdr_mask >>= 1; + int32_t col = col_base[0] >> 4; + sum_tmp = col; + acc_n0 += ip_row_0[0] * col; + acc_n1 += ip_row_1[0] * col; + + ++col_base; + ++ip_row_0; + ++ip_row_1; + } + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16x2_t lhs_x2 = vld2q_s8(ip_row_0); + + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_x2.val[0]); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_x2.val[1]); + + lhs_x2 = vld2q_s8(ip_row_1); + acc_n1 = vmladavaq_s8(acc_n1, ker_low, lhs_x2.val[0]); + acc_n1 = vmladavaq_s8(acc_n1, ker_high, lhs_x2.val[1]); + + ip_row_0 += 32; + ip_row_1 += 32; + col_base += 16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, 
lhs_vec); + + lhs_vec = vldrbq_s8(ip_row_1); + ip_row_1 += 16; + acc_n1 = vmladavaq_s8(acc_n1, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += rhs_cols_offset >> 1; + col_vec = vrshlq_m_n_s8(col_vec, 4, (lower_nibble_mask & rmdr_mask)); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + + lhs_vec = vldrbq_z_s8(ip_row_1, rmdr_mask); + acc_n1 = vmladavaq_p_s8(acc_n1, col_vec, lhs_vec, rmdr_mask); + } + + int32x4_t res = {acc_n0, acc_n1, 0, 0}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + + res = vaddq_x_n_s32(res, sum_tmp, requant_mask); + + res = arm_requantize_mve_pred(res, dst_multipliers[i], dst_shifts[i], requant_mask); + res = vaddq_x_n_s32(res, dst_offset, requant_mask); + + res = vmaxq_x_s32(res, vdupq_n_s32(activation_min), requant_mask); + res = vminq_x_s32(res, vdupq_n_s32(activation_max), requant_mask); + + vstrbq_scatter_offset_p_s32(dst, scatter_offset, res, requant_mask); + dst++; + } + lhs += 2 * lhs_cols_offset; + dst += (2 * rhs_rows); + } + + for (; i_items < lhs_rows; i_items++) + { + int32_t acc[4]; + const int32_t *multipliers = dst_multipliers; + const int32_t *shifts = dst_shifts; + const int8_t *col_base = packed_rhs; + const int32_t blk_cnt = rhs_cols >> 5; + int col_inc = rhs_cols_offset >> 1; + + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + const int8_t *ip_row_0 = lhs; + int32_t sum_tmp = 0; + mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset); + + if ((rhs_cols & 0x1) & (i & 0x1)) + { + rmdr_mask >>= 1; + int32_t col = col_base[0] >> 4; + sum_tmp += col; + acc_n0 += ip_row_0[0] * col; + + ++col_base; + ++ip_row_0; + } + + for (int j = blk_cnt; j > 0; --j) + { + const int8x16_t col_vec = vldrbq_s8(col_base); + + int8x16_t ker_low = 
vrshlq_n_s8(col_vec, 4); + ker_low = vshrq_n_s8(ker_low, 4); + int8x16_t ker_high = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, ker_low); + sum_tmp = vaddvaq_s8(sum_tmp, ker_high); + + int8x16x2_t lhs_x2 = vld2q_s8(ip_row_0); + + acc_n0 = vmladavaq_s8(acc_n0, ker_low, lhs_x2.val[0]); + acc_n0 = vmladavaq_s8(acc_n0, ker_high, lhs_x2.val[1]); + + ip_row_0 += 32; + col_base += 16; + } + + if (I6_elements_spill) + { + int8x16_t col_vec = vldrbq_gather_offset_s8(col_base, gather_offset); + col_base += 8; + + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_s8(sum_tmp, col_vec); + + int8x16_t lhs_vec = vldrbq_s8(ip_row_0); + ip_row_0 += 16; + acc_n0 = vmladavaq_s8(acc_n0, col_vec, lhs_vec); + } + + if (rmdr_mask) + { + int8x16_t col_vec = vldrbq_gather_offset_z_s8(col_base, gather_offset, rmdr_mask); + col_base += col_inc; + col_vec = vrshlq_m_n_s8(col_vec, 4, lower_nibble_mask); + col_vec = vshrq_n_s8(col_vec, 4); + + sum_tmp = vaddvaq_p_s8(sum_tmp, col_vec, rmdr_mask); + + int8x16_t lhs_vec = vldrbq_z_s8(ip_row_0, rmdr_mask); + acc_n0 = vmladavaq_p_s8(acc_n0, col_vec, lhs_vec, rmdr_mask); + } + + sum_tmp *= lhs_offset; + sum_tmp += acc_n0; + if (bias) + { + sum_tmp += bias[i]; + } + const int32_t index = i & 0x3; + acc[index] = sum_tmp; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts)); + multipliers += 4; + shifts += 4; + res = vaddq_n_s32(res, dst_offset); + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + vstrbq_s32(dst, res); + dst += 4; + } + } + lhs += lhs_cols_offset; + const int32_t tail_rows = rhs_rows & 0x3; + for (int i = 0; i < tail_rows; i++) + { + int32_t acc_n0 = acc[i]; + acc_n0 = arm_nn_requantize(acc_n0, multipliers[i], shifts[i]); + acc_n0 += dst_offset; + acc_n0 = MAX(acc_n0, activation_min); + acc_n0 = MIN(acc_n0, 
activation_max); + *dst++ = (int8_t)acc_n0; + } + } + +#elif defined(ARM_MATH_DSP) + const int32_t lhs_cols_off1 = lhs_cols_offset - 4; + const int16_t i16_lhs_offset = (int16_t)lhs_offset; + const uint32_t ui32_lhs_offset_i16x2 = PKHBT(i16_lhs_offset, i16_lhs_offset, 16); + const int32_t rhs_cols_int4 = rhs_cols >> 1; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 4); rhs_rows_idx += 4) + { + + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + while (lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res10 = 0; + int32_t res11 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + int32_t spillover10 = 0; + int32_t spillover11 = 0; + + if (bias) + { + res00 = bias[rhs_rows_idx]; + res01 = bias[rhs_rows_idx + 2]; + res10 = bias[rhs_rows_idx]; + res11 = bias[rhs_rows_idx + 2]; + spillover00 = bias[rhs_rows_idx + 1]; + spillover01 = bias[rhs_rows_idx + 3]; + spillover10 = bias[rhs_rows_idx + 1]; + spillover11 = bias[rhs_rows_idx + 3]; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high, rhs_low1, rhs_high1; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = 
SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = 
SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 
= SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res01 += lhs_low * rhs_low1; + + res10 += lhs_high * rhs_low0; + res11 += lhs_high * rhs_low1; + + lhs_ptr -= rhs_cols - 1; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + spillover00 += lhs_low * rhs_high0; + spillover01 += lhs_low * rhs_high1; + + spillover10 += lhs_high * rhs_high0; + spillover11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + ++lhs_ptr; + } + else + { + lhs_ptr -= rhs_cols; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + 
res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[2] = (int8_t)res11; + dst_ptr -= rhs_rows; + + res00 = spillover00; + res01 = spillover01; + res10 = spillover10; + res11 = spillover11; + + rhs_cols_idx = 0; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 
2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = 
SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + // 4 x MAC res10, res11 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res11 = SMLAD(rhs_low1, lhs_low, res11); + res10 = SMLAD(rhs_high0, lhs_high, res10); + res11 = SMLAD(rhs_high1, lhs_high, res11); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = 
(int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[1] = (int8_t)res10; + dst_ptr[3] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + + if (bias) + { + res00 = bias[rhs_rows_idx]; + spillover00 = bias[rhs_rows_idx + 1]; + res01 = bias[rhs_rows_idx + 2]; + spillover01 = bias[rhs_rows_idx + 3]; + } + + int32_t 
rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high, rhs_low1, rhs_high1; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = 
arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_ptr -= rhs_cols - 1; + lhs_high = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res01 += lhs_low * rhs_low1; + spillover00 += lhs_high * rhs_high0; + spillover01 += lhs_high * rhs_high1; + + 
++packed_rhs_ptr; + ++lhs_ptr; + } + else + { + lhs_ptr -= rhs_cols; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + + res00 = spillover00; + res01 = spillover01; + + rhs_cols_idx = 0; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + 
+ lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + read_and_pad_s4((const int8_t *)&packed_rhs_ptr[rhs_cols], &rhs_low1, &rhs_high1); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + res01 = SMLAD(rhs_low1, lhs_low, res01); + res01 = SMLAD(rhs_high1, lhs_high, res01); + } + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * 
rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + } + + packed_rhs += 2 * rhs_cols; + dst += 4; + } + + int8_t rhs_spilled_col = 0; + const int32_t rhs_rows_finished = rhs_rows - (rhs_rows % 4); + // Left over rhs rows will be in the range 0 -> 3 + for (int rhs_rows_idx = 0; rhs_rows_idx < rhs_rows % 4; ++rhs_rows_idx) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + while (lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res10 = 0; + + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + res10 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + // Since there can only be 3 rhs rows here we only need treat rhs_row_idx[1] + // differently by dealing with the leftover column from rhs_row_idx[0] + if (rhs_cols % 2 && rhs_rows_idx == 1) + { + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + res00 += lhs_low * rhs_spilled_col; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + res10 += lhs_low * rhs_spilled_col; + + ++lhs_ptr; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + 
lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = 
SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + // 2 x MAC res10 + lhs_high = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_cols_off1]); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + res10 = SMLAD(rhs_low0, lhs_low, res10); + res10 = SMLAD(rhs_high0, lhs_high, res10); + } + + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + if (rhs_cols % 2 && !(rhs_rows_idx % 2)) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + 
lhs_offset; + + res00 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_low0; + + ++lhs_ptr; + } + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + res10 = arm_nn_requantize( + res10, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + res10 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr += rhs_rows; + + lhs_rows_idx--; + } + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + } + + // Since there can only be 3 rhs rows here we only need treat rhs_row_idx[1] + // differently by dealing with the leftover column from rhs_row_idx[0] + if (rhs_cols % 2 && rhs_rows_idx == 1) + { + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + res00 += lhs_low * rhs_spilled_col; + + ++lhs_ptr; + } + + int32_t rhs_cols_idx = 0; + + int32_t lhs_low, rhs_low0, rhs_high0, lhs_high; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = 
SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + read_and_pad_s4(packed_rhs_ptr, &rhs_low0, &rhs_high0); + packed_rhs_ptr += 2; + + lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + lhs_low = SXTAB16(ui32_lhs_offset_i16x2, lhs_high); + lhs_high = SXTAB16_RORn(ui32_lhs_offset_i16x2, lhs_high, 8); + + // 2 x MAC res00 + res00 = SMLAD(rhs_low0, lhs_low, res00); + res00 = SMLAD(rhs_high0, lhs_high, res00); + } + + for (; rhs_cols_idx <= rhs_cols - 2; rhs_cols_idx += 2) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + rhs_high0 = packed_rhs_ptr[0] >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + if (rhs_cols % 2 && !(rhs_rows_idx % 2)) + { + rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + res00 += lhs_low * rhs_low0; + 
+ ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + } + if (rhs_cols % 2 && !(rhs_rows_idx % 2)) + { + rhs_spilled_col = packed_rhs[rhs_cols_int4] >> 4; + packed_rhs += rhs_cols_int4 + 1; + } + else + { + rhs_spilled_col = 0; + packed_rhs += rhs_cols_int4; + } + + ++dst; + } +#else + + const int32_t rhs_cols_int4 = rhs_cols >> 1; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 4); rhs_rows_idx += 4) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = (lhs_rows >> 1); lhs_rows_idx > 0; --lhs_rows_idx) + { + // To avoid the issue of packed values leaking into the next rhs row + // we instead evaluate the rhs rows in pairs like so: + // rhs[0] and rhs[2], rhs[1] and rhs[3] + + // Start processing rhs_row[0] and rhs_row[2] + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res10 = 0; + int32_t res11 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + int32_t spillover10 = 0; + int32_t spillover11 = 0; + + if (bias) + { + res00 = bias[rhs_rows_idx]; + res01 = bias[rhs_rows_idx + 2]; + res10 = bias[rhs_rows_idx]; + res11 = bias[rhs_rows_idx + 2]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + 
res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res01 += lhs_low * rhs_low1; + + res10 += lhs_high * rhs_low0; + res11 += lhs_high * rhs_low1; + + lhs_ptr -= rhs_cols - 1; + + lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + spillover00 += lhs_low * rhs_high0; + spillover01 += lhs_low * rhs_high1; + + spillover10 += lhs_high * rhs_high0; + spillover11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + ++lhs_ptr; + } + else + { + lhs_ptr -= rhs_cols; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, 
activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[2] = (int8_t)res11; + dst_ptr -= rhs_rows; + + // Start processing rhs_row[1] and rhs_row[3] + res00 = spillover00; + res01 = spillover01; + res10 = spillover10; + res11 = spillover11; + if (bias) + { + res00 += bias[rhs_rows_idx + 1]; + res01 += bias[rhs_rows_idx + 3]; + res10 += bias[rhs_rows_idx + 1]; + res11 += bias[rhs_rows_idx + 3]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low0; + res10 += lhs_high * rhs_high0; + res11 += lhs_low * rhs_low1; + res11 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, 
activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[1] = (int8_t)res10; + dst_ptr[3] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + } + + // Left-over row + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + + int32_t res00 = 0; + int32_t res01 = 0; + + int32_t spillover00 = 0; + int32_t spillover01 = 0; + + if (bias) + { + res00 += bias[rhs_rows_idx]; + res01 += bias[rhs_rows_idx + 2]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + lhs_ptr -= rhs_cols - 1; + int32_t lhs_high = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res01 += lhs_low * rhs_low1; + spillover00 = lhs_high * rhs_high0; + spillover01 = lhs_high * rhs_high1; + + ++packed_rhs_ptr; + ++lhs_ptr; + } + else + { + lhs_ptr -= rhs_cols; + } + + // Quantize down + res00 = 
arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 2], dst_shifts[rhs_rows_idx + 2]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[2] = (int8_t)res01; + + res00 = spillover00; + res01 = spillover01; + + if (bias) + { + res00 += bias[rhs_rows_idx + 1]; + res01 += bias[rhs_rows_idx + 3]; + } + + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low0 = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high0 = packed_rhs_ptr[0] >> 4; + + int8_t rhs_low1 = (int8_t)(packed_rhs_ptr[rhs_cols] << 4) >> 4; + int8_t rhs_high1 = packed_rhs_ptr[rhs_cols] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low0; + res00 += lhs_high * rhs_high0; + + res01 += lhs_low * rhs_low1; + res01 += lhs_high * rhs_high1; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 3], dst_shifts[rhs_rows_idx + 3]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[1] = (int8_t)res00; + dst_ptr[3] = (int8_t)res01; + } + + packed_rhs += 2 * rhs_cols; + dst += 4; + } + + int32_t spillover00 = 0; + const int32_t rhs_rows_finished = rhs_rows - (rhs_rows % 4); + // Left over rhs rows will be in the range 0 -> 3 + for (int rhs_rows_idx = 0; rhs_rows_idx < rhs_rows % 4; 
++rhs_rows_idx) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = (lhs_rows >> 1); lhs_rows_idx > 0; --lhs_rows_idx) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + int32_t res00 = 0; + int32_t res10 = 0; + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + res10 = bias[rhs_rows_finished + rhs_rows_idx]; + } + // Since there can only be 3 rhs rows here we only need treat rhs_row_idx[1] + // differently by dealing with the leftover column from rhs_row_idx[0] + if (rhs_cols % 2 && rhs_rows_idx == 1) + { + int32_t lhs_low = lhs_ptr[0] + lhs_offset; + res00 += lhs_low * spillover00; + + lhs_low = lhs_ptr[lhs_cols_offset] + lhs_offset; + res10 += lhs_low * spillover00; + + ++lhs_ptr; + } + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high = packed_rhs_ptr[0] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low; + res00 += lhs_high * rhs_high; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + lhs_high = (int8_t)lhs_ptr[lhs_cols_offset + 1] + lhs_offset; + + res10 += lhs_low * rhs_low; + res10 += lhs_high * rhs_high; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2 && !(rhs_rows_idx % 2)) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_low * rhs_low; + + lhs_low = (int8_t)lhs_ptr[lhs_cols_offset] + lhs_offset; + + res10 += lhs_low * rhs_low; + + ++lhs_ptr; + } + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + res10 = arm_nn_requantize( + res10, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + 
rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + res10 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr += rhs_rows; + } + if (lhs_rows % 2) + { + const int8_t *packed_rhs_ptr = &packed_rhs[0]; + int32_t res00 = 0; + if (bias) + { + res00 = bias[rhs_rows_finished + rhs_rows_idx]; + } + // Since there can only be 3 rhs rows here we only need treat rhs_row_idx[1] + // differently by dealing with the leftover column from rhs_row_idx[0] + if (rhs_cols % 2 && rhs_rows_idx == 1) + { + int32_t lhs_low = lhs_ptr[0] + lhs_offset; + res00 += lhs_low * spillover00; + + ++lhs_ptr; + } + for (int32_t rhs_cols_idx = rhs_cols_int4; rhs_cols_idx != 0; --rhs_cols_idx) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + int8_t rhs_high = packed_rhs_ptr[0] >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset; + + res00 += lhs_low * rhs_low; + res00 += lhs_high * rhs_high; + + ++packed_rhs_ptr; + lhs_ptr += 2; + } + if (rhs_cols % 2 && (rhs_rows_idx != 1)) + { + int8_t rhs_low = (int8_t)(packed_rhs_ptr[0] << 4) >> 4; + + int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset; + res00 += lhs_low * rhs_low; + + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize( + res00, dst_multipliers[rhs_rows_finished + rhs_rows_idx], dst_shifts[rhs_rows_finished + rhs_rows_idx]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + } + if (rhs_cols % 2 && !(rhs_rows_idx % 2)) + { + spillover00 = packed_rhs[rhs_cols_int4] >> 4; + packed_rhs += rhs_cols_int4 + (rhs_cols & 0x1); + } + else + { + spillover00 = 0; + packed_rhs += rhs_cols_int4; + } + + ++dst; + } 
+
+#endif
+
+    return ARM_CMSIS_NN_SUCCESS;
+}
+
+/**
+ * @} end of Doxygen group
+ */
diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c
new file mode 100644
index 0000000..851691b
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c
@@ -0,0 +1,1117 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_nn_mat_mult_s8_nt_t_s8
+ * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed
+ *
+ * $Date: 22 March 2023
+ * $Revision: V.2.1.2
+ *
+ * Target : Arm(R) M-Profile Architecture
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/**
+ * @ingroup groupSupport
+ */
+
+/**
+ * @addtogroup supportConvolution
+ * @{
+ */
+
+// Experimental RP2040 dual-core support is enabled by default; comment out the #define below to disable it.
+#define TF_LITE_PICO_MULTICORE + +#ifdef TF_LITE_PICO_MULTICORE + +#include "pico/stdlib.h" +#include "pico/multicore.h" + +typedef struct { + int32_t rhs_rows_start; + int32_t rhs_rows_end; + const int8_t *lhs; + const int8_t *rhs; + const int32_t *bias; + int8_t* dst; + const int32_t *dst_multipliers; + const int32_t *dst_shifts; + int32_t lhs_rows; + int32_t rhs_rows; + int32_t rhs_cols; + int32_t lhs_offset; + int32_t dst_offset; + int32_t activation_min; + int32_t activation_max; + int32_t lhs_cols_offset; +} MatMulArgs; + +static MatMulArgs g_core1_mat_mul_args; + +static void calculate_two_rows( + const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t* dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset, + const int32_t rhs_rows_idx) { + + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_offset_contribution0 = 0; + int32_t lhs_offset_contribution1 = 0; + + for (int32_t x = 0; x < rhs_cols; ++x) + { + lhs_offset_contribution0 += rhs[x]; + lhs_offset_contribution1 += rhs[x + rhs_cols]; + } + + lhs_offset_contribution0 *= lhs_offset; + lhs_offset_contribution1 *= lhs_offset; + if (bias) + { + lhs_offset_contribution0 += bias[rhs_rows_idx]; + lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; + } + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + int32_t res10 = lhs_offset_contribution0; + int32_t res11 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += 
lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + lhs_value = lhs_ptr[lhs_cols_offset]; + res10 += lhs_value * rhs_value0; + res11 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[1] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + 
res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + } +} + +static void calculate_row_range( + int32_t rhs_rows_start, + int32_t rhs_rows_end, + const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t* dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t lhs_cols_offset) { + + const int8_t* current_rhs = rhs + (rhs_rows_start * rhs_cols); + int8_t* current_dst = dst + rhs_rows_start; + + for (int32_t rhs_rows_idx = rhs_rows_start; rhs_rows_idx < rhs_rows_end; rhs_rows_idx += 2) + { + calculate_two_rows( + lhs, + current_rhs, + bias, + current_dst, + dst_multipliers, + dst_shifts, + lhs_rows, + rhs_rows, + rhs_cols, + lhs_offset, + dst_offset, + activation_min, + activation_max, + lhs_cols_offset, + rhs_rows_idx); + + current_rhs += 2 * rhs_cols; + current_dst += 2; + } + +} + +static void mat_mul_task(const MatMulArgs* args) { + const int32_t rhs_rows_start = args->rhs_rows_start; + const int32_t rhs_rows_end = args->rhs_rows_end; + const int8_t *lhs = args->lhs; + const int8_t *rhs = args->rhs; + const int32_t *bias = args->bias; + int8_t* dst = args->dst; + const int32_t *dst_multipliers = args->dst_multipliers; + const int32_t *dst_shifts = args->dst_shifts; + int32_t lhs_rows = args->lhs_rows; + int32_t rhs_rows = args->rhs_rows; + int32_t rhs_cols = args->rhs_cols; + int32_t lhs_offset = args->lhs_offset; + int32_t dst_offset = args->dst_offset; + int32_t activation_min = args->activation_min; + int32_t activation_max = args->activation_max; + int32_t lhs_cols_offset = args->lhs_cols_offset; 
+ + calculate_row_range( + rhs_rows_start, + rhs_rows_end, + lhs, + rhs, + bias, + dst, + dst_multipliers, + dst_shifts, + lhs_rows, + rhs_rows, + rhs_cols, + lhs_offset, + dst_offset, + activation_min, + activation_max, + lhs_cols_offset); +} + +static void core1_mat_mul_worker(void) { + mat_mul_task(&g_core1_mat_mul_args); + + // Signal we're done by pushing a result of zero. + multicore_fifo_push_blocking(ARM_CMSIS_NN_SUCCESS); +} + +#endif // TF_LITE_PICO_MULTICORE + +/* + * s8 matrix multiplication with the right-hand-side matrix transposed + * + * Refer header file for details. + * + */ + + + +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *bias, + int8_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max, + const int32_t row_address_offset, + const int32_t lhs_cols_offset) +{ + +#if defined(ARM_MATH_MVEI) + int i_items = 0; + for (; i_items <= (lhs_rows - 4); i_items += 4) + { + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + const int8_t *lhs_vec = lhs; + const int8_t *ip_row_1 = lhs + lhs_cols_offset; + const int8_t *ip_row_2 = lhs + (2 * lhs_cols_offset); + const int8_t *ip_row_3 = lhs + (3 * lhs_cols_offset); + const int8_t *col_base = rhs + i * rhs_cols; + int32_t sum_tmp = 0; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < rhs_cols; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += lhs_vec[j] * col; + acc_n1 += ip_row_1[j] * col; + acc_n2 += ip_row_2[j] * col; + acc_n3 += ip_row_3[j] * col; + } + #else + // Note: If operand initialization is moved around, use '&' constraint to + // specify earlyclobber operands. 
+ __ASM volatile(" .p2align 2 \n" + " wlstp.8 lr, %[cnt], 1f \n" + " mov %[sum], 0 \n" + " mov %[out0], 0 \n" + " mov %[out1], 0 \n" + " mov %[out2], 0 \n" + " mov %[out3], 0 \n" + " vldrb.8 q0, [%[col]], #16 \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q2, [%[row1]], #16 \n" + " vmladava.s8 %[out1], q0, q2 \n" + " vldrb.8 q3, [%[row2]], #16 \n" + " vmladava.s8 %[out2], q0, q3 \n" + " vldrb.8 q4, [%[row3]], #16 \n" + " vmladava.s8 %[out3], q0, q4 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), + [sum] "=Te"(sum_tmp), + [row0] "+r"(lhs_vec), + [row1] "+r"(ip_row_1), + [row2] "+r"(ip_row_2), + [row3] "+r"(ip_row_3), + [out0] "=Te"(acc_n0), + [out1] "=Te"(acc_n1), + [out2] "=Te"(acc_n2), + [out3] "=Te"(acc_n3) + : [cnt] "r"(rhs_cols) + : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); + #endif + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + sum_tmp *= lhs_offset; + if (bias) + { + sum_tmp += bias[i]; + } + res = vaddq_n_s32(res, sum_tmp); + + res = arm_requantize_mve(res, dst_multipliers[i], dst_shifts[i]); + res = vaddq_n_s32(res, dst_offset); + + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + const uint32x4_t scatter_offset = {0, (uint32_t)rhs_rows, (uint32_t)rhs_rows * 2, (uint32_t)rhs_rows * 3}; + vstrbq_scatter_offset_s32(dst, scatter_offset, res); + dst++; + } + lhs += 4 * lhs_cols_offset; + dst += (3 * rhs_rows); + } + + for (; i_items < lhs_rows; i_items++) + { + int32_t acc[4]; + const int32_t *multipliers = dst_multipliers; + const int32_t *shifts = dst_shifts; + for (int i = 0; i < rhs_rows; i++) + { + int32_t acc_n0 = 0; + const int8_t *lhs_vec = lhs; + const int8_t *col_base = rhs + i * rhs_cols; + int32_t sum_tmp = 0; + + #if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < rhs_cols; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += 
lhs_vec[j] * col; + } + #else + __ASM volatile(" .p2align 2 \n" + " wlstp.8 lr, %[cnt], 1f \n" + " mov %[sum], 0 \n" + " mov %[out0], 0 \n" + " vldrb.8 q0, [%[col]], #16 \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), [sum] "=Te"(sum_tmp), [row0] "+r"(lhs_vec), [out0] "=Te"(acc_n0) + : [cnt] "r"(rhs_cols) + : "q0", "q1", "memory", "r14"); + #endif + sum_tmp *= lhs_offset; + sum_tmp += acc_n0; + if (bias) + { + sum_tmp += bias[i]; + } + const int32_t index = i & 0x3; + acc[index] = sum_tmp; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts)); + multipliers += 4; + shifts += 4; + res = vaddq_n_s32(res, dst_offset); + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + vstrbq_s32(dst, res); + dst += 4; + } + } + lhs += lhs_cols_offset; + const int32_t tail_rows = rhs_rows & 0x3; + for (int i = 0; i < tail_rows; i++) + { + int32_t acc_n0 = acc[i]; + acc_n0 = arm_nn_requantize(acc_n0, multipliers[i], shifts[i]); + acc_n0 += dst_offset; + acc_n0 = MAX(acc_n0, activation_min); + acc_n0 = MIN(acc_n0, activation_max); + *dst++ = (int8_t)acc_n0; + } + } + +#elif defined(ARM_MATH_DSP) + const int32_t rhs_off0 = rhs_cols - 4; + const int32_t lhs_off0 = lhs_cols_offset - 4; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_offset_contribution0 = 0; + int32_t lhs_offset_contribution1 = 0; + + for (int32_t x = 0; x < rhs_cols; ++x) + { + lhs_offset_contribution0 += rhs[x]; + lhs_offset_contribution1 += rhs[x + rhs_cols]; + } + + lhs_offset_contribution0 *= lhs_offset; + lhs_offset_contribution1 *= lhs_offset; + if (bias) + { + lhs_offset_contribution0 += 
bias[rhs_rows_idx]; + lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; + } + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + int32_t res10 = lhs_offset_contribution0; + int32_t res11 = lhs_offset_contribution1; + + int32_t rhs_cols_idx = 0; + + int32_t val0, val1, val2, val3, val4, val5; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + val1 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val2 = SXTB16(val1); + val0 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val4 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val1 = SXTB16_RORn(val1, 8); + val0 = SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val3, val2, res00); + val5 = SXTB16(val4); + res00 = SMLAD(val0, val1, res00); + val4 = SXTB16_RORn(val4, 8); + res01 = SMLAD(val3, val5, res01); + res01 = SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_off0]); + val3 = SXTB16(val0); + val0 = SXTB16_RORn(val0, 8); + res10 = SMLAD(val3, val2, res10); + res11 = SMLAD(val3, val5, res11); + res10 = SMLAD(val0, val1, res10); + val1 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + res11 = SMLAD(val0, val4, res11); + + val4 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = SXTB16(val1); + val0 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val1 = SXTB16_RORn(val1, 8); + val0 = SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val3, val2, res00); + val5 = SXTB16(val4); + res00 = SMLAD(val0, val1, res00); + val4 = SXTB16_RORn(val4, 8); + res01 = SMLAD(val3, val5, res01); + res01 = SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_off0]); + val3 = SXTB16(val0); + val0 = SXTB16_RORn(val0, 8); + res10 = SMLAD(val3, val2, 
res10); + res11 = SMLAD(val3, val5, res11); + res10 = SMLAD(val0, val1, res10); + val1 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + res11 = SMLAD(val0, val4, res11); + + val4 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = SXTB16(val1); + val0 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val1 = SXTB16_RORn(val1, 8); + val0 = SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val3, val2, res00); + val5 = SXTB16(val4); + res00 = SMLAD(val0, val1, res00); + val4 = SXTB16_RORn(val4, 8); + res01 = SMLAD(val3, val5, res01); + res01 = SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_off0]); + val3 = SXTB16(val0); + val0 = SXTB16_RORn(val0, 8); + res10 = SMLAD(val3, val2, res10); + res11 = SMLAD(val3, val5, res11); + res10 = SMLAD(val0, val1, res10); + val1 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + res11 = SMLAD(val0, val4, res11); + + val4 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = SXTB16(val1); + val0 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val1 = SXTB16_RORn(val1, 8); + val0 = SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val3, val2, res00); + val5 = SXTB16(val4); + res00 = SMLAD(val0, val1, res00); + val4 = SXTB16_RORn(val4, 8); + res01 = SMLAD(val3, val5, res01); + res01 = SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_off0]); + val3 = SXTB16(val0); + val0 = SXTB16_RORn(val0, 8); + res10 = SMLAD(val3, val2, res10); + res11 = SMLAD(val3, val5, res11); + res10 = SMLAD(val0, val1, res10); + res11 = SMLAD(val0, val4, res11); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + val1 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val2 = SXTB16(val1); + val0 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val4 = arm_nn_read_s8x4((const int8_t 
*)&rhs_ptr[rhs_off0]); + val1 = SXTB16_RORn(val1, 8); + val0 = SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val3, val2, res00); + val5 = SXTB16(val4); + res00 = SMLAD(val0, val1, res00); + val4 = SXTB16_RORn(val4, 8); + res01 = SMLAD(val3, val5, res01); + res01 = SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_s8x4((const int8_t *)&lhs_ptr[lhs_off0]); + val3 = SXTB16(val0); + val0 = SXTB16_RORn(val0, 8); + res10 = SMLAD(val3, val2, res10); + res11 = SMLAD(val3, val5, res11); + res10 = SMLAD(val0, val1, res10); + res11 = SMLAD(val0, val4, res11); + } + + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + lhs_value = lhs_ptr[lhs_cols_offset]; + res10 += lhs_value * rhs_value0; + res11 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[1] = (int8_t)res11; + dst_ptr += rhs_rows; 
+ + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + + int32_t rhs_cols_idx = 0; + + int32_t val0, val1, val2, val3, val4, val5; + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + val0 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val1 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val5 = SXTB16(val2); + val4 = SXTB16(val1); + val0 = SXTB16_RORn(val0, 8); + val2 = SXTB16_RORn(val2, 8); + val1 = SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val5, val3, res00); + res00 = SMLAD(val2, val0, res00); + res01 = SMLAD(val5, val4, res01); + res01 = SMLAD(val2, val1, res01); + + val0 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val1 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val5 = SXTB16(val2); + val4 = SXTB16(val1); + val0 = SXTB16_RORn(val0, 8); + val2 = SXTB16_RORn(val2, 8); + val1 = SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val5, val3, res00); + res00 = SMLAD(val2, val0, res00); + res01 = SMLAD(val5, val4, res01); + res01 = SMLAD(val2, val1, res01); + + val0 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val1 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val5 = SXTB16(val2); + val4 = SXTB16(val1); + val0 = SXTB16_RORn(val0, 8); + val2 = SXTB16_RORn(val2, 8); + val1 = SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val5, val3, res00); + res00 = SMLAD(val2, val0, res00); + res01 = SMLAD(val5, val4, res01); + res01 = SMLAD(val2, val1, res01); + + val0 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + 
val1 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val5 = SXTB16(val2); + val4 = SXTB16(val1); + val0 = SXTB16_RORn(val0, 8); + val2 = SXTB16_RORn(val2, 8); + val1 = SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val5, val3, res00); + res00 = SMLAD(val2, val0, res00); + res01 = SMLAD(val5, val4, res01); + res01 = SMLAD(val2, val1, res01); + } + + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + val0 = arm_nn_read_s8x4_ia((const int8_t **)&rhs_ptr); + val1 = arm_nn_read_s8x4((const int8_t *)&rhs_ptr[rhs_off0]); + val2 = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr); + val3 = SXTB16(val0); + val5 = SXTB16(val2); + val4 = SXTB16(val1); + val0 = SXTB16_RORn(val0, 8); + val2 = SXTB16_RORn(val2, 8); + val1 = SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = SMLAD(val5, val3, res00); + res00 = SMLAD(val2, val0, res00); + res01 = SMLAD(val5, val4, res01); + res01 = SMLAD(val2, val1, res01); + } + + // Left-over accumulations + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + } + + rhs += 2 * rhs_cols; + dst += 2; + } + + if (rhs_rows % 2) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for 
(int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + int32_t res00 = 0; + if (bias) + { + res00 = bias[rhs_rows - 1]; + } + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value = rhs_ptr[0]; + int32_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value; + + ++rhs_ptr; + ++lhs_ptr; + } + lhs_ptr -= rhs_cols; + lhs_ptr += lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + } + } +#else + +#if defined(TF_LITE_PICO_MULTICORE) + + const int32_t mid_range = (rhs_rows / 4) * 2; + + MatMulArgs shared_args; + shared_args.lhs = lhs; + shared_args.rhs = rhs; + shared_args.bias = bias; + shared_args.dst = dst; + shared_args.dst_multipliers = dst_multipliers; + shared_args.dst_shifts = dst_shifts; + shared_args.lhs_rows = lhs_rows; + shared_args.rhs_rows = rhs_rows; + shared_args.rhs_cols = rhs_cols; + shared_args.lhs_offset = lhs_offset; + shared_args.dst_offset = dst_offset; + shared_args.activation_min = activation_min; + shared_args.activation_max = activation_max; + shared_args.lhs_cols_offset = lhs_cols_offset; + + MatMulArgs core0_args = shared_args; + core0_args.rhs_rows_start = 0; + core0_args.rhs_rows_end = mid_range; + + MatMulArgs core1_args = shared_args; + core1_args.rhs_rows_start = mid_range; + core1_args.rhs_rows_end = rhs_rows - 1; + + // Start the second core working. + g_core1_mat_mul_args = core1_args; + + multicore_reset_core1(); + multicore_launch_core1(core1_mat_mul_worker); + + // Do the processing on the first core. + mat_mul_task(&core0_args); + + // A result of ARM_CMSIS_NN_SUCCESS means success. Blocks until core 1 is + // done. 
+ const uint32_t core1_result = multicore_fifo_pop_blocking(); + if (core1_result != ARM_CMSIS_NN_SUCCESS) { + return core1_result; + } + + const int32_t rows_processed = (rhs_rows / 2) * 2; + const int8_t* new_rhs = rhs + (rows_processed * rhs_cols); + const int8_t* new_dst = dst + rows_processed; + + rhs = new_rhs; + dst = new_dst; + + +#else + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + int32_t lhs_offset_contribution0 = 0; + int32_t lhs_offset_contribution1 = 0; + + for (int32_t x = 0; x < rhs_cols; ++x) + { + lhs_offset_contribution0 += rhs[x]; + lhs_offset_contribution1 += rhs[x + rhs_cols]; + } + + lhs_offset_contribution0 *= lhs_offset; + lhs_offset_contribution1 *= lhs_offset; + if (bias) + { + lhs_offset_contribution0 += bias[rhs_rows_idx]; + lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; + } + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + int32_t res10 = lhs_offset_contribution0; + int32_t res11 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + lhs_value = lhs_ptr[lhs_cols_offset]; + res10 += lhs_value * rhs_value0; + res11 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], 
dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (int8_t)res10; + dst_ptr[1] = (int8_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr -= rhs_cols; + lhs_ptr += 2 * lhs_cols_offset; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = lhs_offset_contribution0; + int32_t res01 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8_t rhs_value0 = rhs_ptr[0]; + int8_t rhs_value1 = rhs_ptr[rhs_cols]; + int8_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr[1] = (int8_t)res01; + } + + rhs += 2 * rhs_cols; + dst += 2; + } +#endif + + if (rhs_rows % 2) + { + const int8_t *lhs_ptr = &lhs[0]; + int8_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + int32_t res00 = 0; + if (bias) + { + 
res00 = bias[rhs_rows - 1]; + } + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_value = rhs_ptr[0]; + int32_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value; + + ++rhs_ptr; + ++lhs_ptr; + } + lhs_ptr -= rhs_cols; + lhs_ptr += lhs_cols_offset; + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (int8_t)res00; + dst_ptr += rhs_rows; + } + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8_s32.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8_s32.c new file mode 100644 index 0000000..bcb7811 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8_s32.c @@ -0,0 +1,470 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_nt_t_s8_s32 + * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed + * + * $Date: 31 January 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * s32 matrix multiplication with the right-hand-side matrix transposed + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8_s32(const int8_t *lhs, + const int8_t *rhs, + int32_t *dst, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_idx_offset) +{ + int32_t rhs_rows_idx = rhs_rows; + const int32_t dst_idx_col_offset = dst_idx_offset * rhs_cols; +#if defined(ARM_MATH_MVEI) + for (; rhs_rows_idx >= 16; rhs_rows_idx -= 16) + { + int32_t *dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + int32_t lhs_rows_idx = lhs_rows; + + for (; lhs_rows_idx >= 4; lhs_rows_idx -= 4) + { + const int8_t *rhs_ptr = &rhs[0]; + int8x16_t v_lhs0 = vldrbq_s8(lhs_ptr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs1 = vldrbq_s8(lhs_ptr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs2 = vldrbq_s8(lhs_ptr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs3 = vldrbq_s8(lhs_ptr); + lhs_ptr += rhs_rows; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t *ip_dst = dst_ptr; + + int8x16_t v_rhs0 = vldrbq_s8(rhs_ptr); + int32_t rhs_sum = vaddvq_s8(v_rhs0); + rhs_sum *= lhs_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_s8(*ip_dst, v_lhs0, v_rhs0); + ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_s8(*ip_dst, v_lhs1, v_rhs0); + 
ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_s8(*ip_dst, v_lhs2, v_rhs0); + ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_s8(*ip_dst, v_lhs3, v_rhs0); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + + dst_ptr += 3 * dst_idx_col_offset; + } + for (; lhs_rows_idx > 0; lhs_rows_idx--) + { + const int8_t *rhs_ptr = &rhs[0]; + int8x16_t v_lhs0 = vldrbq_s8(lhs_ptr); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8x16_t v_rhs0 = vldrbq_s8(rhs_ptr); + + int32_t offset_sum = vaddvq_s8(v_rhs0); + *dst_ptr += offset_sum * lhs_offset; + + *dst_ptr = vmladavaq_s8(*dst_ptr, v_lhs0, v_rhs0); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + lhs_ptr += rhs_rows; + } + + rhs += 16; + lhs += 16; + } + if (rhs_rows_idx) + { + mve_pred16_t rmdr = (1 << rhs_rows_idx) - 1; + int32_t *dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + int32_t lhs_rows_idx = lhs_rows; + + for (; lhs_rows_idx >= 4; lhs_rows_idx -= 4) + { + const int8_t *rhs_ptr = &rhs[0]; + int8x16_t v_lhs0 = vldrbq_z_s8(lhs_ptr, rmdr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs1 = vldrbq_z_s8(lhs_ptr, rmdr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs2 = vldrbq_z_s8(lhs_ptr, rmdr); + lhs_ptr += rhs_rows; + int8x16_t v_lhs3 = vldrbq_z_s8(lhs_ptr, rmdr); + lhs_ptr += rhs_rows; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t *ip_dst = dst_ptr; + int8x16_t v_rhs0 = vldrbq_z_s8(rhs_ptr, rmdr); + + int32_t rhs_sum = vaddvq_p_s8(v_rhs0, rmdr); + rhs_sum *= lhs_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_p_s8(*ip_dst, v_lhs0, v_rhs0, rmdr); + ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_p_s8(*ip_dst, v_lhs1, v_rhs0, rmdr); + ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_p_s8(*ip_dst, v_lhs2, v_rhs0, rmdr); + ip_dst += dst_idx_col_offset; + + *ip_dst += rhs_sum; + *ip_dst = vmladavaq_p_s8(*ip_dst, 
v_lhs3, v_rhs0, rmdr); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + + dst_ptr += 3 * dst_idx_col_offset; + } + for (; lhs_rows_idx > 0; lhs_rows_idx--) + { + const int8_t *rhs_ptr = &rhs[0]; + int8x16_t v_lhs0 = vldrbq_z_s8(lhs_ptr, rmdr); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int8x16_t v_rhs0 = vldrbq_z_s8(rhs_ptr, rmdr); + + int32_t rhs_sum = vaddvq_p_s8(v_rhs0, rmdr); + *dst_ptr += rhs_sum * lhs_offset; + + *dst_ptr = vmladavaq_p_s8(*dst_ptr, v_lhs0, v_rhs0, rmdr); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + lhs_ptr += rhs_rows; + } + } + +#elif defined(ARM_MATH_DSP) + int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + for (; rhs_rows_idx >= 8; rhs_rows_idx -= 8) + { + int32_t *dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t lhs000, lhs001, lhs010, lhs011, lhs100, lhs101, lhs110, lhs111; + read_pad_and_add_s8(lhs_ptr, &lhs000, &lhs001, lhs_offset_s16x2); + read_pad_and_add_s8(&lhs_ptr[4], &lhs010, &lhs011, lhs_offset_s16x2); + read_pad_and_add_s8(&lhs_ptr[rhs_rows], &lhs100, &lhs101, lhs_offset_s16x2); + read_pad_and_add_s8(&lhs_ptr[rhs_rows + 4], &lhs110, &lhs111, lhs_offset_s16x2); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_val00, rhs_val01; + read_and_pad(rhs_ptr, &rhs_val00, &rhs_val01); + + dst_ptr[0] = SMLAD(lhs000, rhs_val00, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs001, rhs_val01, dst_ptr[0]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs100, rhs_val00, dst_ptr[dst_idx_col_offset]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs101, rhs_val01, dst_ptr[dst_idx_col_offset]); + + read_and_pad(&rhs_ptr[4], &rhs_val00, &rhs_val01); + + dst_ptr[0] = SMLAD(lhs010, rhs_val00, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs011, rhs_val01, dst_ptr[0]); + 
dst_ptr[dst_idx_col_offset] = SMLAD(lhs110, rhs_val00, dst_ptr[dst_idx_col_offset]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs111, rhs_val01, dst_ptr[dst_idx_col_offset]); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + dst_ptr += dst_idx_col_offset; + + lhs_ptr += rhs_rows << 1; + + lhs_rows_idx--; + } + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + int32_t lhs00, lhs01, lhs10, lhs11; + read_pad_and_add_s8(lhs_ptr, &lhs00, &lhs01, lhs_offset_s16x2); + read_pad_and_add_s8(&lhs_ptr[4], &lhs10, &lhs11, lhs_offset_s16x2); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_val00, rhs_val01, rhs_val10, rhs_val11; + read_and_pad(rhs_ptr, &rhs_val00, &rhs_val01); + read_and_pad(&rhs_ptr[4], &rhs_val10, &rhs_val11); + + dst_ptr[0] = SMLAD(lhs00, rhs_val00, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs01, rhs_val01, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs10, rhs_val10, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs11, rhs_val11, dst_ptr[0]); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + } + + rhs += 8; + lhs += 8; + } + for (; rhs_rows_idx >= 4; rhs_rows_idx -= 4) + { + int32_t *dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t lhs00, lhs01, lhs10, lhs11; + read_pad_and_add_s8(lhs_ptr, &lhs00, &lhs01, lhs_offset_s16x2); + read_pad_and_add_s8(&lhs_ptr[rhs_rows], &lhs10, &lhs11, lhs_offset_s16x2); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_val0, rhs_val1; + read_and_pad(rhs_ptr, &rhs_val0, &rhs_val1); + + dst_ptr[0] = SMLAD(lhs00, rhs_val0, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs01, rhs_val1, dst_ptr[0]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs10, rhs_val0, dst_ptr[dst_idx_col_offset]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs11, rhs_val1, dst_ptr[dst_idx_col_offset]); + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + 
} + dst_ptr += dst_idx_col_offset; + + lhs_ptr += rhs_rows << 1; + + lhs_rows_idx--; + } + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + int32_t lhs00, lhs01; + read_pad_and_add_s8(lhs_ptr, &lhs00, &lhs01, lhs_offset_s16x2); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_val0, rhs_val1; + read_and_pad(rhs_ptr, &rhs_val0, &rhs_val1); + + dst_ptr[0] = SMLAD(lhs00, rhs_val0, dst_ptr[0]); + dst_ptr[0] = SMLAD(lhs01, rhs_val1, dst_ptr[0]); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + } + + rhs += 4; + lhs += 4; + } + for (; rhs_rows_idx >= 2; rhs_rows_idx -= 2) + { + int32_t *dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + int32_t lhs0, lhs1; + read_pad_and_add_s8x2(lhs_ptr, &lhs0, lhs_offset_s16x2); + read_pad_and_add_s8x2(&lhs_ptr[rhs_rows], &lhs1, lhs_offset_s16x2); + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + int32_t rhs_val; + read_and_pad_s8x2(rhs_ptr, &rhs_val); + + dst_ptr[0] = SMLAD(lhs0, rhs_val, dst_ptr[0]); + dst_ptr[dst_idx_col_offset] = SMLAD(lhs1, rhs_val, dst_ptr[dst_idx_col_offset]); + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + dst_ptr += dst_idx_col_offset; + + lhs_ptr += rhs_rows << 1; + + lhs_rows_idx--; + } + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + const int32_t lhs_value = lhs_ptr[0] + lhs_offset; + const int32_t lhs_value01 = lhs_ptr[1] + lhs_offset; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + const int32_t rhs_value0 = rhs_ptr[0]; + const int32_t rhs_value01 = rhs_ptr[1]; + + dst_ptr[0] += lhs_value * rhs_value0; + dst_ptr[0] += lhs_value01 * rhs_value01; + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + } + + rhs += 2; + lhs += 2; + } +#else + for (; rhs_rows_idx >= 2; rhs_rows_idx -= 2) + { + int32_t 
*dst_ptr = &dst[0]; + const int8_t *lhs_ptr = &lhs[0]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + + const int32_t lhs_value00 = lhs_ptr[0] + lhs_offset; + const int32_t lhs_value01 = lhs_ptr[1] + lhs_offset; + + const int32_t lhs_value10 = lhs_ptr[rhs_rows] + lhs_offset; + const int32_t lhs_value11 = lhs_ptr[rhs_rows + 1] + lhs_offset; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + const int32_t rhs_value0 = rhs_ptr[0]; + const int32_t rhs_value1 = rhs_ptr[1]; + + dst_ptr[0] += lhs_value00 * rhs_value0; + dst_ptr[0] += lhs_value01 * rhs_value1; + + dst_ptr[dst_idx_col_offset] += lhs_value10 * rhs_value0; + dst_ptr[dst_idx_col_offset] += lhs_value11 * rhs_value1; + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + dst_ptr += dst_idx_col_offset; + + lhs_ptr += rhs_rows << 1; + + lhs_rows_idx--; + } + // Left-over rows + if (lhs_rows % 2) + { + const int8_t *rhs_ptr = &rhs[0]; + const int32_t lhs_value = lhs_ptr[0] + lhs_offset; + const int32_t lhs_value01 = lhs_ptr[1] + lhs_offset; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + const int32_t rhs_value0 = rhs_ptr[0]; + const int32_t rhs_value01 = rhs_ptr[1]; + + dst_ptr[0] += lhs_value * rhs_value0; + dst_ptr[0] += lhs_value01 * rhs_value01; + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + } + + rhs += 2; + lhs += 2; + } +#endif +#if !defined(ARM_MATH_MVEI) + if (rhs_rows_idx) + { + const int8_t *lhs_ptr = &lhs[0]; + int32_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) + { + const int8_t *rhs_ptr = &rhs[0]; + const int32_t lhs_value = lhs_ptr[0] + lhs_offset; + + for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--) + { + const int32_t rhs_value = rhs_ptr[0]; + + *dst_ptr += lhs_value * rhs_value; + + dst_ptr += dst_idx_offset; + rhs_ptr += rhs_rows; + } + lhs_ptr += rhs_rows; + } + } +#endif + 
return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_transpose_conv_row_s8_s32.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_transpose_conv_row_s8_s32.c new file mode 100644 index 0000000..4cf0832 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_transpose_conv_row_s8_s32.c @@ -0,0 +1,515 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_transpose_conv_row_s8_s32 + * Description: Transpose covolution help function. + * + * $Date: 22 Oct 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConvolution + * @{ + */ + +/* + * Computation of transposed convolution for one row of input into a rolling scratch buffer. + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_transpose_conv_row_s8_s32(const int8_t *lhs, + const int8_t *rhs, + int32_t *output_start, + const int32_t output_index, + const int32_t output_max, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t input_channels, + const int32_t output_channels, + const int32_t lhs_offset, + const int32_t row_offset, + const int32_t input_x, + const int32_t stride_x, + const int32_t skip_rows_top, + const int32_t skip_rows_bottom) +{ + + const int32_t skip_pre_rows = skip_rows_top * rhs_cols * input_channels; + const int32_t skip_post_rows = skip_rows_bottom * rhs_cols * input_channels; + const int32_t rhs_rows_count = rhs_rows - skip_rows_top - skip_rows_bottom; + + int32_t input_count = input_x; + for (; input_count > 3; input_count -= 4) + { + const int8_t *rhs_ptr = rhs; + + for (int32_t i_out_channel = 0; i_out_channel < output_channels; i_out_channel++) + { + rhs_ptr += skip_pre_rows; + int32_t index = output_index; + + for (int32_t i_row = 0; i_row < rhs_rows_count; i_row++) + { + int32_t *output_ptr0 = output_start + index; + + for (int32_t i_col = 0; i_col < rhs_cols; i_col++) + { + const int8_t *lhs_ptr0 = lhs; + + int32_t result0 = 0; + int32_t result1 = 0; + int32_t result2 = 0; + int32_t result3 = 0; + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + for (int32_t channel_count = input_channels; channel_count > 3; channel_count -= 4) + { + const int8_t *lhs_temp = lhs_ptr0; + int32_t lhs00 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs01 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs00, 8); + lhs00 = SXTAB16(lhs_offset_s16x2, lhs00); + lhs_temp += input_channels; + int32_t lhs10 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs11 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs10, 8); + lhs10 = SXTAB16(lhs_offset_s16x2, lhs10); + lhs_temp += input_channels; + int32_t lhs20 = 
arm_nn_read_s8x4(lhs_temp); + int32_t lhs21 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs20, 8); + lhs20 = SXTAB16(lhs_offset_s16x2, lhs20); + lhs_temp += input_channels; + int32_t lhs30 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs31 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs30, 8); + lhs30 = SXTAB16(lhs_offset_s16x2, lhs30); + lhs_ptr0 += 4; + + int32_t rhs0 = arm_nn_read_s8x4(rhs_ptr); + int32_t rhs1 = SXTB16_RORn((uint32_t)rhs0, 8); + rhs0 = SXTB16(rhs0); + rhs_ptr += 4; + + result0 = SMLAD(lhs00, rhs0, result0); + result0 = SMLAD(lhs01, rhs1, result0); + result1 = SMLAD(lhs10, rhs0, result1); + result1 = SMLAD(lhs11, rhs1, result1); + result2 = SMLAD(lhs20, rhs0, result2); + result2 = SMLAD(lhs21, rhs1, result2); + result3 = SMLAD(lhs30, rhs0, result3); + result3 = SMLAD(lhs31, rhs1, result3); + } + + for (int32_t i = 0; i < (input_channels & 0b11); i++) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp + lhs_offset; + lhs_temp += input_channels; + const int32_t lhs_val10 = *lhs_temp + lhs_offset; + lhs_temp += input_channels; + const int32_t lhs_val20 = *lhs_temp + lhs_offset; + lhs_temp += input_channels; + const int32_t lhs_val30 = *lhs_temp + lhs_offset; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + result1 += lhs_val10 * rhs_val0; + result2 += lhs_val20 * rhs_val0; + result3 += lhs_val30 * rhs_val0; + } + + int32_t *output_temp = output_ptr0; + *output_ptr0 += result0; + output_temp += stride_x * output_channels; + *output_temp += result1; + output_temp += stride_x * output_channels; + *output_temp += result2; + output_temp += stride_x * output_channels; + *output_temp += result3; + + output_ptr0 += output_channels; +#else + int32_t rhs_sum = 0; + #if defined(ARM_MATH_MVEI) + + int channel_count = input_channels; + for (int channel_i = 0; channel_i < (input_channels + 15) / 16; channel_i++) + { + mve_pred16_t p0 = vctp8q((uint32_t)channel_count); + channel_count -= 16; + + 
const int8_t *lhs_temp = lhs_ptr0; + int8x16_t v_lhs00 = vldrbq_z_s8(lhs_temp, p0); + lhs_temp += input_channels; + int8x16_t v_lhs10 = vldrbq_z_s8(lhs_temp, p0); + lhs_temp += input_channels; + int8x16_t v_lhs20 = vldrbq_z_s8(lhs_temp, p0); + lhs_temp += input_channels; + int8x16_t v_lhs30 = vldrbq_z_s8(lhs_temp, p0); + + lhs_ptr0 += 16; + int8x16_t v_rhs0 = vldrbq_z_s8(rhs_ptr, p0); + rhs_ptr += 16; + + result0 = vmladavaq_s8(result0, v_lhs00, v_rhs0); + result1 = vmladavaq_s8(result1, v_lhs10, v_rhs0); + result2 = vmladavaq_s8(result2, v_lhs20, v_rhs0); + result3 = vmladavaq_s8(result3, v_lhs30, v_rhs0); + + rhs_sum = vaddvaq_s8(rhs_sum, v_rhs0); + } + + rhs_ptr += channel_count; + + #else + for (int32_t channel_count = 0; channel_count < input_channels / 2; channel_count++) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val10 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val20 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val30 = *lhs_temp; + lhs_ptr0++; + + lhs_temp = lhs_ptr0; + const int32_t lhs_val01 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val11 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val21 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val31 = *lhs_temp; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + const int32_t rhs_val1 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + result0 += lhs_val01 * rhs_val1; + + result1 += lhs_val10 * rhs_val0; + result1 += lhs_val11 * rhs_val1; + + result2 += lhs_val20 * rhs_val0; + result2 += lhs_val21 * rhs_val1; + + result3 += lhs_val30 * rhs_val0; + result3 += lhs_val31 * rhs_val1; + + rhs_sum += rhs_val0; + rhs_sum += rhs_val1; + } + + // Input channel tail-handling + if (input_channels & 0b1) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val10 = 
*lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val20 = *lhs_temp; + lhs_temp += input_channels; + const int32_t lhs_val30 = *lhs_temp; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + result1 += lhs_val10 * rhs_val0; + result2 += lhs_val20 * rhs_val0; + result3 += lhs_val30 * rhs_val0; + + rhs_sum += rhs_val0; + } + #endif + int32_t *output_temp = output_ptr0; + *output_ptr0 += result0 + rhs_sum * lhs_offset; + output_temp += stride_x * output_channels; + *output_temp += result1 + rhs_sum * lhs_offset; + output_temp += stride_x * output_channels; + *output_temp += result2 + rhs_sum * lhs_offset; + output_temp += stride_x * output_channels; + *output_temp += result3 + rhs_sum * lhs_offset; + + output_ptr0 += output_channels; +#endif + } + + // Next row, wrapping around the circular buffer + index = (index + row_offset) % output_max; + } + // Next output_channel + ++output_start; + rhs_ptr += skip_post_rows; + } + + output_start += (4 * stride_x - 1) * output_channels; + lhs += 4 * input_channels; + } + + // Input column tail handling + if (input_count & 0b10) + { + const int8_t *rhs_ptr = rhs; + + for (int32_t i_out_channel = 0; i_out_channel < output_channels; i_out_channel++) + { + int32_t index = output_index; + rhs_ptr += skip_pre_rows; + + for (int32_t i_row = 0; i_row < rhs_rows_count; i_row++) + { + int32_t *output_ptr0 = output_start + index; + + for (int32_t i_col = 0; i_col < rhs_cols; i_col++) + { + const int8_t *lhs_ptr0 = lhs; + + int32_t result0 = 0; + int32_t result1 = 0; + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + for (int32_t channel_count = input_channels; channel_count > 3; channel_count -= 4) + { + const int8_t *lhs_temp = lhs_ptr0; + int32_t lhs00 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs01 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs00, 
8); + lhs00 = SXTAB16(lhs_offset_s16x2, lhs00); + lhs_temp += input_channels; + int32_t lhs10 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs11 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs10, 8); + lhs10 = SXTAB16(lhs_offset_s16x2, lhs10); + lhs_ptr0 += 4; + + int32_t rhs0 = arm_nn_read_s8x4(rhs_ptr); + int32_t rhs1 = SXTB16_RORn((uint32_t)rhs0, 8); + rhs0 = SXTB16(rhs0); + rhs_ptr += 4; + + result0 = SMLAD(lhs00, rhs0, result0); + result0 = SMLAD(lhs01, rhs1, result0); + result1 = SMLAD(lhs10, rhs0, result1); + result1 = SMLAD(lhs11, rhs1, result1); + } + + for (int32_t i = 0; i < (input_channels & 0b11); i++) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp + lhs_offset; + lhs_temp += input_channels; + const int32_t lhs_val10 = *lhs_temp + lhs_offset; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + result1 += lhs_val10 * rhs_val0; + } + + int32_t *output_temp = output_ptr0; + *output_ptr0 += result0; + output_temp += stride_x * output_channels; + *output_temp += result1; + + output_ptr0 += output_channels; +#else + int32_t rhs_sum = 0; + #if defined(ARM_MATH_MVEI) + int channel_count = input_channels; + for (int channel_i = 0; channel_i < (input_channels + 15) / 16; channel_i++) + { + mve_pred16_t p0 = vctp8q((uint32_t)channel_count); + channel_count -= 16; + + const int8_t *lhs_temp = lhs_ptr0; + int8x16_t v_lhs00 = vldrbq_z_s8(lhs_temp, p0); + lhs_temp += input_channels; + int8x16_t v_lhs10 = vldrbq_z_s8(lhs_temp, p0); + lhs_ptr0 += 16; + int8x16_t v_rhs0 = vldrbq_z_s8(rhs_ptr, p0); + rhs_ptr += 16; + + result0 = vmladavaq_s8(result0, v_lhs00, v_rhs0); + result1 = vmladavaq_s8(result1, v_lhs10, v_rhs0); + + rhs_sum = vaddvaq_s8(rhs_sum, v_rhs0); + } + + rhs_ptr += channel_count; + + #else + for (int32_t channel_count = 0; channel_count < input_channels; channel_count++) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp; + lhs_temp += input_channels; + const 
int32_t lhs_val10 = *lhs_temp; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + result1 += lhs_val10 * rhs_val0; + + rhs_sum += rhs_val0; + } + #endif + int32_t *output_temp = output_ptr0; + *output_ptr0 += result0 + rhs_sum * lhs_offset; + output_temp += stride_x * output_channels; + *output_temp += result1 + rhs_sum * lhs_offset; + + output_ptr0 += output_channels; +#endif + } + + // Next row, wrapping around the circular buffer + index = (index + row_offset) % output_max; + } + + // Next output_channel + ++output_start; + rhs_ptr += skip_post_rows; + } + + output_start += (2 * stride_x - 1) * output_channels; + lhs += 2 * input_channels; + } + + if (input_count & 0b1) + { + const int8_t *rhs_ptr = rhs; + + for (int32_t i_out_channel = 0; i_out_channel < output_channels; i_out_channel++) + { + int32_t index = output_index; + rhs_ptr += skip_pre_rows; + + for (int32_t i_row = 0; i_row < rhs_rows_count; i_row++) + { + int32_t *output_ptr0 = output_start + index; + + for (int32_t i_col = 0; i_col < rhs_cols; i_col++) + { + const int8_t *lhs_ptr0 = lhs; + + int32_t result0 = 0; +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + for (int32_t channel_count = input_channels; channel_count > 3; channel_count -= 4) + { + const int8_t *lhs_temp = lhs_ptr0; + int32_t lhs00 = arm_nn_read_s8x4(lhs_temp); + int32_t lhs01 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)lhs00, 8); + lhs00 = SXTAB16(lhs_offset_s16x2, lhs00); + lhs_ptr0 += 4; + + int32_t rhs0 = arm_nn_read_s8x4(rhs_ptr); + int32_t rhs1 = SXTB16_RORn((uint32_t)rhs0, 8); + rhs0 = SXTB16(rhs0); + rhs_ptr += 4; + + result0 = SMLAD(lhs00, rhs0, result0); + result0 = SMLAD(lhs01, rhs1, result0); + } + + for (int32_t i = 0; i < (input_channels & 0b11); i++) + { + const int8_t *lhs_temp = lhs_ptr0; + const int32_t lhs_val00 = *lhs_temp + 
lhs_offset; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += lhs_val00 * rhs_val0; + } +#else + #if defined(ARM_MATH_MVEI) + int channel_count = input_channels; + for (int channel_i = 0; channel_i < (input_channels + 15) / 16; channel_i++) + { + mve_pred16_t p0 = vctp8q((uint32_t)channel_count); + channel_count -= 16; + + int8x16_t v_lhs00 = vldrbq_z_s8(lhs_ptr0, p0); + lhs_ptr0 += 16; + int8x16_t v_rhs0 = vldrbq_z_s8(rhs_ptr, p0); + rhs_ptr += 16; + + result0 = vmladavaq_s8(result0, v_lhs00, v_rhs0); + + int32_t rhs_sum = vaddvaq_s8(0, v_rhs0); + result0 += rhs_sum * lhs_offset; + } + + rhs_ptr += channel_count; + #else + for (int32_t channel_count = 0; channel_count < input_channels; channel_count++) + { + const int32_t lhs_val00 = *lhs_ptr0; + lhs_ptr0++; + + const int32_t rhs_val0 = *rhs_ptr++; + + result0 += (lhs_val00 + lhs_offset) * rhs_val0; + } + #endif +#endif + *output_ptr0 += result0; + output_ptr0 += output_channels; + } + + // Next row, wrapping around the circular buffer + index = (index + row_offset) % output_max; + } + + // Next output_channel + ++output_start; + rhs_ptr += skip_post_rows; + } + } + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s16.c new file mode 100644 index 0000000..b028902 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s16.c @@ -0,0 +1,362 @@ +/* + * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mul_result_acc_s16 + * Description: s16 vector by matrix (transposed) multiplication + * + * $Date: 26 March 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s16 vector(lhs) by matrix (transposed) multiplication with result accumulation + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mul_result_acc_s16(const int16_t *lhs, + const int8_t *rhs, + const int64_t *effective_bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t batches, + const int32_t batch_offset) +{ + + int32_t reduced_multiplier = REDUCE_MULTIPLIER(dst_multiplier); + + for (int batch = 0; batch < batches; batch++) + { + + const int8_t *rhs_ptr = &rhs[0]; + const int64_t *effective_bias_ptr = &effective_bias[0]; + +#if defined(ARM_MATH_DSP) + + int32_t rhs_cols_fast = rhs_cols; + + if (rhs_cols > MAX_COL_COUNT) + { + rhs_cols_fast = MAX_COL_COUNT; + } + + #if defined(ARM_MATH_MVEI) + int32_t row_loop_cnt = rhs_rows / 4; + const int32_t col_loop_cnt = (rhs_cols_fast + 7) / 8; + + for (int32_t i_row_loop_count = 0; i_row_loop_count < row_loop_cnt; i_row_loop_count++) + { + int32_t col_cnt = rhs_cols_fast; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = rhs_ptr; + const int8_t *rhs_ptr_1 = rhs_ptr + rhs_cols; + const int8_t *rhs_ptr_2 = rhs_ptr + rhs_cols * 2; + const int8_t *rhs_ptr_3 = rhs_ptr + rhs_cols * 3; + + int32_t result_0 = *effective_bias_ptr++; + int32_t result_1 = *effective_bias_ptr++; + int32_t result_2 = *effective_bias_ptr++; + int32_t result_3 = *effective_bias_ptr++; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred); + + int16x8_t rhs_input_0 = vldrbq_z_s16(rhs_ptr_0, pred); + int16x8_t rhs_input_1 = vldrbq_z_s16(rhs_ptr_1, pred); + int16x8_t rhs_input_2 = vldrbq_z_s16(rhs_ptr_2, pred); + int16x8_t rhs_input_3 = vldrbq_z_s16(rhs_ptr_3, pred); + + result_0 = vmladavaq_s16(result_0, lhs_input, rhs_input_0); + result_1 = vmladavaq_s16(result_1, lhs_input, rhs_input_1); + result_2 = vmladavaq_s16(result_2, lhs_input, rhs_input_2); + result_3 = vmladavaq_s16(result_3, 
lhs_input, rhs_input_3); + + lhs_ptr += 8; + + rhs_ptr_0 += 8; + rhs_ptr_1 += 8; + rhs_ptr_2 += 8; + rhs_ptr_3 += 8; + } + + int64_t result_64_0 = result_0; + int64_t result_64_1 = result_1; + int64_t result_64_2 = result_2; + int64_t result_64_3 = result_3; + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64_0 += *rhs_ptr_0++ * lhs_temp; + result_64_1 += *rhs_ptr_1++ * lhs_temp; + result_64_2 += *rhs_ptr_2++ * lhs_temp; + result_64_3 += *rhs_ptr_3++ * lhs_temp; + } + } + + int32_t tmp; + tmp = arm_nn_requantize_s64(result_64_0, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_1, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_2, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_3, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + rhs_ptr += 4 * rhs_cols; + } + + for (int8_t rows_left = rhs_rows & 0x3; rows_left > 0; rows_left--) + { + int32_t result = *effective_bias_ptr++; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr0 = rhs_ptr; + + int32_t col_cnt = (int32_t)rhs_cols_fast; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred); + int16x8_t rhs_input = vldrbq_z_s16(rhs_ptr0, pred); + + result = vmladavaq_p_s16(result, lhs_input, rhs_input, pred); + + lhs_ptr += 8; + rhs_ptr0 
+= 8; + } + + int64_t result_64 = result; + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64 += *rhs_ptr0++ * lhs_temp; + } + } + + int32_t tmp = 0; + tmp = arm_nn_requantize_s64(result_64, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + rhs_ptr += rhs_cols; + } + + #else // ARM_MATH_MVEI + + const int32_t row_loop_cnt = rhs_rows / 2; + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + + int64_t acc_64_0 = 0; + int64_t acc_64_1 = 0; + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_1 = rhs_ptr; + rhs_ptr += rhs_cols; + + for (int j = col_loop_cnt; j != 0; j--) + { + int32_t ker_0, ker_1, vec_part_0, vec_part_1; + + vec_part_0 = arm_nn_read_q15x2_ia(&lhs_vec); + vec_part_1 = arm_nn_read_q15x2_ia(&lhs_vec); + + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + acc_0 = SMLAD(ker_0, vec_part_0, acc_0); + acc_0 = SMLAD(ker_1, vec_part_1, acc_0); + + rhs_1 = read_and_pad(rhs_1, &ker_0, &ker_1); + + acc_1 = SMLAD(ker_0, vec_part_0, acc_1); + acc_1 = SMLAD(ker_1, vec_part_1, acc_1); + } + + acc_64_0 += acc_0; + acc_64_1 += acc_1; + + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_64_1 += lhs_temp * (*rhs_1); + rhs_1++; + } + + acc_64_0 += *effective_bias_ptr++; + acc_64_1 += *effective_bias_ptr++; + int32_t tmp; + + tmp = arm_nn_requantize_s64(acc_64_0, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + + tmp = arm_nn_requantize_s64(acc_64_1, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = 
MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + } + + if (rhs_rows & 0x1) + { + int64_t acc_64_0 = 0; + int32_t acc_0 = 0; + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t ker_0, ker_1, vec; + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = SMLAD(ker_0, vec, acc_0); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = SMLAD(ker_1, vec, acc_0); + } + + acc_64_0 += acc_0; + + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + } + + acc_64_0 += *effective_bias_ptr++; + + int32_t tmp; + tmp = arm_nn_requantize_s64(acc_64_0, reduced_multiplier, dst_shift); + tmp += (int64_t)*dst; + tmp = MAX(tmp, NN_Q15_MIN); + tmp = MIN(tmp, NN_Q15_MAX); + *dst++ = (int16_t)tmp; + } + + #endif // ARM_MATH_MVEI +#else // ARM_MATH_DSP + for (int i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows; i_row_loop_cnt++) + { + const int16_t *lhs_ptr = lhs; + + int64_t result = *effective_bias_ptr++; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int64_t rhs_value0 = (int8_t)*rhs_ptr; + const int64_t lhs_value = *lhs_ptr; + + result += lhs_value * rhs_value0; + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + result = arm_nn_requantize_s64(result, reduced_multiplier, dst_shift); + result += (int64_t)*dst; + + // Clamp the result + result = ((result) > (NN_Q15_MIN) ? (result) : (NN_Q15_MIN)); + result = ((result) < (NN_Q15_MAX) ? 
(result) : (NN_Q15_MAX)); + + *dst++ = (int16_t)result; + } +#endif // ARM_MATH_DSP + + lhs += rhs_cols * batch_offset; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s8_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s8_s16.c new file mode 100644 index 0000000..54ee927 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mul_result_acc_s8_s16.c @@ -0,0 +1,351 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mul_result_acc_s8_s16.c + * Description: Multiplies a matrix by a vector and accumulate with output. + * + * $Date: 19 January 2024 + * $Revision: V.2.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportLSTM + * @{ + */ + +/* + * Refer to header file for details. 
+ */ +arm_cmsis_nn_status arm_nn_vec_mat_mul_result_acc_s8_s16(const int8_t *lhs, + const int8_t *rhs, + const int32_t *effective_bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t batches, + const int32_t batch_offset) +{ + + for (int batch = 0; batch < batches; batch++) + { + const int8_t *rhs_ptr = &rhs[0]; + const int32_t *effective_bias_ptr = &effective_bias[0]; + +#if defined(ARM_MATH_MVEI) + + for (size_t row_loop_cnt = rhs_rows / 4; row_loop_cnt != 0; --row_loop_cnt) + { + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_1 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_2 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_3 = rhs_ptr; + rhs_ptr += rhs_cols; + + int32_t acc_0 = *effective_bias_ptr++; + int32_t acc_1 = *effective_bias_ptr++; + int32_t acc_2 = *effective_bias_ptr++; + int32_t acc_3 = *effective_bias_ptr++; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1, p); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2, p); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + const int8x16_t ker_3 = vldrbq_z_s8(rhs_3, p); + acc_3 = vmladavaq_s8(acc_3, ker_3, input); + + lhs_vec += 16; + rhs_0 += 16; + rhs_1 += 16; + rhs_2 += 16; + rhs_3 += 16; + } + + int32x4_t acc = {acc_0, acc_1, acc_2, acc_3}; + + acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); + acc = vaddq_s32(acc, vldrhq_s32(dst)); + + acc = vmaxq_s32(acc, vdupq_n_s32(NN_Q15_MIN)); + acc = vminq_s32(acc, vdupq_n_s32(NN_Q15_MAX)); + + vstrhq_s32(dst, acc); 
+ dst += 4; + } + + for (size_t row_loop_cnt = rhs_rows % 4; row_loop_cnt != 0; --row_loop_cnt) + { + int32_t acc_0 = *effective_bias_ptr++; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_0 += 16; + } + rhs_ptr += rhs_cols; + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_0 += *dst; + + // Clamp the result + acc_0 = MAX(acc_0, NN_Q15_MIN); + acc_0 = MIN(acc_0, NN_Q15_MAX); + *dst++ = (int16_t)acc_0; + } + +#elif defined(ARM_MATH_DSP) + + for (int32_t row_loop_cnt = rhs_rows / 2; row_loop_cnt != 0; --row_loop_cnt) + { + int32_t acc_0 = *effective_bias_ptr++; + int32_t acc_1 = *effective_bias_ptr++; + + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_1 = rhs_ptr; + rhs_ptr += rhs_cols; + + for (int j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTB16_RORn((uint32_t)vec_0, 8); + vec_0 = SXTB16(vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_1 += lhs_temp * (*rhs_1); + rhs_1++; + } + + acc_0 = 
arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift); + + // Add offset + acc_0 += *dst; + // Clamp the result + acc_0 = MAX(acc_0, NN_Q15_MIN); + acc_0 = MIN(acc_0, NN_Q15_MAX); + *dst++ = (int16_t)acc_0; + + acc_1 += *dst; + acc_1 = MAX(acc_1, NN_Q15_MIN); + acc_1 = MIN(acc_1, NN_Q15_MAX); + + *dst++ = (int16_t)acc_1; + } + + if (rhs_rows & 0x1) + { + int32_t acc_0 = *effective_bias_ptr++; + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs_ptr; + + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTB16_RORn((uint32_t)vec_0, 8); + vec_0 = SXTB16(vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0); + rhs_0++; + } + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Accumulate + acc_0 += dst[0]; + // Clamp the result + acc_0 = MAX(acc_0, NN_Q15_MIN); + acc_0 = MIN(acc_0, NN_Q15_MAX); + *dst++ = (int16_t)acc_0; + } + +#else + + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_ptr_1 = rhs_ptr; + rhs_ptr += rhs_cols; + const int8_t *rhs_ptr_2 = rhs_ptr; + rhs_ptr += rhs_cols; + + int32_t res00 = *effective_bias_ptr++; + int32_t res01 = *effective_bias_ptr++; + int32_t res02 = *effective_bias_ptr++; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1; + const int32_t 
rhs_value2 = (int8_t)*rhs_ptr_2; + const int32_t lhs_value = (int8_t)*lhs_ptr; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); + + // Add offset + res00 += (int32_t)*dst; + res00 = CLAMP(res00, NN_Q15_MAX, NN_Q15_MIN); + *dst++ = (int16_t)res00; + + res01 += (int32_t)*dst; + res01 = CLAMP(res01, NN_Q15_MAX, NN_Q15_MIN); + *dst++ = (int16_t)res01; + + res02 += (int32_t)*dst; + res02 = CLAMP(res02, NN_Q15_MAX, NN_Q15_MIN); + *dst++ = (int16_t)res02; + } + + const int loop_cnt = rhs_rows % 3; + + for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr_0 = &rhs_ptr[0]; + + int32_t res00 = *effective_bias_ptr++; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr_0[0]; + int32_t lhs_value = (int8_t)lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr_0; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Accumulate + res00 += (int32_t)dst[0]; + + // Clamp the result + res00 = CLAMP(res00, NN_Q15_MAX, NN_Q15_MIN); + + *dst++ = (int16_t)res00; + rhs_ptr += rhs_cols; + } +#endif + + lhs += rhs_cols * batch_offset; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of supportLSTM group + */ \ No newline at end of file diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_per_ch_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_per_ch_s8.c new file mode 100644 index 0000000..94b46af --- /dev/null +++ 
b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_per_ch_s8.c @@ -0,0 +1,731 @@ +/* + * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_per_ch_s8 + * Description: s8 vector by matrix (transposed) multiplication + * + * $Date: 5 Sep 2024 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @defgroup supportFC Fully Connected + * + * Support functions for Fully Connected + * + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s8 vector(lhs) by matrix (transposed) multiplication and per channel quant output + * + * Refer header file for details. 
+ * + */ +#if !defined(ARM_MATH_MVEI) && defined(ARM_MATH_DSP) && !defined(__ARMCC_VERSION) && !defined(__ICCARM__) + #pragma GCC optimize("unroll-loops") +#endif +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_per_ch_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *kernel_sum, + const int32_t *bias, + int8_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t *dst_multiplier, + const int32_t *dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset, + const int32_t rhs_offset) +{ + if (rhs_offset) + { +#if defined(ARM_MATH_MVEI) + (void)bias; + (void)lhs_offset; + const int32_t row_loop_cnt = rhs_rows / 4; + const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3}; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + int32_t acc_1 = *kernel_sum++; + int32_t acc_2 = *kernel_sum++; + int32_t acc_3 = *kernel_sum++; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + const int8_t *rhs_2_ptr = rhs + 2 * rhs_cols; + const int8_t *rhs_3_ptr = rhs + 3 * rhs_cols; + + int32_t lhs_sum = 0; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + lhs_sum = vaddvaq_s8(lhs_sum, input); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1_ptr, p); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2_ptr, p); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + const int8x16_t ker_3 = vldrbq_z_s8(rhs_3_ptr, p); + acc_3 = vmladavaq_s8(acc_3, ker_3, input); + + 
lhs_vec += 16; + rhs_0_ptr += 16; + rhs_1_ptr += 16; + rhs_2_ptr += 16; + rhs_3_ptr += 16; + } + rhs += 4 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, acc_3}; + + acc += vdupq_n_s32(rhs_offset) * vdupq_n_s32(lhs_sum); + + acc = arm_requantize_mve(acc, *dst_multiplier++, *dst_shift++); + + acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, address_offset_array, acc); + + dst += 4 * address_offset; + } + + const int loop_cnt = rhs_rows % 4; + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + int32_t lhs_sum = 0; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + lhs_sum = vaddvaq_s8(lhs_sum, input); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_ptr += 16; + } + rhs += rhs_cols; + + acc_0 += lhs_sum * rhs_offset; + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = MIN(acc_0, activation_max); + dst += address_offset; + } + +#elif defined(ARM_MATH_DSP) + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 2; + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + const int16_t rhs_offset_s16 = (int16_t)rhs_offset; + const uint32_t rhs_offset_s16x2 = PKHBT(rhs_offset_s16, rhs_offset_s16, 16); + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + if (bias) + { + acc_0 = *bias++; + acc_1 = 
*bias++; + } + + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int32_t j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0_ptr); + int32_t ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1_ptr); + ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + for (int32_t k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0_ptr + rhs_offset); + rhs_0_ptr++; + acc_1 += lhs_temp * (*rhs_1_ptr + rhs_offset); + rhs_1_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + acc_1 = arm_nn_requantize(acc_1, *dst_multiplier++, *dst_shift++); + + // Add offset + acc_0 += dst_offset; + acc_1 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int8_t)acc_0; + *(dst + address_offset) = (int8_t)acc_1; + dst += 2 * address_offset; + } + + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + if (bias) + { + acc_0 = *bias++; + } + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + + for (int32_t i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + 
vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_ptr); + int32_t ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + + for (int32_t j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_ptr + rhs_offset); + rhs_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + + // Add offset + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int8_t)acc_0; + dst += address_offset; + } + +#else + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + const int8_t *rhs_ptr_1 = &rhs[rhs_cols]; + const int8_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res02 = 0; + if (bias) + { + res00 = *bias++; + res01 = *bias++; + res02 = *bias++; + } + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0 + rhs_offset; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1 + rhs_offset; + const int32_t rhs_value2 = (int8_t)*rhs_ptr_2 + rhs_offset; + const int32_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, *dst_multiplier++, *dst_shift++); + res01 = arm_nn_requantize(res01, *dst_multiplier++, *dst_shift++); + res02 = arm_nn_requantize(res02, *dst_multiplier++, *dst_shift++); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; 
+ res02 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (int8_t)res00; + *(dst + address_offset) = (int8_t)res01; + *(dst + 2 * address_offset) = (int8_t)res02; + dst += 3 * address_offset; + + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int32_t i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = 0; + if (bias) + { + res00 = *bias++; + } + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr[0] + rhs_offset; + int32_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, *dst_multiplier++, *dst_shift++); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (int8_t)res00; + dst += address_offset; + rhs += rhs_cols; + } +#endif + } + + else + { +#if defined(ARM_MATH_MVEI) + const int32_t row_loop_cnt = rhs_rows / 4; + const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3}; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + int32_t acc_1 = *kernel_sum++; + int32_t acc_2 = *kernel_sum++; + int32_t acc_3 = *kernel_sum++; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + const int8_t *rhs_2_ptr = rhs + 2 * rhs_cols; + const int8_t *rhs_3_ptr = rhs + 3 * rhs_cols; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for 
(int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1_ptr, p); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2_ptr, p); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + const int8x16_t ker_3 = vldrbq_z_s8(rhs_3_ptr, p); + acc_3 = vmladavaq_s8(acc_3, ker_3, input); + + lhs_vec += 16; + rhs_0_ptr += 16; + rhs_1_ptr += 16; + rhs_2_ptr += 16; + rhs_3_ptr += 16; + } + rhs += 4 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, acc_3}; + + acc = arm_requantize_mve_32x4(acc, vldrwq_s32(dst_multiplier), vldrwq_s32(dst_shift)); + dst_multiplier += 4; + dst_shift += 4; + + acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, address_offset_array, acc); + + dst += 4 * address_offset; + } + + const int loop_cnt = rhs_rows % 4; + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_ptr += 16; + } + rhs += rhs_cols; + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = MIN(acc_0, activation_max); + dst += address_offset; + } + +#elif defined(ARM_MATH_DSP) + 
(void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 2; + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + if (bias) + { + acc_0 = *bias++; + acc_1 = *bias++; + } + + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int32_t j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0_ptr); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1_ptr); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + for (int32_t k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0_ptr); + rhs_0_ptr++; + acc_1 += lhs_temp * (*rhs_1_ptr); + rhs_1_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + acc_1 = arm_nn_requantize(acc_1, *dst_multiplier++, *dst_shift++); + + // Add offset + acc_0 += dst_offset; + acc_1 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int8_t)acc_0; + *(dst + address_offset) = (int8_t)acc_1; + dst += 2 * address_offset; + } + + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + if (bias) + { + acc_0 = *bias++; + } + const int32_t col_loop_cnt = rhs_cols / 4; + + 
const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + + for (int32_t i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_ptr); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + + for (int32_t j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_ptr); + rhs_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, *dst_multiplier++, *dst_shift++); + + // Add offset + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int8_t)acc_0; + dst += address_offset; + } + +#else + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + const int8_t *rhs_ptr_1 = &rhs[rhs_cols]; + const int8_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res02 = 0; + if (bias) + { + res00 = *bias++; + res01 = *bias++; + res02 = *bias++; + } + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1; + const int32_t rhs_value2 = (int8_t)*rhs_ptr_2; + const int32_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + // Quantize down + res00 = arm_nn_requantize(res00, *dst_multiplier++, *dst_shift++); + res01 = arm_nn_requantize(res01, *dst_multiplier++, 
*dst_shift++); + res02 = arm_nn_requantize(res02, *dst_multiplier++, *dst_shift++); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res02 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (int8_t)res00; + *(dst + address_offset) = (int8_t)res01; + *(dst + 2 * address_offset) = (int8_t)res02; + dst += 3 * address_offset; + + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int32_t i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = 0; + if (bias) + { + res00 = *bias++; + } + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr[0]; + int32_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, *dst_multiplier++, *dst_shift++); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (int8_t)res00; + dst += address_offset; + rhs += rhs_cols; + } +#endif + } + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c new file mode 100644 index 0000000..225795f --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c @@ -0,0 +1,367 @@ +/* + * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 
(the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_s16 + * Description: s16 vector by s8 matrix (transposed) multiplication + * + * $Date: 19 June 2024 + * $Revision: V.2.4.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s16 vector(lhs) by s8 matrix (transposed) multiplication + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16(const int16_t *lhs, + const int8_t *rhs, + const int64_t *bias, + int16_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max) +{ +#if defined(ARM_MATH_DSP) + + int32_t rhs_cols_fast = rhs_cols; + + if (rhs_cols > MAX_COL_COUNT) + { + rhs_cols_fast = MAX_COL_COUNT; + } + + #if defined(ARM_MATH_MVEI) + int32_t row_loop_cnt = rhs_rows / 4; + int32_t col_loop_cnt = (rhs_cols_fast + 7) / 8; + + for (int32_t i_row_loop_count = 0; i_row_loop_count < row_loop_cnt; i_row_loop_count++) + { + int32_t col_cnt = rhs_cols_fast; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = rhs; + const int8_t *rhs_ptr_1 = rhs + rhs_cols; + const int8_t *rhs_ptr_2 = rhs + rhs_cols * 2; + const int8_t *rhs_ptr_3 = rhs + rhs_cols * 3; + + int32_t result_0 = 0; + int32_t result_1 = 0; + int32_t result_2 = 0; + int32_t result_3 = 0; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred); + + int16x8_t rhs_input_0 = vldrbq_z_s16(rhs_ptr_0, pred); + int16x8_t rhs_input_1 = vldrbq_z_s16(rhs_ptr_1, pred); + int16x8_t rhs_input_2 = vldrbq_z_s16(rhs_ptr_2, pred); + int16x8_t rhs_input_3 = vldrbq_z_s16(rhs_ptr_3, pred); + + result_0 = vmladavaq_s16(result_0, lhs_input, rhs_input_0); + result_1 = vmladavaq_s16(result_1, lhs_input, rhs_input_1); + result_2 = vmladavaq_s16(result_2, lhs_input, rhs_input_2); + result_3 = vmladavaq_s16(result_3, lhs_input, rhs_input_3); + + lhs_ptr += 8; + + rhs_ptr_0 += 8; + rhs_ptr_1 += 8; + rhs_ptr_2 += 8; + rhs_ptr_3 += 8; + } + + int64_t result_64_0 = result_0; + int64_t result_64_1 = result_1; + int64_t result_64_2 = result_2; + int64_t result_64_3 = result_3; + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; 
i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64_0 += *rhs_ptr_0++ * lhs_temp; + result_64_1 += *rhs_ptr_1++ * lhs_temp; + result_64_2 += *rhs_ptr_2++ * lhs_temp; + result_64_3 += *rhs_ptr_3++ * lhs_temp; + } + } + + if (bias) + { + result_64_0 += *bias++; + result_64_1 += *bias++; + result_64_2 += *bias++; + result_64_3 += *bias++; + } + + int32_t tmp; + tmp = arm_nn_requantize_s64(result_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_1, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_2, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_3, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + rhs += 4 * rhs_cols; + } + + for (int8_t rows_left = rhs_rows & 0x3; rows_left > 0; rows_left--) + { + int32_t result = 0; + + col_loop_cnt = (rhs_cols_fast + 7) / 8; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr = rhs; + + int32_t col_cnt = (int32_t)rhs_cols_fast; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred); + int16x8_t rhs_input = vldrbq_z_s16(rhs_ptr, pred); + + result = vmladavaq_p_s16(result, lhs_input, rhs_input, pred); + + lhs_ptr += 8; + rhs_ptr += 8; + } + + int64_t result_64 = result; + + if (bias) + { + result_64 += *bias++; + } + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64 += *rhs_ptr++ * 
lhs_temp; + } + } + + int32_t tmp = 0; + tmp = arm_nn_requantize_s64(result_64, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + rhs += rhs_cols; + } + + #else // ARM_MATH_MVEI + + const int32_t row_loop_cnt = rhs_rows / 2; + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + + int64_t acc_64_0 = 0; + int64_t acc_64_1 = 0; + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int j = col_loop_cnt; j != 0; j--) + { + int32_t ker_0, ker_1, vec_part_0, vec_part_1; + + vec_part_0 = arm_nn_read_q15x2_ia(&lhs_vec); + vec_part_1 = arm_nn_read_q15x2_ia(&lhs_vec); + + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + acc_0 = SMLAD(ker_0, vec_part_0, acc_0); + acc_0 = SMLAD(ker_1, vec_part_1, acc_0); + + rhs_1 = read_and_pad(rhs_1, &ker_0, &ker_1); + + acc_1 = SMLAD(ker_0, vec_part_0, acc_1); + acc_1 = SMLAD(ker_1, vec_part_1, acc_1); + } + + acc_64_0 += acc_0; + acc_64_1 += acc_1; + + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_64_1 += lhs_temp * (*rhs_1); + rhs_1++; + } + + if (bias) + { + acc_64_0 += *bias++; + acc_64_1 += *bias++; + } + int32_t tmp; + + tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + + tmp = arm_nn_requantize_s64(acc_64_1, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + } + + if (rhs_rows & 0x1) + { + int64_t acc_64_0 = 0; + int32_t acc_0 = 0; + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t 
ker_0, ker_1, vec; + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = SMLAD(ker_0, vec, acc_0); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = SMLAD(ker_1, vec, acc_0); + } + + acc_64_0 += acc_0; + + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + } + + if (bias) + { + acc_64_0 += *bias++; + } + int32_t tmp; + tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (int16_t)tmp; + } + + #endif // ARM_MATH_MVEI +#else // ARM_MATH_DSP + for (int i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows; i_row_loop_cnt++) + { + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + + int64_t result = 0; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int64_t rhs_value0 = (int8_t)*rhs_ptr_0; + const int64_t lhs_value = *lhs_ptr; + + result += lhs_value * rhs_value0; + + ++rhs_ptr_0; + ++lhs_ptr; + } + + if (bias) + { + result += *bias++; + } + // Quantize down + result = arm_nn_requantize_s64(result, dst_multiplier, dst_shift); + + // Clamp the result + result = MAX(result, activation_min); + result = MIN(result, activation_max); + + *dst++ = (int16_t)result; + rhs += rhs_cols; + } +#endif // ARM_MATH_DSP + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16_s16.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16_s16.c new file mode 100644 index 0000000..817ee91 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16_s16.c @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache 
License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_s16_s16 + * Description: s16 vector by s16 matrix (transposed) multiplication + * + * $Date: 19 June 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s16 vector(lhs) by s16 matrix (transposed) multiplication + * + * Refer header file for details. 
+ *
+ */
+arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16_s16(const int16_t *lhs,
+                                                  const int16_t *rhs,
+                                                  const int64_t *bias,
+                                                  int16_t *dst,
+                                                  const int32_t dst_multiplier,
+                                                  const int32_t dst_shift,
+                                                  const int32_t rhs_cols,
+                                                  const int32_t rhs_rows,
+                                                  const int32_t activation_min,
+                                                  const int32_t activation_max)
+{
+    /*
+     * Dot product of one int16 vector (lhs, rhs_cols elements) with every row
+     * of a transposed int16 matrix (rhs, rhs_rows x rhs_cols).  Three builds
+     * of the same kernel are selected below:
+     *   - ARM_MATH_MVEI: Helium path, 4 output rows per pass, 8 lanes per load
+     *   - ARM_MATH_DSP:  SMLALD path, 2 output rows per pass
+     *   - fallback:      plain C, one output row at a time
+     * Every path accumulates into an int64, optionally adds a per-row int64
+     * bias, requantizes via arm_nn_requantize_s64(dst_multiplier, dst_shift)
+     * and clamps to [activation_min, activation_max] before storing to dst.
+     */
+#if defined(ARM_MATH_DSP)
+
+    #if defined(ARM_MATH_MVEI)
+    int32_t row_loop_cnt = rhs_rows / 4;
+    int32_t col_loop_cnt = (rhs_cols + 7) / 8;
+
+    // Main loop: 4 rhs rows per iteration against the single lhs vector.
+    for (int32_t i_row_loop_count = 0; i_row_loop_count < row_loop_cnt; i_row_loop_count++)
+    {
+        int32_t col_cnt = rhs_cols;
+
+        const int16_t *lhs_ptr = lhs;
+        const int16_t *rhs_ptr_0 = rhs;
+        const int16_t *rhs_ptr_1 = rhs + rhs_cols;
+        const int16_t *rhs_ptr_2 = rhs + rhs_cols * 2;
+        const int16_t *rhs_ptr_3 = rhs + rhs_cols * 3;
+
+        int64_t result_0 = 0;
+        int64_t result_1 = 0;
+        int64_t result_2 = 0;
+        int64_t result_3 = 0;
+
+        for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++)
+        {
+            // Tail predication: lanes beyond col_cnt are zeroed by the z-loads,
+            // so the final partial group of 8 columns needs no scalar cleanup.
+            mve_pred16_t pred = vctp16q(col_cnt);
+            col_cnt -= 8;
+
+            int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred);
+
+            int16x8_t rhs_input_0 = vldrhq_z_s16(rhs_ptr_0, pred);
+            int16x8_t rhs_input_1 = vldrhq_z_s16(rhs_ptr_1, pred);
+            int16x8_t rhs_input_2 = vldrhq_z_s16(rhs_ptr_2, pred);
+            int16x8_t rhs_input_3 = vldrhq_z_s16(rhs_ptr_3, pred);
+
+            result_0 = vmlaldavaq_s16(result_0, lhs_input, rhs_input_0);
+            result_1 = vmlaldavaq_s16(result_1, lhs_input, rhs_input_1);
+            result_2 = vmlaldavaq_s16(result_2, lhs_input, rhs_input_2);
+            result_3 = vmlaldavaq_s16(result_3, lhs_input, rhs_input_3);
+
+            lhs_ptr += 8;
+
+            rhs_ptr_0 += 8;
+            rhs_ptr_1 += 8;
+            rhs_ptr_2 += 8;
+            rhs_ptr_3 += 8;
+        }
+
+        if (bias)
+        {
+            result_0 += *bias++;
+            result_1 += *bias++;
+            result_2 += *bias++;
+            result_3 += *bias++;
+        }
+
+        // Requantize each 64-bit accumulator and clamp to the activation range.
+        int32_t tmp;
+        tmp = arm_nn_requantize_s64(result_0, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        tmp = 0;
+        tmp = arm_nn_requantize_s64(result_1, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        tmp = 0;
+        tmp = arm_nn_requantize_s64(result_2, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        tmp = 0;
+        tmp = arm_nn_requantize_s64(result_3, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        rhs += 4 * rhs_cols;
+    }
+
+    // 1-3 leftover rows: same predicated dot product, one row at a time.
+    for (int8_t rows_left = rhs_rows & 0x3; rows_left > 0; rows_left--)
+    {
+        int64_t result = 0;
+
+        col_loop_cnt = (rhs_cols + 7) / 8;
+
+        const int16_t *lhs_ptr = lhs;
+        const int16_t *rhs_ptr = rhs;
+
+        int32_t col_cnt = rhs_cols;
+
+        for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++)
+        {
+            mve_pred16_t pred = vctp16q(col_cnt);
+            col_cnt -= 8;
+
+            int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred);
+            int16x8_t rhs_input = vldrhq_z_s16(rhs_ptr, pred);
+
+            result = vmlaldavaq_p_s16(result, lhs_input, rhs_input, pred);
+
+            lhs_ptr += 8;
+            rhs_ptr += 8;
+        }
+
+        if (bias)
+        {
+            result += *bias++;
+        }
+
+        int32_t tmp = 0;
+        tmp = arm_nn_requantize_s64(result, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        rhs += rhs_cols;
+    }
+
+    #else // ARM_MATH_MVEI
+
+    // DSP path: two rhs rows per pass; SMLALD performs two 16x16 multiplies
+    // accumulated into the 64-bit accumulator per call.
+    const int32_t row_loop_cnt = rhs_rows / 2;
+
+    for (int32_t i = 0; i < row_loop_cnt; i++)
+    {
+
+        int64_t acc_0 = 0;
+        int64_t acc_1 = 0;
+
+        const int32_t col_loop_cnt = rhs_cols / 4;
+
+        const int16_t *lhs_vec = lhs;
+        const int16_t *rhs_0 = rhs;
+        const int16_t *rhs_1 = rhs + rhs_cols;
+        rhs += 2 * rhs_cols;
+
+        // 4 columns per iteration: two packed q15x2 reads per operand.
+        for (int j = col_loop_cnt; j != 0; j--)
+        {
+            int32_t ker_0, ker_1, vec_part_0, vec_part_1;
+
+            vec_part_0 = arm_nn_read_q15x2_ia(&lhs_vec);
+            vec_part_1 = arm_nn_read_q15x2_ia(&lhs_vec);
+
+            ker_0 = arm_nn_read_q15x2_ia(&rhs_0);
+            ker_1 = arm_nn_read_q15x2_ia(&rhs_0);
+
+            acc_0 = SMLALD(ker_0, vec_part_0, acc_0);
+            acc_0 = SMLALD(ker_1, vec_part_1, acc_0);
+
+            ker_0 = arm_nn_read_q15x2_ia(&rhs_1);
+            ker_1 = arm_nn_read_q15x2_ia(&rhs_1);
+
+            acc_1 = SMLALD(ker_0, vec_part_0, acc_1);
+            acc_1 = SMLALD(ker_1, vec_part_1, acc_1);
+        }
+
+        // Scalar tail for the 0-3 columns not covered by the x4 loop.
+        for (int k = col_loop_cnt * 4; k < rhs_cols; k++)
+        {
+            const int16_t lhs_temp = (*lhs_vec);
+            lhs_vec++;
+            acc_0 += lhs_temp * (*rhs_0);
+            rhs_0++;
+            acc_1 += lhs_temp * (*rhs_1);
+            rhs_1++;
+        }
+
+        if (bias)
+        {
+            acc_0 += *bias++;
+            acc_1 += *bias++;
+        }
+        int32_t tmp;
+
+        tmp = arm_nn_requantize_s64(acc_0, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+
+        tmp = arm_nn_requantize_s64(acc_1, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+    }
+
+    // Odd final row, if any.
+    if (rhs_rows & 0x1)
+    {
+        int64_t acc_0 = 0;
+        const int32_t col_loop_cnt = rhs_cols / 4;
+
+        const int16_t *lhs_vec = lhs;
+        const int16_t *rhs_0 = rhs;
+
+        for (int i = col_loop_cnt; i != 0; i--)
+        {
+            int32_t ker_0, vec;
+
+            ker_0 = arm_nn_read_q15x2_ia(&rhs_0);
+            vec = arm_nn_read_q15x2_ia(&lhs_vec);
+            acc_0 = SMLALD(ker_0, vec, acc_0);
+
+            ker_0 = arm_nn_read_q15x2_ia(&rhs_0);
+            vec = arm_nn_read_q15x2_ia(&lhs_vec);
+            acc_0 = SMLALD(ker_0, vec, acc_0);
+        }
+
+        for (int j = col_loop_cnt * 4; j < rhs_cols; j++)
+        {
+            const int16_t lhs_temp = (*lhs_vec);
+            lhs_vec++;
+            acc_0 += lhs_temp * (*rhs_0);
+            rhs_0++;
+        }
+
+        if (bias)
+        {
+            acc_0 += *bias++;
+        }
+        int32_t tmp;
+        tmp = arm_nn_requantize_s64(acc_0, dst_multiplier, dst_shift);
+        tmp = MAX(tmp, activation_min);
+        tmp = MIN(tmp, activation_max);
+        *dst++ = (int16_t)tmp;
+    }
+
+    #endif // ARM_MATH_MVEI
+#else // ARM_MATH_DSP
+    // Pure C fallback: one output row per iteration.
+    for (int i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows; i_row_loop_cnt++)
+    {
+        const int16_t *lhs_ptr = lhs;
+        const int16_t *rhs_ptr_0 = &rhs[0];
+
+        int64_t result = 0;
+
+        for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
+        {
+            const int64_t rhs_value0 = *rhs_ptr_0;
+            const int64_t lhs_value = *lhs_ptr;
+
+            result += lhs_value * rhs_value0;
+
+            ++rhs_ptr_0;
+            ++lhs_ptr;
+        }
+
+        if (bias)
+        {
+            result += *bias++;
+        }
+        // Quantize down
+        result = arm_nn_requantize_s64(result, dst_multiplier, dst_shift);
+
+        // Clamp the result
+        result = MAX(result, activation_min);
+        result = MIN(result, activation_max);
+
+        *dst++ = (int16_t)result;
+        rhs += rhs_cols;
+    }
+#endif // ARM_MATH_DSP
+
+    return ARM_CMSIS_NN_SUCCESS;
+}
+
+/**
+ * @} end of Doxygen group
+ */
diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s4.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s4.c
new file mode 100644
index 0000000..e2237e5
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s4.c
@@ -0,0 +1,738 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project:      CMSIS NN Library
+ * Title:        arm_nn_vec_mat_mult_t_s4
+ * Description:  s4 vector by matrix (transposed) multiplication
+ *
+ * $Date:        26 April 2024
+ * $Revision:    V.2.0.0
+ *
+ * Target : Arm(R) M-Profile Architecture
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+/**
+ * @ingroup groupSupport
+ */
+
+/**
+ * @defgroup supportFC Fully Connected
+ *
+ * Support functions for Fully Connected
+ *
+ */
+
+/**
+ * @addtogroup supportFC
+ * @{
+ */
+
+/*
+ * s4 vector(lhs) by matrix (transposed) multiplication
+ *
+ * Refer header file for details.
+ *
+ * NOTE(review): packed_rhs stores two signed 4-bit weights per byte; the
+ * shift/sign-extend pairs below pair the low nibble with the even lhs element
+ * and the high nibble with the odd one.  rhs_offset (rhs_cols * row_loop_cnt
+ * bytes) appears to be the byte distance between the two interleaved halves
+ * of the packed matrix -- confirm against the weight-packing routine.
+ * Outputs are likewise written interleaved: even results at dst[i], odd
+ * results at dst[i + 2 * row_loop_cnt].
+ */
+arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s4(const int8_t *lhs,
+                                             const int8_t *packed_rhs,
+                                             const int32_t *bias,
+                                             int8_t *dst,
+                                             const int32_t lhs_offset,
+                                             const int32_t dst_offset,
+                                             const int32_t dst_multiplier,
+                                             const int32_t dst_shift,
+                                             const int32_t rhs_cols,
+                                             const int32_t rhs_rows,
+                                             const int32_t activation_min,
+                                             const int32_t activation_max)
+{
+    const int32_t row_loop_cnt = rhs_rows / 4;
+    const int rhs_offset = rhs_cols * row_loop_cnt;
+    const int8_t *rhs_ptr = &packed_rhs[0];
+
+#if defined(ARM_MATH_MVEI)
+    const int rhs_cols_offset = rhs_cols % 16;
+#else
+    const int rhs_cols_offset = rhs_cols;
+#endif
+
+#if defined(ARM_MATH_DSP)
+    const int16_t lhs_offset_s16 = (int16_t)lhs_offset;
+    const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16);
+#endif
+
+    // When rhs_cols is odd a packed byte straddles two rows: its high nibble
+    // belongs to the next row and is pre-multiplied into spillover0/1, which
+    // seed the following row's accumulator.
+    int32_t spillover0, spillover1;
+
+#if defined(ARM_MATH_MVEI)
+    const mve_pred16_t lower_nibble_mask = 21845; // 0101010101010101
+    // Duplicates each packed byte into two lanes so low/high nibbles can be
+    // split in-register with the masked shift below.
+    const uint8x16_t gather_offset = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7};
+    const int32_t col_loop_cnt = rhs_cols >> 5;
+    // Nonzero when 16 leftover int8 columns remain after the 32-column loop.
+    const int I6_elements_spill = rhs_cols & 0x10;
+
+    for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; ++i_row_loop_cnt)
+    {
+        const uint32x4_t scatter_offset = {0, 1, 2 * row_loop_cnt, 2 * row_loop_cnt + 1};
+        const int8_t *lhs_ptr = &lhs[0];
+
+        mve_pred16_t rmdr_mask = vctp8q(rhs_cols_offset);
+
+        int32_t acc0 = 0;
+        int32_t acc2 = 0;
+
+        int32_t rhs_sum_0 = 0;
+        int32_t rhs_sum_2 = 0;
+
+        if (bias)
+        {
+            acc0 += *bias;
+            acc2 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        // First pair of rows (acc0/acc2): 32 columns per iteration.
+        for (int i = 0; i < col_loop_cnt; i++)
+        {
+            const int8x16x2_t inputx2 = vld2q_s8(lhs_ptr);
+            const int8x16_t ker_0 = vldrbq_s8(rhs_ptr);
+
+            int8x16_t ker_low_0 = vrshlq_n_s8(ker_0, 4);
+            ker_low_0 = vshrq_n_s8(ker_low_0, 4);
+            int8x16_t ker_high_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_low_0);
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_high_0);
+            acc0 = vmladavaq_s8(acc0, ker_low_0, inputx2.val[0]);
+            acc0 = vmladavaq_s8(acc0, ker_high_0, inputx2.val[1]);
+
+            const int8x16_t ker_1 = vldrbq_s8(&rhs_ptr[rhs_offset]);
+            int8x16_t ker_low_1 = vrshlq_n_s8(ker_1, 4);
+            ker_low_1 = vshrq_n_s8(ker_low_1, 4);
+            int8x16_t ker_high_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_2 = vaddvaq_s8(rhs_sum_2, ker_low_1);
+            rhs_sum_2 = vaddvaq_s8(rhs_sum_2, ker_high_1);
+            acc2 = vmladavaq_s8(acc2, ker_low_1, inputx2.val[0]);
+            acc2 = vmladavaq_s8(acc2, ker_high_1, inputx2.val[1]);
+
+            lhs_ptr += 32;
+            rhs_ptr += 16;
+        }
+
+        if (I6_elements_spill)
+        {
+            const int8x16_t input = vldrbq_s8(lhs_ptr);
+
+            int8x16_t ker_0 = vldrbq_gather_offset_s8(rhs_ptr, gather_offset);
+            ker_0 = vrshlq_m_n_s8(ker_0, 4, lower_nibble_mask);
+            ker_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_0);
+            acc0 = vmladavaq_s8(acc0, ker_0, input);
+
+            int8x16_t ker_1 = vldrbq_gather_offset_s8(&rhs_ptr[rhs_offset], gather_offset);
+            ker_1 = vrshlq_m_n_s8(ker_1, 4, lower_nibble_mask);
+            ker_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_2 = vaddvaq_s8(rhs_sum_2, ker_1);
+            acc2 = vmladavaq_s8(acc2, ker_1, input);
+
+            lhs_ptr += 16;
+            rhs_ptr += 8;
+        }
+
+        // Predicated cleanup for the remaining (rhs_cols % 16) columns.
+        if (rmdr_mask)
+        {
+            const int8x16_t input = vldrbq_z_s8(lhs_ptr, rmdr_mask);
+
+            int8x16_t ker_0 = vldrbq_gather_offset_z_s8(rhs_ptr, gather_offset, rmdr_mask);
+            ker_0 = vrshlq_m_n_s8(ker_0, 4, lower_nibble_mask);
+            ker_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_0);
+            acc0 = vmladavaq_s8(acc0, ker_0, input);
+
+            int8x16_t ker_1 = vldrbq_gather_offset_z_s8(&rhs_ptr[rhs_offset], gather_offset, rmdr_mask);
+            ker_1 = vrshlq_m_n_s8(ker_1, 4, lower_nibble_mask);
+            ker_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_2 = vaddvaq_s8(rhs_sum_2, ker_1);
+            acc2 = vmladavaq_s8(acc2, ker_1, input);
+
+            rhs_ptr += rhs_cols_offset >> 1;
+        }
+
+        // Odd column count: the straddling high nibble is carried into the
+        // next row pair via spillover0/1.
+        if (rhs_cols & 1)
+        {
+            const int32_t rhs_high0 = rhs_ptr[0] >> 4;
+            const int32_t rhs_high1 = rhs_ptr[rhs_offset] >> 4;
+
+            lhs_ptr = &lhs[0];
+            const int32_t lhs_high = (int8_t)lhs_ptr[0] + lhs_offset;
+
+            spillover0 = lhs_high * rhs_high0;
+            spillover1 = lhs_high * rhs_high1;
+
+            rmdr_mask >>= 1;
+
+            ++lhs_ptr;
+            ++rhs_ptr;
+        }
+        else
+        {
+            spillover0 = 0;
+            spillover1 = 0;
+            lhs_ptr = &lhs[0];
+        }
+
+        // Second pair of rows (acc1/acc3), seeded with any spillover.
+        int32_t acc1 = spillover0;
+        int32_t acc3 = spillover1;
+
+        if (bias)
+        {
+            acc1 += *bias;
+            acc3 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        int32_t rhs_sum_1 = 0;
+        int32_t rhs_sum_3 = 0;
+
+        for (int i = 0; i < col_loop_cnt; i++)
+        {
+            const int8x16x2_t inputx2 = vld2q_s8(lhs_ptr);
+            int8x16_t ker_0 = vldrbq_s8(rhs_ptr);
+
+            int8x16_t ker_low_0 = vrshlq_n_s8(ker_0, 4);
+            ker_low_0 = vshrq_n_s8(ker_low_0, 4);
+            int8x16_t ker_high_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_1 = vaddvaq_s8(rhs_sum_1, ker_low_0);
+            rhs_sum_1 = vaddvaq_s8(rhs_sum_1, ker_high_0);
+            acc1 = vmladavaq_s8(acc1, ker_low_0, inputx2.val[0]);
+            acc1 = vmladavaq_s8(acc1, ker_high_0, inputx2.val[1]);
+
+            int8x16_t ker_1 = vldrbq_s8(&rhs_ptr[rhs_offset]);
+            int8x16_t ker_low_1 = vrshlq_n_s8(ker_1, 4);
+            ker_low_1 = vshrq_n_s8(ker_low_1, 4);
+            int8x16_t ker_high_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_3 = vaddvaq_s8(rhs_sum_3, ker_low_1);
+            rhs_sum_3 = vaddvaq_s8(rhs_sum_3, ker_high_1);
+            acc3 = vmladavaq_s8(acc3, ker_low_1, inputx2.val[0]);
+            acc3 = vmladavaq_s8(acc3, ker_high_1, inputx2.val[1]);
+
+            lhs_ptr += 32;
+            rhs_ptr += 16;
+        }
+
+        if (I6_elements_spill)
+        {
+            const int8x16_t input = vldrbq_s8(lhs_ptr);
+
+            int8x16_t ker_0 = vldrbq_gather_offset_s8(rhs_ptr, gather_offset);
+            ker_0 = vrshlq_m_n_s8(ker_0, 4, lower_nibble_mask);
+            ker_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_1 = vaddvaq_s8(rhs_sum_1, ker_0);
+            acc1 = vmladavaq_s8(acc1, ker_0, input);
+
+            int8x16_t ker_1 = vldrbq_gather_offset_s8(&rhs_ptr[rhs_offset], gather_offset);
+            ker_1 = vrshlq_m_n_s8(ker_1, 4, lower_nibble_mask);
+            ker_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_3 = vaddvaq_s8(rhs_sum_3, ker_1);
+            acc3 = vmladavaq_s8(acc3, ker_1, input);
+
+            lhs_ptr += 16;
+            rhs_ptr += 8;
+        }
+
+        if (rmdr_mask)
+        {
+            const int8x16_t input = vldrbq_z_s8(lhs_ptr, rmdr_mask);
+
+            int8x16_t ker_0 = vldrbq_gather_offset_z_s8(rhs_ptr, gather_offset, rmdr_mask);
+            ker_0 = vrshlq_m_n_s8(ker_0, 4, lower_nibble_mask);
+            ker_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_1 = vaddvaq_s8(rhs_sum_1, ker_0);
+            acc1 = vmladavaq_s8(acc1, ker_0, input);
+
+            int8x16_t ker_1 = vldrbq_gather_offset_z_s8(&rhs_ptr[rhs_offset], gather_offset, rmdr_mask);
+            ker_1 = vrshlq_m_n_s8(ker_1, 4, lower_nibble_mask);
+            ker_1 = vshrq_n_s8(ker_1, 4);
+
+            rhs_sum_3 = vaddvaq_s8(rhs_sum_3, ker_1);
+            acc3 = vmladavaq_s8(acc3, ker_1, input);
+
+            rhs_ptr += rhs_cols_offset >> 1;
+        }
+
+        // Fold in lhs_offset * sum(weights), then requantize, offset, clamp
+        // and scatter the 4 results to their interleaved dst positions.
+        int32x4_t acc = {acc0, acc1, acc2, acc3};
+
+        const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, rhs_sum_3};
+        acc += vdupq_n_s32(lhs_offset) * rhs_sum;
+
+        acc = arm_requantize_mve(acc, dst_multiplier, dst_shift);
+        acc = vaddq_s32(acc, vdupq_n_s32(dst_offset));
+        acc = vmaxq_s32(acc, vdupq_n_s32(activation_min));
+        acc = vminq_s32(acc, vdupq_n_s32(activation_max));
+
+        vstrbq_scatter_offset_s32(dst, scatter_offset, acc);
+
+        dst += 2;
+    }
+
+#elif defined(ARM_MATH_DSP)
+
+    // DSP path: two output rows per pass using SMLAD on sign-extended pairs.
+    for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; ++i_row_loop_cnt)
+    {
+        const int8_t *lhs_ptr = &lhs[0];
+        int32_t res0 = 0;
+        int32_t res1 = 0;
+
+        if (bias)
+        {
+            res0 += *bias;
+            res1 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        for (int rhs_cols_idx = 0; rhs_cols_idx < (rhs_cols / 4); ++rhs_cols_idx)
+        {
+            int32_t lhs_high, rhs_high0, rhs_low0, lhs_low, rhs_high1, rhs_low1;
+
+            read_and_pad_s4(rhs_ptr, &rhs_low0, &rhs_high0);
+            read_and_pad_s4((const int8_t *)&rhs_ptr[rhs_offset], &rhs_low1, &rhs_high1);
+            rhs_ptr += 2;
+
+            lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr);
+            lhs_low = SXTAB16(lhs_offset_s16x2, lhs_high);
+            lhs_high = SXTAB16_RORn(lhs_offset_s16x2, lhs_high, 8);
+
+            res0 = SMLAD(lhs_low, rhs_low0, res0);
+            res0 = SMLAD(lhs_high, rhs_high0, res0);
+            res1 = SMLAD(lhs_low, rhs_low1, res1);
+            res1 = SMLAD(lhs_high, rhs_high1, res1);
+        }
+
+        // 2-3 leftover columns: one full packed byte handled as a pair.
+        if (((rhs_cols % 4) == 2) || ((rhs_cols % 4) == 3))
+        {
+            const int32_t rhs_value0 = rhs_ptr[0];
+            const int32_t lower0 = (int8_t)(rhs_value0 << 4) >> 4;
+            const int32_t higher0 = rhs_value0 >> 4;
+
+            const int32_t rhs_value1 = rhs_ptr[rhs_offset];
+            const int32_t lower1 = (int8_t)(rhs_value1 << 4) >> 4;
+            const int32_t higher1 = rhs_value1 >> 4;
+
+            const int32_t lhs_value_0 = lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_value_1 = lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_value_0 * lower0;
+            res0 += lhs_value_1 * higher0;
+            res1 += lhs_value_0 * lower1;
+            res1 += lhs_value_1 * higher1;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+
+        // Odd column count: low nibble finishes this row; high nibble is the
+        // spillover for the next row.
+        if (rhs_cols % 2 == 1)
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[0] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[0] >> 4;
+            const int32_t rhs_low1 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high1 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            lhs_ptr = &lhs[0];
+            const int32_t lhs_high = (int8_t)lhs_ptr[0] + lhs_offset;
+            ++lhs_ptr;
+
+            res0 += lhs_low * rhs_low0;
+            spillover0 = lhs_high * rhs_high0;
+            res1 += lhs_low * rhs_low1;
+            spillover1 = lhs_high * rhs_high1;
+
+            ++rhs_ptr;
+        }
+        else
+        {
+            spillover0 = 0;
+            spillover1 = 0;
+            lhs_ptr = &lhs[0];
+        }
+
+        // Quantize down
+        res0 = arm_nn_requantize(res0, dst_multiplier, dst_shift);
+        res1 = arm_nn_requantize(res1, dst_multiplier, dst_shift);
+
+        // Add offset
+        res0 += dst_offset;
+        res1 += dst_offset;
+
+        // Clamp the result
+        res0 = MAX(res0, activation_min);
+        res0 = MIN(res0, activation_max);
+        res1 = MAX(res1, activation_min);
+        res1 = MIN(res1, activation_max);
+
+        *dst = (int8_t)res0;
+        *(dst + 2 * row_loop_cnt) = (int8_t)res1;
+        dst++;
+
+        // Second pair of rows, seeded with the spillover products.
+        res0 = spillover0;
+        res1 = spillover1;
+
+        if (bias)
+        {
+            res0 += *bias;
+            res1 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        for (int rhs_cols_idx = 0; rhs_cols_idx < rhs_cols / 4; ++rhs_cols_idx)
+        {
+            int32_t lhs_high, rhs_high0, rhs_low0, lhs_low, rhs_high1, rhs_low1;
+
+            read_and_pad_s4(rhs_ptr, &rhs_low0, &rhs_high0);
+            read_and_pad_s4((const int8_t *)&rhs_ptr[rhs_offset], &rhs_low1, &rhs_high1);
+            rhs_ptr += 2;
+
+            lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr);
+            lhs_low = SXTAB16(lhs_offset_s16x2, lhs_high);
+            lhs_high = SXTAB16_RORn(lhs_offset_s16x2, lhs_high, 8);
+
+            res0 = SMLAD(lhs_low, rhs_low0, res0);
+            res0 = SMLAD(lhs_high, rhs_high0, res0);
+            res1 = SMLAD(lhs_low, rhs_low1, res1);
+            res1 = SMLAD(lhs_high, rhs_high1, res1);
+        }
+
+        if (((rhs_cols % 4) == 2) || ((rhs_cols % 4) == 3))
+        {
+            const int32_t rhs_value0 = rhs_ptr[0];
+            const int32_t lower0 = (int8_t)(rhs_value0 << 4) >> 4;
+            const int32_t higher0 = rhs_value0 >> 4;
+
+            const int32_t rhs_value1 = rhs_ptr[rhs_offset];
+            const int32_t lower1 = (int8_t)(rhs_value1 << 4) >> 4;
+            const int32_t higher1 = rhs_value1 >> 4;
+
+            const int32_t lhs_value_0 = lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_value_1 = lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_value_0 * lower0;
+            res0 += lhs_value_1 * higher0;
+            res1 += lhs_value_0 * lower1;
+            res1 += lhs_value_1 * higher1;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+
+        // Quantize down
+        res0 = arm_nn_requantize(res0, dst_multiplier, dst_shift);
+        res1 = arm_nn_requantize(res1, dst_multiplier, dst_shift);
+
+        // Add offset
+        res0 += dst_offset;
+        res1 += dst_offset;
+
+        // Clamp the result
+        res0 = MAX(res0, activation_min);
+        res0 = MIN(res0, activation_max);
+        res1 = MAX(res1, activation_min);
+        res1 = MIN(res1, activation_max);
+
+        *dst = (int8_t)res0;
+
+        *(dst + 2 * row_loop_cnt) = (int8_t)res1;
+        dst++;
+    }
+
+#else
+
+    // Pure C path: unpack one byte (two weights) per iteration for two rows.
+    for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; ++i_row_loop_cnt)
+    {
+        const int8_t *lhs_ptr = &lhs[0];
+
+        int32_t res0 = 0;
+        int32_t res1 = 0;
+
+        if (bias)
+        {
+            res0 += *bias;
+            res1 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols / 2; ++rhs_cols_idx)
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[0] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[0] >> 4;
+            const int32_t rhs_low1 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high1 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_low * rhs_low0;
+            res0 += lhs_high * rhs_high0;
+            res1 += lhs_low * rhs_low1;
+            res1 += lhs_high * rhs_high1;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+
+        if (rhs_cols % 2 == 1)
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[0] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[0] >> 4;
+            const int32_t rhs_low1 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high1 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            lhs_ptr = &lhs[0];
+            const int32_t lhs_high = (int8_t)lhs_ptr[0] + lhs_offset;
+            ++lhs_ptr;
+
+            res0 += lhs_low * rhs_low0;
+            spillover0 = lhs_high * rhs_high0;
+            res1 += lhs_low * rhs_low1;
+            spillover1 = lhs_high * rhs_high1;
+
+            ++rhs_ptr;
+        }
+        else
+        {
+            spillover0 = 0;
+            spillover1 = 0;
+            lhs_ptr = &lhs[0];
+        }
+
+        // Quantize down
+        res0 = arm_nn_requantize(res0, dst_multiplier, dst_shift);
+        res1 = arm_nn_requantize(res1, dst_multiplier, dst_shift);
+
+        // Add offset
+        res0 += dst_offset;
+        res1 += dst_offset;
+
+        // Clamp the result
+        res0 = MAX(res0, activation_min);
+        res0 = MIN(res0, activation_max);
+        res1 = MAX(res1, activation_min);
+        res1 = MIN(res1, activation_max);
+
+        *dst = (int8_t)res0;
+
+        *(dst + 2 * row_loop_cnt) = (int8_t)res1;
+        dst++;
+
+        res0 = spillover0;
+        res1 = spillover1;
+        if (bias)
+        {
+            res0 += *bias;
+            res1 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+        for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols / 2; ++rhs_cols_idx)
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[0] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[0] >> 4;
+            const int32_t rhs_low1 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high1 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_low * rhs_low0;
+            res0 += lhs_high * rhs_high0;
+            res1 += lhs_low * rhs_low1;
+            res1 += lhs_high * rhs_high1;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+
+        // Quantize down
+        res0 = arm_nn_requantize(res0, dst_multiplier, dst_shift);
+        res1 = arm_nn_requantize(res1, dst_multiplier, dst_shift);
+
+        // Add offset
+        res0 += dst_offset;
+        res1 += dst_offset;
+
+        // Clamp the result
+        res0 = MAX(res0, activation_min);
+        res0 = MIN(res0, activation_max);
+        res1 = MAX(res1, activation_min);
+        res1 = MIN(res1, activation_max);
+
+        *dst = (int8_t)res0;
+        *(dst + 2 * row_loop_cnt) = (int8_t)res1;
+        dst++;
+    }
+
+#endif
+
+    // Remaining 1-3 rows (rhs_rows % 4); these are addressed through
+    // rhs_ptr[rhs_offset], i.e. they appear to live in the second half of the
+    // packed matrix -- see the packing-layout note above.
+    const int8_t *lhs_ptr = &lhs[0];
+    spillover0 = 0;
+
+    for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows % 4; ++i_row_loop_cnt)
+    {
+        int32_t res0 = spillover0;
+        if (bias)
+        {
+            res0 += bias[2 * row_loop_cnt];
+            ++bias;
+        }
+
+#if defined(ARM_MATH_MVEI)
+        int32_t rhs_sum_0 = 0;
+
+        for (int i = 0; i < col_loop_cnt; i++)
+        {
+            const int8x16x2_t inputx2 = vld2q_s8(lhs_ptr);
+            const int8x16_t ker_0 = vldrbq_s8(&rhs_ptr[rhs_offset]);
+
+            int8x16_t ker_low_0 = vrshlq_n_s8(ker_0, 4);
+            ker_low_0 = vshrq_n_s8(ker_low_0, 4);
+            int8x16_t ker_high_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_low_0);
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_high_0);
+            res0 = vmladavaq_s8(res0, ker_low_0, inputx2.val[0]);
+            res0 = vmladavaq_s8(res0, ker_high_0, inputx2.val[1]);
+
+            lhs_ptr += 32;
+            rhs_ptr += 16;
+        }
+
+        if (I6_elements_spill)
+        {
+            const int8x16_t input = vldrbq_s8(lhs_ptr);
+
+            int8x16_t ker_0 = vldrbq_gather_offset_s8(&rhs_ptr[rhs_offset], gather_offset);
+            ker_0 = vrshlq_m_n_s8(ker_0, 4, lower_nibble_mask);
+            ker_0 = vshrq_n_s8(ker_0, 4);
+
+            rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_0);
+            res0 = vmladavaq_s8(res0, ker_0, input);
+
+            lhs_ptr += 16;
+            rhs_ptr += 8;
+        }
+
+        res0 += lhs_offset * rhs_sum_0;
+#endif
+
+#if defined(ARM_MATH_DSP)
+        for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols_offset / 4; ++rhs_cols_idx)
+        {
+            int32_t lhs_high, rhs_high0, rhs_low0, lhs_low;
+
+            read_and_pad_s4((const int8_t *)&rhs_ptr[rhs_offset], &rhs_high0, &rhs_low0);
+            rhs_ptr += 2;
+
+            lhs_high = arm_nn_read_s8x4_ia((const int8_t **)&lhs_ptr);
+            lhs_low = SXTAB16(lhs_offset_s16x2, lhs_high);
+            lhs_high = SXTAB16_RORn(lhs_offset_s16x2, lhs_high, 8);
+
+            res0 = SMLAD(lhs_low, rhs_high0, res0);
+            res0 = SMLAD(lhs_high, rhs_low0, res0);
+        }
+
+        if ((rhs_cols % 4) == 2 || (rhs_cols % 4 == 3))
+        {
+            const int32_t rhs_value0 = rhs_ptr[rhs_offset];
+            const int32_t lower0 = (int8_t)(rhs_value0 << 4) >> 4;
+            const int32_t higher0 = rhs_value0 >> 4;
+
+            const int32_t lhs_value_0 = lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_value_1 = lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_value_0 * lower0;
+            res0 += lhs_value_1 * higher0;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+#else
+        for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols_offset / 2; ++rhs_cols_idx)
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            const int32_t lhs_high = (int8_t)lhs_ptr[1] + lhs_offset;
+
+            res0 += lhs_low * rhs_low0;
+            res0 += lhs_high * rhs_high0;
+
+            ++rhs_ptr;
+            lhs_ptr += 2;
+        }
+#endif
+
+        // Spillover alternates: only every other leftover row starts
+        // mid-byte when rhs_cols is odd.
+        if ((rhs_cols % 2 == 1) && (i_row_loop_cnt % 2 == 0))
+        {
+            const int32_t rhs_low0 = (int8_t)(rhs_ptr[rhs_offset] << 4) >> 4;
+            const int32_t rhs_high0 = rhs_ptr[rhs_offset] >> 4;
+
+            const int32_t lhs_low = (int8_t)lhs_ptr[0] + lhs_offset;
+            lhs_ptr = &lhs[0];
+            const int32_t lhs_high = (int8_t)lhs_ptr[0] + lhs_offset;
+            ++lhs_ptr;
+
+            res0 += lhs_low * rhs_low0;
+            spillover0 = lhs_high * rhs_high0;
+            ++rhs_ptr;
+        }
+        else
+        {
+            spillover0 = 0;
+            lhs_ptr = &lhs[0];
+        }
+
+        // Quantize down
+        res0 = arm_nn_requantize(res0, dst_multiplier, dst_shift);
+
+        // Add offset
+        res0 += dst_offset;
+
+        // Clamp the result
+        res0 = MAX(res0, activation_min);
+        res0 = MIN(res0, activation_max);
+
+        *(dst + 2 * row_loop_cnt) = (int8_t)res0;
+        dst++;
+    }
+
+    return ARM_CMSIS_NN_SUCCESS;
+}
+
+/**
+ * @} end of Doxygen group
+ */
diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c
new file mode 100644
index 0000000..0f13e90
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c
@@ -0,0 +1,723 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_s8 + * Description: s8 vector by matrix (transposed) multiplication + * + * $Date: 5 Sep 2024 + * $Revision: V.6.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @defgroup supportFC Fully Connected + * + * Support functions for Fully Connected + * + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s8 vector(lhs) by matrix (transposed) multiplication + * + * Refer header file for details. + * + */ +#if !defined(ARM_MATH_MVEI) && defined(ARM_MATH_DSP) && !defined(__ARMCC_VERSION) && !defined(__ICCARM__) + #pragma GCC optimize("unroll-loops") +#endif +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s8(const int8_t *lhs, + const int8_t *rhs, + const int32_t *kernel_sum, + const int32_t *bias, + int8_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset, + const int32_t rhs_offset) +{ + if (rhs_offset) + { +#if defined(ARM_MATH_MVEI) + (void)bias; + (void)lhs_offset; + const int32_t row_loop_cnt = rhs_rows / 4; + const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3}; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + int32_t acc_1 = *kernel_sum++; + int32_t acc_2 = *kernel_sum++; + int32_t acc_3 = *kernel_sum++; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + const int8_t *rhs_2_ptr = 
rhs + 2 * rhs_cols; + const int8_t *rhs_3_ptr = rhs + 3 * rhs_cols; + + int32_t lhs_sum = 0; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + lhs_sum = vaddvaq_s8(lhs_sum, input); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1_ptr, p); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2_ptr, p); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + const int8x16_t ker_3 = vldrbq_z_s8(rhs_3_ptr, p); + acc_3 = vmladavaq_s8(acc_3, ker_3, input); + + lhs_vec += 16; + rhs_0_ptr += 16; + rhs_1_ptr += 16; + rhs_2_ptr += 16; + rhs_3_ptr += 16; + } + rhs += 4 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, acc_3}; + + acc += vdupq_n_s32(rhs_offset) * vdupq_n_s32(lhs_sum); + + acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); + acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, address_offset_array, acc); + + dst += 4 * address_offset; + } + + const int loop_cnt = rhs_rows % 4; + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + int32_t lhs_sum = 0; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + lhs_sum = vaddvaq_s8(lhs_sum, input); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_ptr += 16; + } + rhs += rhs_cols; + + acc_0 += lhs_sum * 
rhs_offset; + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = MIN(acc_0, activation_max); + dst += address_offset; + } + +#elif defined(ARM_MATH_DSP) + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 2; + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + const int16_t rhs_offset_s16 = (int16_t)rhs_offset; + const uint32_t rhs_offset_s16x2 = PKHBT(rhs_offset_s16, rhs_offset_s16, 16); + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + if (bias) + { + acc_0 = *bias++; + acc_1 = *bias++; + } + + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int32_t j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0_ptr); + int32_t ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1_ptr); + ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + for (int32_t k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0_ptr + rhs_offset); + rhs_0_ptr++; + acc_1 += lhs_temp * (*rhs_1_ptr + rhs_offset); + rhs_1_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, 
dst_shift); + + // Add offset + acc_0 += dst_offset; + acc_1 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int8_t)acc_0; + *(dst + address_offset) = (int8_t)acc_1; + dst += 2 * address_offset; + } + + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + if (bias) + { + acc_0 = *bias++; + } + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + + for (int32_t i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_ptr); + int32_t ker_1 = SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = SXTAB16(rhs_offset_s16x2, ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + + for (int32_t j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_ptr + rhs_offset); + rhs_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Add offset + acc_0 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int8_t)acc_0; + dst += address_offset; + } + +#else + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + const int8_t *rhs_ptr_1 = &rhs[rhs_cols]; + const int8_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res02 = 0; + if (bias) + { + res00 = *bias++; + res01 = *bias++; + res02 = *bias++; + } + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) 
+ { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0 + rhs_offset; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1 + rhs_offset; + const int32_t rhs_value2 = (int8_t)*rhs_ptr_2 + rhs_offset; + const int32_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res02 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (int8_t)res00; + *(dst + address_offset) = (int8_t)res01; + *(dst + 2 * address_offset) = (int8_t)res02; + dst += 3 * address_offset; + + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int32_t i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = 0; + if (bias) + { + res00 = *bias++; + } + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr[0] + rhs_offset; + int32_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (int8_t)res00; + dst += address_offset; + rhs += rhs_cols; + } +#endif + } + + else + { +#if defined(ARM_MATH_MVEI) 
+ const int32_t row_loop_cnt = rhs_rows / 4; + const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3}; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + int32_t acc_1 = *kernel_sum++; + int32_t acc_2 = *kernel_sum++; + int32_t acc_3 = *kernel_sum++; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + const int8_t *rhs_2_ptr = rhs + 2 * rhs_cols; + const int8_t *rhs_3_ptr = rhs + 3 * rhs_cols; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1_ptr, p); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2_ptr, p); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + const int8x16_t ker_3 = vldrbq_z_s8(rhs_3_ptr, p); + acc_3 = vmladavaq_s8(acc_3, ker_3, input); + + lhs_vec += 16; + rhs_0_ptr += 16; + rhs_1_ptr += 16; + rhs_2_ptr += 16; + rhs_3_ptr += 16; + } + rhs += 4 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, acc_3}; + + acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); + acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + + vstrbq_scatter_offset_s32(dst, address_offset_array, acc); + + dst += 4 * address_offset; + } + + const int loop_cnt = rhs_rows % 4; + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = *kernel_sum++; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + uint32_t 
col_cnt = (uint32_t)rhs_cols; + + for (int32_t i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_ptr, p); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_ptr += 16; + } + rhs += rhs_cols; + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_0 += dst_offset; + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = MIN(acc_0, activation_max); + dst += address_offset; + } + +#elif defined(ARM_MATH_DSP) + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 2; + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + if (bias) + { + acc_0 = *bias++; + acc_1 = *bias++; + } + + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0_ptr = rhs; + const int8_t *rhs_1_ptr = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int32_t j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0_ptr); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1_ptr); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + for (int32_t k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0_ptr); + rhs_0_ptr++; + acc_1 += lhs_temp * (*rhs_1_ptr); + rhs_1_ptr++; + } + + acc_0 = 
arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift); + + // Add offset + acc_0 += dst_offset; + acc_1 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int8_t)acc_0; + *(dst + address_offset) = (int8_t)acc_1; + dst += 2 * address_offset; + } + + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + if (bias) + { + acc_0 = *bias++; + } + const int32_t col_loop_cnt = rhs_cols / 4; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_ptr = rhs; + + for (int32_t i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_ptr); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + + for (int32_t j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_ptr); + rhs_ptr++; + } + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Add offset + acc_0 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int8_t)acc_0; + dst += address_offset; + } + +#else + (void)kernel_sum; + + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int32_t i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + const int8_t *rhs_ptr_1 = &rhs[rhs_cols]; + const int8_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res02 = 0; + if (bias) + { + res00 = *bias++; + res01 = *bias++; + res02 = *bias++; + } + for (int32_t 
rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1; + const int32_t rhs_value2 = (int8_t)*rhs_ptr_2; + const int32_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res02 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (int8_t)res00; + *(dst + address_offset) = (int8_t)res01; + *(dst + 2 * address_offset) = (int8_t)res02; + dst += 3 * address_offset; + + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int32_t i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = 0; + if (bias) + { + res00 = *bias++; + } + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr[0]; + int32_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (int8_t)res00; + dst += address_offset; + rhs += rhs_cols; + } +#endif + } + return ARM_CMSIS_NN_SUCCESS; +} + 
+/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c new file mode 100644 index 0000000..9963697 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c @@ -0,0 +1,421 @@ +/* + * SPDX-FileCopyrightText: Copyright 2021-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_svdf_s8 + * Description: s8 vector by matrix (transposed) multiplication with + * s16 output. Targetted at SVDF operator. + * + * $Date: 28 March 2023 + * $Revision: V.3.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportFC + * @{ + */ + +/* + * s8 vector(lhs) by matrix (transposed) multiplication + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_svdf_s8(const int8_t *lhs, + const int8_t *rhs, + int16_t *dst, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max) +{ + if (rhs_cols < 0 || (NN_Q31_MAX - rhs_cols) < 16 || dst_offset < 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + +#if defined(ARM_MATH_MVEI) + int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + int32_t acc_2 = 0; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + const int8_t *rhs_2 = rhs + 2 * rhs_cols; + + int32_t rhs_sum_0 = 0; + int32_t rhs_sum_1 = 0; + int32_t rhs_sum_2 = 0; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_0); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1, p); + rhs_sum_1 = vaddvaq_s8(rhs_sum_1, ker_1); + acc_1 = vmladavaq_s8(acc_1, ker_1, input); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2, p); + rhs_sum_2 = vaddvaq_s8(rhs_sum_2, ker_2); + acc_2 = vmladavaq_s8(acc_2, ker_2, input); + + lhs_vec += 16; + rhs_0 += 16; + rhs_1 += 16; + rhs_2 += 16; + } + rhs += 3 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, 0}; + const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, 0}; + acc += vdupq_n_s32(lhs_offset) * rhs_sum; + + acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + *(dst) = 
(int16_t)acc[0]; + *(dst + dst_offset) = (int16_t)acc[1]; + *(dst + 2 * dst_offset) = (int16_t)acc[2]; + dst += 3 * dst_offset; + } + + const int loop_cnt = rhs_rows % 3; + for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = 0; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + int32_t rhs_sum_0 = 0; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + rhs_sum_0 = vaddvaq_s8(rhs_sum_0, ker_0); + acc_0 = vmladavaq_s8(acc_0, ker_0, input); + + lhs_vec += 16; + rhs_0 += 16; + } + rhs += rhs_cols; + + const int32_t offsets = rhs_sum_0 * lhs_offset; + acc_0 = QADD(acc_0, offsets); + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = (int16_t)MIN(acc_0, activation_max); + dst += dst_offset; + } + +#elif defined(ARM_MATH_DSP) + int32_t row_loop_cnt = rhs_rows / 2; + + const int16_t lhs_offset_s16 = lhs_offset; + + const uint32_t lhs_offset_s16x2 = PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + int32_t rhs_cols_idx = 0; + + int32_t vec_0, vec_1, ker_0, ker_1; + + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #pragma clang loop unroll(disable) + #endif + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + // 4 x MAC acc_0, acc1 + vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + 
ker_0 = SXTB16(ker_0); + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + + // 4 x MAC acc_0, acc1 + vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + + // 4 x MAC acc_0, acc1 + vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + + // 4 x MAC acc_0, acc1 + vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #pragma clang loop 
unroll(disable) + #endif + for (; rhs_cols_idx <= (rhs_cols - 4); rhs_cols_idx += 4) + { + vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + vec_1 = SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + + ker_0 = arm_nn_read_s8x4_ia(&rhs_1); + ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_1 = SMLAD(ker_1, vec_1, acc_1); + acc_1 = SMLAD(ker_0, vec_0, acc_1); + } + + #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #pragma clang loop unroll(disable) + #endif + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_1 += lhs_temp * (*rhs_1); + rhs_1++; + } + + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int16_t)acc_0; + *(dst + dst_offset) = (int16_t)acc_1; + dst += 2 * dst_offset; + } + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + const int32_t col_loop_cnt = rhs_cols / 4; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_s8x4_ia(&lhs_vec); + int32_t vec_1 = SXTAB16(lhs_offset_s16x2, ROR((uint32_t)vec_0, 8)); + vec_0 = SXTAB16(lhs_offset_s16x2, vec_0); + + int32_t ker_0 = arm_nn_read_s8x4_ia(&rhs_0); + int32_t ker_1 = SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = SXTB16(ker_0); + + acc_0 = SMLAD(ker_1, vec_1, acc_0); + acc_0 = SMLAD(ker_0, vec_0, acc_0); + } + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + 
lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * *rhs_0; + rhs_0++; + } + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int16_t)acc_0; + dst += dst_offset; + } + +#else + + int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const int8_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = &rhs[0]; + const int8_t *rhs_ptr_1 = &rhs[rhs_cols]; + const int8_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + int32_t res00 = 0; + int32_t res01 = 0; + int32_t res02 = 0; + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const int32_t rhs_value0 = (int8_t)*rhs_ptr_0; + const int32_t rhs_value1 = (int8_t)*rhs_ptr_1; + const int32_t rhs_value2 = (int8_t)*rhs_ptr_2; + const int32_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (int16_t)res00; + *(dst + dst_offset) = (int16_t)res01; + *(dst + 2 * dst_offset) = (int16_t)res02; + dst += 3 * dst_offset; + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const int8_t *lhs_ptr = &lhs[0]; + const int8_t *rhs_ptr = &rhs[0]; + + int32_t res00 = 0; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; 
++rhs_cols_idx) + { + int32_t rhs_value0 = (int8_t)rhs_ptr[0]; + int32_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (int16_t)res00; + dst += dst_offset; + rhs += rhs_cols; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nntables.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nntables.c new file mode 100644 index 0000000..7ccb89e --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_nntables.c @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nntables.c + * Description: Common tables used by various activation functions + * + * $Date: 28 October 2022 + * $Revision: V.2.1.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @brief tables for various activation functions + * + * This file includes the declaration of common tables. + * Most of them are used for activation functions + * + */ + +// Table of sigmoid(i/24) at 0.16 format - 256 elements. +// Combined sigmoid and tanh look-up table, since +// tanh(x) = 2*sigmoid(2*x) -1. +// Both functions are symmetric, so the LUT table is only needed +// for the absolute value of the input. +const uint16_t sigmoid_table_uint16[256] = { + 32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498, 40149, 40794, 41432, 42064, 42688, + 43304, 43912, 44511, 45102, 45683, 46255, 46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, + 51865, 52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174, 56503, 56823, 57133, 57433, + 57724, 58007, 58280, 58544, 58800, 59048, 59288, 59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, + 61279, 61441, 61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886, 62990, 63090, 63186, + 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835, 63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, + 64357, 64405, 64450, 64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845, 64873, 64900, + 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097, 65115, 65132, 65149, 65164, 65179, 65194, 65208, + 65221, 65234, 65246, 65258, 65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360, 65367, + 65374, 65381, 65387, 
65393, 65399, 65404, 65410, 65415, 65420, 65425, 65429, 65433, 65438, 65442, 65445, 65449, + 65453, 65456, 65459, 65462, 65465, 65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491, + 65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508, 65509, 65510, 65511, 65512, 65513, + 65514, 65515, 65516, 65517, 65517, 65518, 65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, + 65525, 65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529, 65529, 65529, 65530, 65530, + 65530, 65530, 65531, 65531, 65531, 65531, 65531, 65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, + 65533, 65533, 65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65535}; diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c new file mode 100644 index 0000000..b46cbed --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_q7_to_q15_with_offset.c + * Description: Converts the elements of the Q7 vector to Q15 vector with an added offset + * + * $Date: 22 March 2023 + * $Revision: V.2.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConversion + * @{ + */ + +void arm_q7_to_q15_with_offset(const int8_t *src, int16_t *dst, int32_t block_size, int16_t offset) +{ + int32_t block_cnt; + +#if defined(ARM_MATH_MVEI) + + int16x8_t source; + const int16x8_t source_offset = vdupq_n_s16(offset); + block_cnt = block_size / 8; + + while (block_cnt > 0) + { + source = vldrbq_s16(src); + source = vaddq_s16(source, source_offset); + vstrhq_s16(dst, source); + dst += 8; + src += 8; + block_cnt--; + } + + block_cnt = block_size & 0x7; + +#elif defined(ARM_MATH_DSP) + /* Run the below code for cores that support SIMD instructions */ + int32_t in_q7x4; + int32_t in_q15x2_1; + int32_t in_q15x2_2; + int32_t out_q15x2_1; + int32_t out_q15x2_2; + + /*loop unrolling */ + block_cnt = block_size >> 2; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. 
*/ + const int32_t offset_q15x2 = PKHBT(offset, offset, 16); + while (block_cnt > 0) + { + /* convert from s8 to s16 and then store the results in the destination buffer */ + in_q7x4 = arm_nn_read_s8x4_ia(&src); + + /* Extract and sign extend each of the four s8 values to s16 */ + in_q15x2_1 = SXTAB16(offset_q15x2, ROR(in_q7x4, 8)); + in_q15x2_2 = SXTAB16(offset_q15x2, in_q7x4); + + out_q15x2_2 = PKHTB(in_q15x2_1, in_q15x2_2, 16); + out_q15x2_1 = PKHBT(in_q15x2_2, in_q15x2_1, 16); + + arm_nn_write_q15x2_ia(&dst, out_q15x2_1); + arm_nn_write_q15x2_ia(&dst, out_q15x2_2); + + block_cnt--; + } + /* Handle left over samples */ + block_cnt = block_size % 0x4; + +#else + /* Run the below code for Cortex-M0 */ + /* Loop over block_size number of values */ + block_cnt = block_size; +#endif + + while (block_cnt > 0) + { + *dst++ = (int16_t)*src++ + offset; + + /* Decrement the loop counter */ + block_cnt--; + } +} + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_s8_to_s16_unordered_with_offset.c b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_s8_to_s16_unordered_with_offset.c new file mode 100644 index 0000000..324523d --- /dev/null +++ b/src/third_party/cmsis_nn/Source/NNSupportFunctions/arm_s8_to_s16_unordered_with_offset.c @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_s8_to_s16_unordered_with_offset.c + * Description: Converts the elements of the S8 vector to S16 vector with an added offset + * + * $Date: 23 March 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup supportConversion + * @{ + */ +#if defined(ARM_MATH_DSP) +void arm_s8_to_s16_unordered_with_offset(const int8_t *src, int16_t *dst, int32_t block_size, int16_t offset) +{ + int32_t in_s8x4; + int32_t in_s16x2_1; + int32_t in_s16x2_2; + int32_t block_cnt = block_size >> 2; + + /* Compute 4 outputs at a time. */ + const int32_t offset_s16x2 = PKHBT(offset, offset, 16); + while (block_cnt > 0) + { + in_s8x4 = arm_nn_read_s8x4_ia(&src); + + in_s16x2_1 = SXTAB16(offset_s16x2, in_s8x4); + in_s16x2_2 = SXTAB16(offset_s16x2, ROR(in_s8x4, 8)); + + arm_nn_write_q15x2_ia(&dst, in_s16x2_1); + arm_nn_write_q15x2_ia(&dst, in_s16x2_2); + + block_cnt--; + } + + /* Handle left over samples. 
*/ + block_cnt = block_size % 4; + while (block_cnt > 0) + { + *dst++ = (int16_t)*src++ + offset; + block_cnt--; + } +} +#endif + +/** + * @} end of Doxygen group + */ diff --git a/src/third_party/cmsis_nn/Source/PadFunctions/arm_pad_s8.c b/src/third_party/cmsis_nn/Source/PadFunctions/arm_pad_s8.c new file mode 100644 index 0000000..effd1bf --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PadFunctions/arm_pad_s8.c @@ -0,0 +1,117 @@ + +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_pad_s8.c + * Description: Pad a s8 vector + * + * $Date: 19 Sep 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nn_types.h" +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" +/** + * @ingroup Public + */ + +/** + * @addtogroup Pad + * @{ + */ + +/* + * Basic s8 pad function. + * + * Refer header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_pad_s8(const int8_t *input, + int8_t *output, + const int8_t pad_value, + const cmsis_nn_dims *input_size, + const cmsis_nn_dims *pre_pad, + const cmsis_nn_dims *post_pad) +{ + + const cmsis_nn_dims output_size = {pre_pad->n + input_size->n + post_pad->n, + pre_pad->h + input_size->h + post_pad->h, + pre_pad->w + input_size->w + post_pad->w, + pre_pad->c + input_size->c + post_pad->c}; + + const int32_t batch_block_size = output_size.h * output_size.w * output_size.c; + const int32_t row_block_size = output_size.w * output_size.c; + const int32_t col_block_size = output_size.c; + + arm_memset_s8(output, pad_value, batch_block_size * pre_pad->n); + output += batch_block_size * pre_pad->n; + for (int32_t b = 0; b < input_size->n; b++) + { + + arm_memset_s8(output, pad_value, row_block_size * pre_pad->h); + output += row_block_size * pre_pad->h; + for (int32_t y = 0; y < input_size->h; y++) + { + + arm_memset_s8(output, pad_value, col_block_size * pre_pad->w); + output += col_block_size * pre_pad->w; + if (input_size->c == output_size.c) + { + arm_memcpy_s8(output, input, input_size->w * input_size->c); + output += input_size->w * input_size->c; + input += input_size->w * input_size->c; + } + else + { + for (int32_t x = 0; x < input_size->w; x++) + { + + arm_memset_s8(output, pad_value, pre_pad->c); + output += pre_pad->c; + + arm_memcpy_s8(output, input, input_size->c); + output += input_size->c; + input += input_size->c; + + arm_memset_s8(output, pad_value, post_pad->c); + output += post_pad->c; + } + } + + arm_memset_s8(output, pad_value, col_block_size * post_pad->w); + output += col_block_size * post_pad->w; + } + + arm_memset_s8(output, pad_value, row_block_size * post_pad->h); + output += row_block_size * post_pad->h; + } + arm_memset_s8(output, pad_value, batch_block_size * post_pad->n); + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Pad group + */ diff --git 
a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s16.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s16.c new file mode 100644 index 0000000..fae4082 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s16.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_get_buffer_sizes_s16.c + * Description: Collection of get buffer size functions for avgpool s16 layer function. 
+ * + * $Date: 13 January 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Pooling + */ + +/** + * @addtogroup GetBufferSizePooling + * @{ + */ + +int32_t arm_avgpool_s16_get_buffer_size(const int output_x, const int ch_src) +{ +#if defined(ARM_MATH_MVEI) + return arm_avgpool_s16_get_buffer_size_mve(output_x, ch_src); +#elif defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return arm_avgpool_s16_get_buffer_size_dsp(output_x, ch_src); +#else + (void)output_x; + (void)ch_src; + return 0; +#endif +} + +int32_t arm_avgpool_s16_get_buffer_size_dsp(const int output_x, const int ch_src) +{ + (void)output_x; + return (ch_src * sizeof(int32_t)); +} + +int32_t arm_avgpool_s16_get_buffer_size_mve(const int output_x, const int ch_src) +{ + (void)output_x; + (void)ch_src; + + return 0; +} + +/** + * @} end of GetBufferSizePooling group + */ diff --git a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s8.c new file mode 100644 index 0000000..62b75f3 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_get_buffer_sizes_s8.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for avgpool s8 layer function. + * + * $Date: 25 January 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup Pooling + */ + +/** + * @addtogroup GetBufferSizePooling + * @{ + */ + +int32_t arm_avgpool_s8_get_buffer_size(const int output_x, const int ch_src) +{ +#if defined(ARM_MATH_MVEI) + return arm_avgpool_s8_get_buffer_size_mve(output_x, ch_src); +#elif defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return arm_avgpool_s8_get_buffer_size_dsp(output_x, ch_src); +#else + (void)output_x; + (void)ch_src; + return 0; +#endif +} + +int32_t arm_avgpool_s8_get_buffer_size_dsp(const int output_x, const int ch_src) +{ + (void)output_x; + return (ch_src * sizeof(int32_t)); +} + +int32_t arm_avgpool_s8_get_buffer_size_mve(const int output_x, const int ch_src) +{ + (void)output_x; + (void)ch_src; + + return 0; +} + +/** + * @} end of GetBufferSizePooling group + */ diff --git a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s16.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s16.c new file mode 100644 index 0000000..f214bed --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s16.c @@ -0,0 +1,328 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_s16.c + * Description: Pooling function implementations + * + * $Date: 27 November 2023 + * $Revision: V.2.5.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + +static void scale_q31_to_q15_and_clamp(const int32_t *buffer, + int16_t *target, + int32_t length, + const int32_t count, + const int act_min, + const int act_max) +{ + const int half_count = count / 2; + + for (int i = 0; i < length; i++) + { + int32_t sum = buffer[i] > 0 ? (buffer[i] + half_count) : (buffer[i] - half_count); + sum = sum / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + target[i] = (int16_t)sum; + } +} +#endif + +/** + * @ingroup Public + + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * s16 average pooling function + * + * Refer to header file for details. 
+ * + */ +arm_cmsis_nn_status arm_avgpool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *dst) +{ + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int32_t act_min = pool_params->activation.min; + const int32_t act_max = pool_params->activation.max; + const int32_t ch_src = input_dims->c; + const int32_t batch_input = input_x * input_y * ch_src; + int32_t batch_cnt = input_dims->n; + + if (batch_cnt < 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + +#if defined(ARM_MATH_MVEI) + (void)ctx; + + const int32_t batch_output = output_x * output_y * ch_src; + + while (batch_cnt) + { + for (int i_y = 0; i_y < output_y; i_y++) + { + for (int i_x = 0; i_x < output_x; i_x++) + { + const int32_t k_y_start = MAX(0, i_y * stride_y - pad_y); + const int32_t k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); + + const int32_t k_x_start = MAX(0, i_x * stride_x - pad_x); + const int32_t k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); + + const int16_t *src_base = src; + int16_t *out = &dst[ch_src * (i_x + i_y * output_x)]; + + int32_t ch_count = (ch_src + 7) / 8; + int32_t channels = ch_src; + + while (ch_count > 0) + { + int32_t count = 0; + + int32x4_t sum_1 = vdupq_n_s32(0); + int32x4_t sum_2 = vdupq_n_s32(0); + // Load store tail predicate + const mve_pred16_t ld_st_p = vctp16q(channels); + channels -= 8; + + for (int k_y = k_y_start; k_y < k_y_end; k_y++) + { + for (int k_x = k_x_start; k_x < 
k_x_end; k_x++) + { + const int16_t *src_inner = src_base + (ch_src * (k_x + k_y * input_x)); + const int16x8_t temp = vldrhq_z_s16(src_inner, ld_st_p); + + const int32x4_t temp_lo = vmovlbq_s16(temp); + const int32x4_t temp_hi = vmovltq_s16(temp); + + sum_1 = vaddq_s32(sum_1, temp_lo); + sum_2 = vaddq_s32(sum_2, temp_hi); + + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + // Perform the following operation + // sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; + const int32_t half_count = count / 2; + // Predicate for 'sum > 0' operation + mve_pred16_t p = vcmpgtq_n_s32(sum_1, 0); + sum_1 = vaddq_m_n_s32(sum_1, sum_1, half_count, p); + sum_1 = vsubq_m_n_s32(sum_1, sum_1, half_count, ~p); + + p = vcmpgtq_n_s32(sum_2, 0); + sum_2 = vaddq_m_n_s32(sum_2, sum_2, half_count, p); + sum_2 = vsubq_m_n_s32(sum_2, sum_2, half_count, ~p); + + for (int i = 0; i < 4; i++) + { + sum_1[i] = sum_1[i] / count; + sum_2[i] = sum_2[i] / count; + } + + sum_1 = vmaxq_s32(sum_1, vdupq_n_s32(act_min)); + sum_1 = vminq_s32(sum_1, vdupq_n_s32(act_max)); + + sum_2 = vmaxq_s32(sum_2, vdupq_n_s32(act_min)); + sum_2 = vminq_s32(sum_2, vdupq_n_s32(act_max)); + + int16x8_t temp = vdupq_n_s16(0); + temp = vmovnbq_s32(temp, sum_1); + temp = vmovntq_s32(temp, sum_2); + + vstrhq_p_s16(out, temp, ld_st_p); + + out += 8; + ch_count--; + src_base += 8; + } + } + } + src += batch_input; + dst += batch_output; + + batch_cnt--; + } + +#elif defined(ARM_MATH_DSP) + /* Run the following code for CPU's with DSP extension + */ + int32_t *buffer = (int32_t *)ctx->buf; + + if (buffer == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + while (batch_cnt) + { + + for (int i_y = 0, idx_y = -pad_y; i_y < output_y; idx_y += stride_y, i_y++) + { + for (int i_x = 0, idx_x = -pad_x; i_x < output_x; idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: + (base_idx_ + kernel__start) >= 0 */ + const 
int32_t kernel_y_start = MAX(0, -idx_y); + const int32_t kernel_x_start = MAX(0, -idx_x); + + /* Condition for kernel end dimension: + (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - idx_x); + + int count = 0; + + for (int k_y = kernel_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = kernel_x_start; k_x < kernel_x_end; k_x++) + { + const int16_t *start = src + ch_src * (k_x + idx_x + (k_y + idx_y) * input_x); + + if (count == 0) + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = start[i]; + } + } + else + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = QADD(start[i], buffer[i]); + } + } + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + scale_q31_to_q15_and_clamp(buffer, dst, ch_src, count, act_min, act_max); + dst += ch_src; + } + } + src += batch_input; + + batch_cnt--; + } + +#else + /* Reference C code adapted from CMSIS-NN arm_avgpool_s8.c. 
+ */ + const int32_t batch_output = output_x * output_y * ch_src; + (void)ctx; + + while (batch_cnt) + { + for (int i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++) + { + for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: (base_idx_ + kernel__start) >= 0 */ + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + /* Condition for kernel end dimension: (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x); + + for (int i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) + { + int sum = 0; + int count = 0; + + for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++) + { + sum += src[i_ch_in + ch_src * (k_x + base_idx_x + (k_y + base_idx_y) * input_x)]; + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + dst[i_ch_in + ch_src * (i_x + i_y * output_x)] = sum; + } + } + } + src += batch_input; + dst += batch_output; + + batch_cnt--; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Pooling group + */ diff --git a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s8.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s8.c new file mode 100644 index 0000000..388be99 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_avgpool_s8.c @@ -0,0 +1,389 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_s8.c + * Description: Pooling function implementations + * + * $Date: 27 November 2023 + * $Revision: V.3.3.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) +static void scale_q31_to_q7_and_clamp(const int32_t *buffer, + int8_t *target, + int32_t length, + const int32_t count, + const int act_min, + const int act_max) +{ + const int half_count = count / 2; + + for (int i = 0; i < length; i++) + { + int32_t sum = buffer[i] > 0 ? (buffer[i] + half_count) : (buffer[i] - half_count); + sum = sum / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + target[i] = (int8_t)sum; + } +} +#endif + +/** + * @ingroup Public + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * s8 average pooling function + * + * Refer to header file for details. 
+ * + */ + +#if defined(ARM_MATH_MVEI) + +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int8_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int8_t *dst) +{ + (void)ctx; + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int32_t act_min = pool_params->activation.min; + const int32_t act_max = pool_params->activation.max; + const int32_t ch_src = input_dims->c; + const int32_t batch_input = input_x * input_y * ch_src; + const int32_t batch_output = output_x * output_y * ch_src; + int32_t batch_cnt = input_dims->n; + + if (batch_cnt < 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + while (batch_cnt) + { + for (int i_y = 0; i_y < output_y; i_y++) + { + for (int i_x = 0; i_x < output_x; i_x++) + { + const int32_t k_y_start = MAX(0, i_y * stride_y - pad_y); + const int32_t k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); + + const int32_t k_x_start = MAX(0, i_x * stride_x - pad_x); + const int32_t k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); + + const int8_t *src_base = src; + int8_t *out = &dst[ch_src * (i_x + i_y * output_x)]; + + int32_t ch_count = (ch_src + 15) / 16; + int32_t channels = ch_src; + + while (ch_count > 0) + { + int8x16_t temp; + int16x8_t temp_lo, temp_hi; + int32x4_t temp_lo_lo, temp_lo_hi, temp_hi_lo, temp_hi_hi; + int32_t count = 0; + + int32x4_t sum_1 = vdupq_n_s32(0); + int32x4_t sum_2 = vdupq_n_s32(0); + int32x4_t sum_3 = vdupq_n_s32(0); + int32x4_t sum_4 = vdupq_n_s32(0); + // Load 
store tail predicate + const mve_pred16_t ld_st_p = vctp8q(channels); + channels -= 16; + + for (int k_y = k_y_start; k_y < k_y_end; k_y++) + { + for (int k_x = k_x_start; k_x < k_x_end; k_x++) + { + const int8_t *src_inner = src_base + (ch_src * (k_x + k_y * input_x)); + temp = vldrbq_z_s8(src_inner, ld_st_p); + + temp_lo = vmovlbq_s8(temp); + temp_hi = vmovltq_s8(temp); + + temp_lo_lo = vmovlbq_s16(temp_lo); + temp_lo_hi = vmovltq_s16(temp_lo); + + temp_hi_lo = vmovlbq_s16(temp_hi); + temp_hi_hi = vmovltq_s16(temp_hi); + + sum_1 = vaddq_s32(sum_1, temp_lo_lo); + sum_2 = vaddq_s32(sum_2, temp_lo_hi); + sum_3 = vaddq_s32(sum_3, temp_hi_lo); + sum_4 = vaddq_s32(sum_4, temp_hi_hi); + + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + // Perform the following operation + // sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; + const int32_t half_count = count / 2; + // Predicate for 'sum > 0' operation + mve_pred16_t p = vcmpgtq_n_s32(sum_1, 0); + sum_1 = vaddq_m_n_s32(sum_1, sum_1, half_count, p); + sum_1 = vsubq_m_n_s32(sum_1, sum_1, half_count, ~p); + + p = vcmpgtq_n_s32(sum_2, 0); + sum_2 = vaddq_m_n_s32(sum_2, sum_2, half_count, p); + sum_2 = vsubq_m_n_s32(sum_2, sum_2, half_count, ~p); + + p = vcmpgtq_n_s32(sum_3, 0); + sum_3 = vaddq_m_n_s32(sum_3, sum_3, half_count, p); + sum_3 = vsubq_m_n_s32(sum_3, sum_3, half_count, ~p); + + p = vcmpgtq_n_s32(sum_4, 0); + sum_4 = vaddq_m_n_s32(sum_4, sum_4, half_count, p); + sum_4 = vsubq_m_n_s32(sum_4, sum_4, half_count, ~p); + + for (int i = 0; i < 4; i++) + { + sum_1[i] = sum_1[i] / count; + sum_2[i] = sum_2[i] / count; + sum_3[i] = sum_3[i] / count; + sum_4[i] = sum_4[i] / count; + } + + sum_1 = vmaxq_s32(sum_1, vdupq_n_s32(act_min)); + sum_1 = vminq_s32(sum_1, vdupq_n_s32(act_max)); + + sum_2 = vmaxq_s32(sum_2, vdupq_n_s32(act_min)); + sum_2 = vminq_s32(sum_2, vdupq_n_s32(act_max)); + + sum_3 = vmaxq_s32(sum_3, 
vdupq_n_s32(act_min)); + sum_3 = vminq_s32(sum_3, vdupq_n_s32(act_max)); + + sum_4 = vmaxq_s32(sum_4, vdupq_n_s32(act_min)); + sum_4 = vminq_s32(sum_4, vdupq_n_s32(act_max)); + + temp_lo = vmovnbq_s32(temp_lo, sum_1); + temp_lo = vmovntq_s32(temp_lo, sum_2); + + temp_hi = vmovnbq_s32(temp_hi, sum_3); + temp_hi = vmovntq_s32(temp_hi, sum_4); + + temp = vmovnbq_s16(temp, temp_lo); + temp = vmovntq_s16(temp, temp_hi); + + vstrbq_p_s8(out, temp, ld_st_p); + out += 16; + + ch_count--; + src_base += 16; + } + } + } + src += batch_input; + dst += batch_output; + + batch_cnt--; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +#else +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int8_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int8_t *dst) +{ + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int32_t act_min = pool_params->activation.min; + const int32_t act_max = pool_params->activation.max; + const int32_t ch_src = input_dims->c; + int32_t batch_cnt = input_dims->n; + + if (batch_cnt < 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_avgpool_s8_get_buffer_size(output_dims->w, input_dims->c)) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + #if defined(ARM_MATH_DSP) + /* Run the following code for CPU's with DSP extension + */ + const int32_t batch_size = input_x * input_y * ch_src; + int32_t *buffer = (int32_t *)ctx->buf; + + while (batch_cnt) + { + for (int i_y = 0, idx_y = -pad_y; i_y < output_y; idx_y += stride_y, 
i_y++) + { + for (int i_x = 0, idx_x = -pad_x; i_x < output_x; idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: + (base_idx_ + kernel__start) >= 0 */ + const int32_t kernel_y_start = MAX(0, -idx_y); + const int32_t kernel_x_start = MAX(0, -idx_x); + + /* Condition for kernel end dimension: + (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - idx_x); + + int count = 0; + + for (int k_y = kernel_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = kernel_x_start; k_x < kernel_x_end; k_x++) + { + const int8_t *start = src + ch_src * (k_x + idx_x + (k_y + idx_y) * input_x); + + if (count == 0) + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = start[i]; + } + } + else + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = QADD(start[i], buffer[i]); + } + } + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + scale_q31_to_q7_and_clamp(buffer, dst, ch_src, count, act_min, act_max); + dst += ch_src; + } + } + src += batch_size; + + batch_cnt--; + } + + #else + + /* Reference C code adapted from CMSIS-NN arm_avepool_q7_HWC. + */ + const int32_t batch_input = input_x * input_y * ch_src; + const int32_t batch_output = output_x * output_y * ch_src; + + while (batch_cnt) + { + for (int i_y = 0; i_y < output_y; i_y++) + { + for (int i_x = 0; i_x < output_x; i_x++) + { + for (int i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) + { + int sum = 0; + int count = 0; + for (int k_y = i_y * stride_y - pad_y; k_y < i_y * stride_y - pad_y + kernel_y; k_y++) + { + for (int k_x = i_x * stride_x - pad_x; k_x < i_x * stride_x - pad_x + kernel_x; k_x++) + { + if (k_y >= 0 && k_x >= 0 && k_y < input_y && k_x < input_x) + { + sum += src[i_ch_in + ch_src * (k_x + k_y * input_x)]; + count++; + } + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. 
+ if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + dst[i_ch_in + ch_src * (i_x + i_y * output_x)] = sum; + } + } + } + src += batch_input; + dst += batch_output; + + batch_cnt--; + } + + #endif + return ARM_CMSIS_NN_SUCCESS; +} + +#endif /* ARM_MATH_MVEI */ + +/** + * @} end of Pooling group + */ diff --git a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s16.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s16.c new file mode 100644 index 0000000..36d1dbb --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s16.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_max_pool_s16.c + * Description: Pooling function implementations + * + * $Date: 27 November 2023 + * $Revision: V.2.2.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +static void compare_and_replace_if_larger(int16_t *base, const int16_t *target, int32_t length) +{ +#if defined(ARM_MATH_MVEI) + int32_t loop_count = (length + 7) / 8; + for (int i = 0; i < loop_count; i++) + { + mve_pred16_t p = vctp16q((uint32_t)length); + const int16x8_t op_1 = vldrhq_z_s16(base, p); + const int16x8_t op_2 = vldrhq_z_s16(target, p); + const int16x8_t max = vmaxq_s16(op_1, op_2); + vstrhq_p_s16(base, max, p); + base += 8; + target += 8; + length -= 8; + } +#else + int16_t *dst = base; + const int16_t *src = target; + union arm_nnword ref_max; + union arm_nnword comp_max; + int32_t cnt = length >> 1; + + while (cnt > 0l) + { + ref_max.word = arm_nn_read_s16x2(dst); + comp_max.word = arm_nn_read_q15x2_ia(&src); + + if (comp_max.half_words[0] > ref_max.half_words[0]) + { + ref_max.half_words[0] = comp_max.half_words[0]; + } + if (comp_max.half_words[1] > ref_max.half_words[1]) + { + ref_max.half_words[1] = comp_max.half_words[1]; + } + + arm_nn_write_q15x2_ia(&dst, ref_max.word); + + cnt--; + } + + if (length & 0x1) + { + if (*src > *dst) + { + *dst = *src; + } + } +#endif +} + +static void clamp_output(int16_t *source, int32_t length, const int16_t act_min, const int16_t act_max) +{ +#if defined(ARM_MATH_MVEI) + const int16x8_t min = vdupq_n_s16((int16_t)act_min); + const int16x8_t max = vdupq_n_s16((int16_t)act_max); + + int32_t loop_count = (length + 7) / 8; + for (int i = 0; i < loop_count; i++) + { + mve_pred16_t p = vctp16q((uint32_t)length); + length -= 
8; + const int16x8_t src = vldrhq_z_s16(source, p); + int16x8_t res = vmaxq_x_s16(src, min, p); + res = vminq_x_s16(res, max, p); + vstrhq_p_s16(source, res, p); + source += 8; + } +#else + union arm_nnword in; + int32_t cnt = length >> 1; + + while (cnt > 0l) + { + in.word = arm_nn_read_s16x2(source); + + in.half_words[0] = MAX(in.half_words[0], act_min); + in.half_words[0] = MIN(in.half_words[0], act_max); + in.half_words[1] = MAX(in.half_words[1], act_min); + in.half_words[1] = MIN(in.half_words[1], act_max); + + arm_nn_write_q15x2_ia(&source, in.word); + cnt--; + } + + if (length & 0x1) + { + int16_t comp = *source; + comp = MAX(comp, act_min); + comp = MIN(comp, act_max); + *source = comp; + } +#endif +} + +/** + * @ingroup Public + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * Optimized s16 max pooling function + * + * Refer to header file for details. + * + */ + +arm_cmsis_nn_status arm_max_pool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *dst) +{ + (void)ctx; + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int16_t act_min = pool_params->activation.min; + const int16_t act_max = pool_params->activation.max; + const int32_t channel_in = input_dims->c; + const int32_t batch_size = input_x * input_y * channel_in; + int32_t batch_cnt = input_dims->n; + + if (batch_cnt < 1) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + while (batch_cnt) + { + + int16_t *dst_base = dst; + + for (int 
i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++) + { + for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: (base_idx_ + kernel__start) >= 0 */ + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + /* Condition for kernel end dimension: (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x); + + int count = 0; + + for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++) + { + const int16_t *start = src + channel_in * (k_x + base_idx_x + (k_y + base_idx_y) * input_x); + + if (count == 0) + { + memcpy(dst, start, channel_in * sizeof(int16_t)); + count++; + } + else + { + compare_and_replace_if_larger(dst, start, channel_in); + } + } + } + /* 'count' is expected to be non-zero here. */ + dst += channel_in; + } + } + + clamp_output(dst_base, output_x * output_y * channel_in, act_min, act_max); + + src += batch_size; + batch_cnt--; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Pooling group + */ diff --git a/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s8.c b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s8.c new file mode 100644 index 0000000..2dd4b49 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/PoolingFunctions/arm_max_pool_s8.c @@ -0,0 +1,243 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_max_pool_s8.c
+ * Description: Pooling function implementations
+ *
+ * $Date: 27 November 2023
+ * $Revision: V.3.1.0
+ *
+ * Target Processor: Cortex-M CPUs
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnfunctions.h"
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/* Element-wise in-place max: base[i] = MAX(base[i], target[i]) for 'length' s8 values. */
+static void compare_and_replace_if_larger_q7(int8_t *base, const int8_t *target, int32_t length)
+{
+#if defined(ARM_MATH_MVEI)
+    int32_t loop_count = (length + 15) / 16;
+    for (int i = 0; i < loop_count; i++)
+    {
+        mve_pred16_t p = vctp8q((uint32_t)length); /* tail predication for the final partial vector */
+        const int8x16_t op_1 = vldrbq_z_s8(base, p);
+        const int8x16_t op_2 = vldrbq_z_s8(target, p);
+        const int8x16_t max = vmaxq_x_s8(op_1, op_2, p);
+        vstrbq_p_s8(base, max, p);
+        base += 16;
+        target += 16;
+        length -= 16;
+    }
+#else
+    int8_t *dst = base;
+    const int8_t *src = target;
+    union arm_nnword ref_max;
+    union arm_nnword comp_max;
+    int32_t cnt = length >> 2; /* four s8 values are processed per 32-bit word */
+
+    while (cnt > 0l)
+    {
+        ref_max.word = arm_nn_read_s8x4(dst);
+        comp_max.word = arm_nn_read_s8x4_ia(&src);
+
+        if (comp_max.bytes[0] > ref_max.bytes[0])
+        {
+            ref_max.bytes[0] = comp_max.bytes[0];
+        }
+        if (comp_max.bytes[1] > ref_max.bytes[1])
+        {
+            ref_max.bytes[1] = comp_max.bytes[1];
+        }
+        if (comp_max.bytes[2] > ref_max.bytes[2])
+        {
+            ref_max.bytes[2] = comp_max.bytes[2];
+        }
+        if (comp_max.bytes[3] > ref_max.bytes[3])
+        {
+            ref_max.bytes[3] = comp_max.bytes[3];
+        }
+
+        arm_nn_write_s8x4_ia(&dst, ref_max.word);
+
+        cnt--;
+    }
+
+    cnt = length & 0x3; /* 0..3 leftover elements */
+    while (cnt > 0l)
+    {
+        if (*src > *dst)
+        {
+            *dst = *src;
+        }
+        dst++;
+        src++;
+        cnt--;
+    }
+#endif
+}
+
+/* Clamp 'length' s8 values in place to [act_min, act_max]. */
+static void clamp_output(int8_t *source, int32_t length, const int32_t act_min, const int32_t act_max)
+{
+#if defined(ARM_MATH_MVEI)
+    int32_t loop_count = (length + 15) / 16;
+    const int8x16_t vmin = vdupq_n_s8((int8_t)act_min);
+    const int8x16_t vmax = vdupq_n_s8((int8_t)act_max);
+
+    for (int i = 0; i < loop_count; i++)
+    {
+        mve_pred16_t p = vctp8q((uint32_t)length);
+        length -= 16;
+        const int8x16_t src = vldrbq_z_s8(source, p);
+        int8x16_t res = vmaxq_x_s8(src, vmin, p);
+        res = vminq_x_s8(res, vmax, p);
+        vstrbq_p_s8(source, res, p);
+        source += 16;
+    }
+#else
+    union arm_nnword in;
+    int32_t cnt = length >> 2; /* four s8 values per 32-bit word */
+
+    while (cnt > 0l)
+    {
+        in.word = arm_nn_read_s8x4(source);
+
+        in.bytes[0] = MAX(in.bytes[0], act_min);
+        in.bytes[0] = MIN(in.bytes[0], act_max);
+        in.bytes[1] = MAX(in.bytes[1], act_min);
+        in.bytes[1] = MIN(in.bytes[1], act_max);
+        in.bytes[2] = MAX(in.bytes[2], act_min);
+        in.bytes[2] = MIN(in.bytes[2], act_max);
+        in.bytes[3] = MAX(in.bytes[3], act_min);
+        in.bytes[3] = MIN(in.bytes[3], act_max);
+
+        arm_nn_write_s8x4_ia(&source, in.word);
+        cnt--;
+    }
+
+    cnt = length & 0x3; /* 0..3 leftover elements */
+    while (cnt > 0l)
+    {
+        int32_t comp = *source;
+        comp = MAX(comp, act_min);
+        comp = MIN(comp, act_max);
+        *source++ = (int8_t)comp;
+        cnt--;
+    }
+#endif
+}
+
+/**
+ * @ingroup Public
+ */
+
+/**
+ * @addtogroup Pooling
+ * @{
+ */
+
+/*
+ * Optimized s8 max pooling function
+ *
+ * Refer to header file for details.
+ *
+ */
+arm_cmsis_nn_status arm_max_pool_s8(const cmsis_nn_context *ctx,
+                                    const cmsis_nn_pool_params *pool_params,
+                                    const cmsis_nn_dims *input_dims,
+                                    const int8_t *src,
+                                    const cmsis_nn_dims *filter_dims,
+                                    const cmsis_nn_dims *output_dims,
+                                    int8_t *dst)
+{
+    (void)ctx; /* scratch context is not used by this implementation */
+    const int32_t input_y = input_dims->h;
+    const int32_t input_x = input_dims->w;
+    const int32_t output_y = output_dims->h;
+    const int32_t output_x = output_dims->w;
+    const int32_t stride_y = pool_params->stride.h;
+    const int32_t stride_x = pool_params->stride.w;
+    const int32_t kernel_y = filter_dims->h;
+    const int32_t kernel_x = filter_dims->w;
+    const int32_t pad_y = pool_params->padding.h;
+    const int32_t pad_x = pool_params->padding.w;
+    const int32_t act_min = pool_params->activation.min;
+    const int32_t act_max = pool_params->activation.max;
+    const int32_t channel_in = input_dims->c;
+    const int32_t batch_size = input_x * input_y * channel_in; /* elements per input batch */
+    int32_t batch_cnt = input_dims->n;
+
+    if (batch_cnt < 1)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+
+    while (batch_cnt)
+    {
+        int8_t *dst_base = dst;
+
+        for (int i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++)
+        {
+            for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++)
+            {
+                /* Condition for kernel start dimension: (base_idx_<axis> + kernel_<axis>_start) >= 0 */
+                const int32_t ker_y_start = MAX(0, -base_idx_y);
+                const int32_t ker_x_start = MAX(0, -base_idx_x);
+
+                /* Condition for kernel end dimension: (base_idx_<axis> + kernel_<axis>_end) < dim_src_<axis> */
+                const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y);
+                const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x);
+
+                int count = 0;
+
+                for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++)
+                {
+                    for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++)
+                    {
+                        const int8_t *start = src + channel_in * (k_x + base_idx_x + (k_y + base_idx_y) * input_x);
+
+                        if (count == 0)
+                        {
+                            arm_memcpy_s8(dst, start, channel_in); /* first window position seeds the running max */
+                            count++;
+                        }
+                        else
+                        {
+                            compare_and_replace_if_larger_q7(dst, start, channel_in);
+                        }
+                    }
+                }
+                /* 'count' is expected to be non-zero here. */
+                dst += channel_in;
+            }
+        }
+
+        clamp_output(dst_base, output_x * output_y * channel_in, act_min, act_max);
+
+        src += batch_size;
+        batch_cnt--;
+    }
+
+    return ARM_CMSIS_NN_SUCCESS;
+}
+
+/**
+ * @} end of Pooling group
+ */
diff --git a/src/third_party/cmsis_nn/Source/ReshapeFunctions/arm_reshape_s8.c b/src/third_party/cmsis_nn/Source/ReshapeFunctions/arm_reshape_s8.c
new file mode 100644
index 0000000..8d4ff3e
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/ReshapeFunctions/arm_reshape_s8.c
@@ -0,0 +1,57 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_reshape_s8.c
+ * Description: Reshape a s8 vector
+ *
+ * $Date: 26 October 2022
+ * $Revision: V.1.0.2
+ *
+ * Target Processor: Cortex-M cores
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnfunctions.h"
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/**
+ * @ingroup Public
+ */
+
+/**
+ * @addtogroup Reshape
+ * @{
+ */
+
+/*
+ * Basic s8 reshape function.
+ *
+ * Refer header file for details.
+ * + */ + +void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size) +{ + arm_memcpy_s8(output, input, total_size); +} + +/** + * @} end of Reshape group + */ diff --git a/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_get_buffer_sizes_s8.c b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_get_buffer_sizes_s8.c new file mode 100644 index 0000000..930d48c --- /dev/null +++ b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_get_buffer_sizes_s8.c @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_svdf_get_buffer_sizes_s8.c + * Description: Collection of get buffer size functions for svdf s8 layer function. 
+ * + * $Date: 5 September 2023 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" + +/** + * @ingroup SVDF + */ + +/** + * @addtogroup GetBufferSizeSVDF + * @{ + */ + +int32_t arm_svdf_s8_get_buffer_size_dsp(const cmsis_nn_dims *weights_feature_dims) +{ + (void)weights_feature_dims; + return 0; +} + +int32_t arm_svdf_s8_get_buffer_size_mve(const cmsis_nn_dims *weights_feature_dims) +{ + return weights_feature_dims->n * sizeof(int32_t); +} + +int32_t arm_svdf_s8_get_buffer_size(const cmsis_nn_dims *weights_feature_dims) +{ +#if defined(ARM_MATH_MVEI) + return arm_svdf_s8_get_buffer_size_mve(weights_feature_dims); +#else + return arm_svdf_s8_get_buffer_size_dsp(weights_feature_dims); +#endif +} + +/** + * @} end of GetBufferSizeSVDF group + */ diff --git a/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_s8.c b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_s8.c new file mode 100644 index 0000000..cfe24b9 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_s8.c @@ -0,0 +1,289 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_svdf_s8.c
+ * Description: S8 basic SVDF layer function
+ *
+ * $Date: 24 Sep 2024
+ * $Revision: V.6.1.1
+ *
+ * Target : Arm(R) M-Profile Architecture
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnfunctions.h"
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/**
+ * @ingroup Public
+ */
+
+/**
+ * @addtogroup SVDF
+ * @{
+ */
+
+/*
+ * S8 SVDF layer function for TensorFlow Lite with 8 bit state tensor
+ *
+ * Refer to header file for details.
+ *
+ */
+
+arm_cmsis_nn_status arm_svdf_s8(const cmsis_nn_context *ctx,
+                                const cmsis_nn_context *input_ctx,
+                                const cmsis_nn_context *output_ctx,
+                                const cmsis_nn_svdf_params *svdf_params,
+                                const cmsis_nn_per_tensor_quant_params *input_quant_params,
+                                const cmsis_nn_per_tensor_quant_params *output_quant_params,
+                                const cmsis_nn_dims *input_dims,
+                                const int8_t *input_data,
+                                const cmsis_nn_dims *state_dims,
+                                int8_t *state_data,
+                                const cmsis_nn_dims *weights_feature_dims,
+                                const int8_t *weights_feature_data,
+                                const cmsis_nn_dims *weights_time_dims,
+                                const int16_t *weights_time_data,
+                                const cmsis_nn_dims *bias_dims,
+                                const int32_t *bias_data,
+                                const cmsis_nn_dims *output_dims,
+                                int8_t *output_data)
+{
+    (void)bias_dims;
+    (void)state_dims;
+    (void)output_dims;
+
+#if defined(ARM_MATH_MVEI)
+    if (ctx->buf == NULL)
+    {
+        return (ARM_CMSIS_NN_ARG_ERROR);
+    }
+#endif
+
+    const int32_t multiplier_in = input_quant_params->multiplier;
+    const int32_t shift_in = input_quant_params->shift;
+    const int32_t multiplier_out = output_quant_params->multiplier;
+    const int32_t shift_2 = output_quant_params->shift;
+    const int32_t zp_in = svdf_params->input_offset;
+    const int32_t zp_out = svdf_params->output_offset;
+    const int32_t in_activation_min = svdf_params->input_activation.min;
+    const int32_t in_activation_max = svdf_params->input_activation.max;
+    const int32_t out_activation_min = svdf_params->output_activation.min;
+    const int32_t out_activation_max = svdf_params->output_activation.max;
+    const int16_t rank = svdf_params->rank;
+
+    const int32_t input_batches = input_dims->n;
+    const int32_t input_height = input_dims->h;
+    const int32_t feature_batches = weights_feature_dims->n;
+    const int32_t time_batches = weights_time_dims->h;
+    const int32_t unit_count = feature_batches / rank; /* NOTE(review): assumes rank != 0 — confirm with caller */
+
+    if (input_ctx->buf == NULL)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+    int32_t *buffer_a = (int32_t *)input_ctx->buf;
+
+    if (output_ctx->buf == NULL)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+    int32_t *buffer_b = (int32_t *)output_ctx->buf;
+
+    int32_t *kernel_sum_data = (int32_t *)ctx->buf; /* checked non-NULL above only when ARM_MATH_MVEI */
+
+    // Left shift state
+    // Using memcpy on overlapping data is in general undefined behaviour, but since the behaviour of arm_memcpy_s8 is
+    // known it is certain that the data has been copied before it is overwritten in this case.
+#ifdef ARM_MATH_MVEI
+    arm_memcpy_s8(state_data,
+                  state_data + 1,
+                  (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int8_t)));
+#else
+    memmove(state_data,
+            state_data + 1,
+            (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int8_t)));
+#endif
+
+    // Matrix multiplication input * feature weight
+    for (int i_batch = 0; i_batch < input_batches; i_batch++)
+    {
+        int8_t *res_ptr = state_data + (time_batches * i_batch * feature_batches) + (time_batches - 1);
+        const int8_t *input = input_data + i_batch * input_height;
+
+        arm_cmsis_nn_status res = arm_nn_vec_mat_mult_t_s8(input,
+                                                           weights_feature_data,
+                                                           kernel_sum_data,
+                                                           NULL,
+                                                           res_ptr,
+                                                           -zp_in,
+                                                           0,
+                                                           multiplier_in,
+                                                           shift_in,
+                                                           input_height,
+                                                           feature_batches,
+                                                           in_activation_min,
+                                                           in_activation_max,
+                                                           time_batches,
+                                                           0);
+
+        if (res != ARM_CMSIS_NN_SUCCESS)
+        {
+            return res;
+        }
+    }
+
+    // Matrix multiplicate time weight * state tensors
+    {
+        int32_t *ptr_a = buffer_a;
+        const int8_t *v2 = state_data;
+        for (int i_batch = 0; i_batch < input_batches; i_batch++)
+        {
+            const int8_t *v1 = weights_time_data;
+
+            for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++)
+            {
+                *ptr_a = 0;
+                int32_t sum = 0;
+#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
+                // Perform matrix multiplication in blocks of four
+                int j = 0;
+                int32_t block_count = time_batches >> 2;
+                for (int i = 0; i < block_count; i++)
+                {
+                    j += 4;
+
+                    int32_t r1_1, r1_2, r2_1, r2_2;
+                    v1 = read_and_pad_reordered(v1, &r1_1, &r1_2);
+                    v2 = read_and_pad_reordered(v2, &r2_1, &r2_2);
+                    sum = SMLAD(r1_1, r2_1, sum);
+                    sum = SMLAD(r1_2, r2_2, sum);
+                }
+
+                // Process the remaining data
+                for (; j < time_batches; j++)
+                {
+                    sum += *v1 * *v2;
+                    v1++;
+                    v2++;
+                }
+#else
+                for (int j = 0; j < time_batches; j++)
+                {
+                    sum += *v1 * *v2;
+                    v1++;
+                    v2++;
+                }
+#endif
+
+                *ptr_a = sum;
+                ptr_a++;
+            }
+        }
+    }
+
+    if (bias_data)
+    {
+        if (unit_count == feature_batches)
+        {
+            for (int i = 0; i < input_batches; i++)
+            {
+                int32_t *output_temp = buffer_b + i * feature_batches;
+                const int32_t *ptr_a = buffer_a + i * feature_batches;
+
+                const int32_t *bi = bias_data;
+                for (int j = 0; j < feature_batches; j++)
+                {
+                    output_temp[j] = ptr_a[j] + bi[j];
+                }
+            }
+        }
+        else
+        {
+            for (int i_batch = 0; i_batch < input_batches; i_batch++)
+            {
+                int32_t *output_data_temp = buffer_b + i_batch * unit_count;
+                int32_t *ptr_a = buffer_a + i_batch * feature_batches;
+
+                for (int i = 0; i < unit_count; i++)
+                {
+                    int32_t sum = bias_data[i];
+                    for (int j = 0; j < rank; j++)
+                    {
+                        sum += *ptr_a;
+                        ptr_a++;
+                    }
+                    output_data_temp[i] = sum;
+                }
+            }
+        }
+    }
+    else
+    {
+        for (int i_batch = 0; i_batch < input_batches; i_batch++)
+        {
+            int32_t *output_data_temp = buffer_b + i_batch * unit_count;
+            int32_t *ptr_a = buffer_a + i_batch * feature_batches;
+
+            for (int i = 0; i < unit_count; i++)
+            {
+                int32_t sum = 0;
+                for (int j = 0; j < rank; j++)
+                {
+                    sum += *ptr_a;
+                    ptr_a++;
+                }
+                output_data_temp[i] = sum;
+            }
+        }
+    }
+
+#if defined(ARM_MATH_MVEI)
+    int32_t num_elements = input_batches * unit_count;
+    const int32_t loop_count = (num_elements + 3) / 4;
+    for (int i_op = 0; i_op < loop_count; i_op++)
+    {
+        mve_pred16_t p = vctp32q((uint32_t)num_elements);
+        int32x4_t op = vldrwq_z_s32(buffer_b, p);
+        op = arm_requantize_mve(op, multiplier_out, shift_2);
+        op = vaddq_n_s32(op, zp_out);
+        const int32x4_t min_vec = vdupq_n_s32((int8_t)out_activation_min);
+        const int32x4_t max_vec = vdupq_n_s32((int8_t)out_activation_max);
+        op = vmaxq_s32(op, min_vec);
+        op = vminq_s32(op, max_vec);
+        vstrbq_p_s32(output_data, op, p);
+        output_data += 4;
+        buffer_b += 4;
+        num_elements -= 4;
+    }
+#else
+    for (int i = 0; i < input_batches * unit_count; i++)
+    {
+        output_data[i] = (int8_t)CLAMP(
+            arm_nn_requantize(buffer_b[i], multiplier_out, shift_2) + zp_out, out_activation_max, out_activation_min);
+    }
+#endif
+
+    return (ARM_CMSIS_NN_SUCCESS);
+}
+
+/**
+ * @} end of SVDF group
+ */
diff --git a/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_state_s16_s8.c b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_state_s16_s8.c
new file mode 100644
index 0000000..81deb58
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/SVDFunctions/arm_svdf_state_s16_s8.c
@@ -0,0 +1,274 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_svdf_state_s16_s8.c
+ * Description: S8 basic SVDF layer function with s16 state tensor
+ *
+ * $Date: 24 Sep 2024
+ * $Revision: V.3.1.1
+ *
+ * Target : Arm(R) M-Profile Architecture
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnfunctions.h"
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/**
+ * @ingroup Public
+ */
+
+/**
+ * @addtogroup SVDF
+ * @{
+ */
+
+/*
+ * S8 SVDF layer function for TensorFlow Lite with 16 bit state tensor
+ *
+ * Refer to header file for details.
+ *
+ */
+
+arm_cmsis_nn_status arm_svdf_state_s16_s8(const cmsis_nn_context *input_ctx,
+                                          const cmsis_nn_context *output_ctx,
+                                          const cmsis_nn_svdf_params *svdf_params,
+                                          const cmsis_nn_per_tensor_quant_params *input_quant_params,
+                                          const cmsis_nn_per_tensor_quant_params *output_quant_params,
+                                          const cmsis_nn_dims *input_dims,
+                                          const int8_t *input_data,
+                                          const cmsis_nn_dims *state_dims,
+                                          int16_t *state_data,
+                                          const cmsis_nn_dims *weights_feature_dims,
+                                          const int8_t *weights_feature_data,
+                                          const cmsis_nn_dims *weights_time_dims,
+                                          const int16_t *weights_time_data,
+                                          const cmsis_nn_dims *bias_dims,
+                                          const int32_t *bias_data,
+                                          const cmsis_nn_dims *output_dims,
+                                          int8_t *output_data)
+{
+    (void)bias_dims;
+    (void)state_dims;
+    (void)output_dims;
+
+    const int32_t multiplier_in = input_quant_params->multiplier;
+    const int32_t shift_in = input_quant_params->shift;
+    const int32_t multiplier_out = output_quant_params->multiplier;
+    const int32_t shift_2 = output_quant_params->shift;
+    const int32_t zp_in = svdf_params->input_offset;
+    const int32_t zp_out = svdf_params->output_offset;
+    const int32_t in_activation_min = svdf_params->input_activation.min;
+    const int32_t in_activation_max = svdf_params->input_activation.max;
+    const int32_t out_activation_min = svdf_params->output_activation.min;
+    const int32_t out_activation_max = svdf_params->output_activation.max;
+    const int16_t rank = svdf_params->rank;
+
+    const int32_t input_batches = input_dims->n;
+    const int32_t input_height = input_dims->h;
+    const int32_t feature_batches = weights_feature_dims->n;
+    const int32_t time_batches = weights_time_dims->h;
+    const int32_t unit_count = feature_batches / rank; /* NOTE(review): assumes rank != 0 — confirm with caller */
+
+    if (input_ctx->buf == NULL)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+    int32_t *buffer_a = (int32_t *)input_ctx->buf;
+
+    if (output_ctx->buf == NULL)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+    int32_t *buffer_b = (int32_t *)output_ctx->buf;
+
+    // Left shift state
+    // Using memcpy on overlapping data is in general undefined behaviour, but since the behaviour of arm_memcpy_s8 is
+    // known it is certain that the data has been copied before it is overwritten in this case.
+#ifdef ARM_MATH_MVEI
+    arm_memcpy_s8((int8_t *)state_data,
+                  (int8_t *)(state_data + 1),
+                  (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int16_t)));
+#else
+    memmove(state_data,
+            state_data + 1,
+            (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int16_t)));
+#endif
+
+    // Matrix multiplication input * feature weight
+    for (int i_batch = 0; i_batch < input_batches; i_batch++)
+    {
+        int16_t *res_ptr = state_data + (time_batches * i_batch * feature_batches) + (time_batches - 1);
+        const int8_t *weight = weights_feature_data;
+        const int8_t *input = input_data + i_batch * input_height;
+
+        arm_cmsis_nn_status res = arm_nn_vec_mat_mult_t_svdf_s8(input,
+                                                                weight,
+                                                                res_ptr,
+                                                                -zp_in,
+                                                                time_batches,
+                                                                multiplier_in,
+                                                                shift_in,
+                                                                input_height,
+                                                                feature_batches,
+                                                                in_activation_min,
+                                                                in_activation_max);
+
+        if (res != ARM_CMSIS_NN_SUCCESS)
+        {
+            return res;
+        }
+    }
+
+    {
+        // Matrix multiplication time weight * state tensors
+        int32_t *ptr_a = buffer_a;
+        const int16_t *v2 = state_data;
+        for (int i_batch = 0; i_batch < input_batches; i_batch++)
+        {
+            const int16_t *v1 = weights_time_data;
+
+            for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++)
+            {
+                *ptr_a = 0;
+                int32_t sum = 0;
+#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
+                // Perform matrix multiplication in blocks of two
+                int j = 0;
+                int32_t block_count = time_batches >> 1;
+                for (int i = 0; i < block_count; i++)
+                {
+                    j += 2;
+                    int32_t r1 = arm_nn_read_q15x2_ia(&v1);
+                    int32_t r2 = arm_nn_read_q15x2_ia(&v2);
+
+                    sum = SMLAD(r1, r2, sum);
+                }
+
+                // Process the remaining data
+                for (; j < time_batches; j++)
+                {
+                    sum += *v1 * *v2;
+                    v1++;
+                    v2++;
+                }
+#else
+                for (int j = 0; j < time_batches; j++)
+                {
+                    sum += *v1 * *v2;
+                    v1++;
+                    v2++;
+                }
+#endif
+
+                *ptr_a = sum;
+                ptr_a++;
+            }
+        }
+    }
+
+    if (bias_data)
+    {
+        if (unit_count == feature_batches)
+        {
+            for (int i = 0; i < input_batches; i++)
+            {
+                int32_t *output_temp = buffer_b + i * feature_batches;
+                const int32_t *ptr_a = buffer_a + i * feature_batches;
+
+                const int32_t *bi = bias_data;
+                for (int j = 0; j < feature_batches; j++)
+                {
+                    output_temp[j] = ptr_a[j] + bi[j];
+                }
+            }
+        }
+        else
+        {
+            for (int i_batch = 0; i_batch < input_batches; i_batch++)
+            {
+                int32_t *output_data_temp = buffer_b + i_batch * unit_count;
+                int32_t *ptr_a = buffer_a + i_batch * feature_batches;
+
+                for (int i = 0; i < unit_count; i++)
+                {
+                    int32_t sum = bias_data[i];
+                    for (int j = 0; j < rank; j++)
+                    {
+                        sum += *ptr_a;
+                        ptr_a++;
+                    }
+                    output_data_temp[i] = sum;
+                }
+            }
+        }
+    }
+    else
+    {
+        for (int i_batch = 0; i_batch < input_batches; i_batch++)
+        {
+            int32_t *output_data_temp = buffer_b + i_batch * unit_count;
+            int32_t *ptr_a = buffer_a + i_batch * feature_batches;
+
+            for (int i = 0; i < unit_count; i++)
+            {
+                int32_t sum = 0;
+                for (int j = 0; j < rank; j++)
+                {
+                    sum += *ptr_a;
+                    ptr_a++;
+                }
+                output_data_temp[i] = sum;
+            }
+        }
+    }
+
+#if defined(ARM_MATH_MVEI)
+    int32_t num_elements = input_batches * unit_count;
+    const int32_t loop_count = (num_elements + 3) / 4;
+    for (int i_op = 0; i_op < loop_count; i_op++)
+    {
+        mve_pred16_t p = vctp32q((uint32_t)num_elements);
+        int32x4_t op = vldrwq_z_s32(buffer_b, p);
+        op = arm_requantize_mve(op, multiplier_out, shift_2);
+        op = vaddq_n_s32(op, zp_out);
+        const int32x4_t min_vec = vdupq_n_s32((int8_t)out_activation_min);
+        const int32x4_t max_vec = vdupq_n_s32((int8_t)out_activation_max);
+        op = vmaxq_s32(op, min_vec);
+        op = vminq_s32(op, max_vec);
+        vstrbq_p_s32(output_data, op, p);
+        output_data += 4;
+        buffer_b += 4;
+        num_elements -= 4;
+    }
+#else
+    for (int i = 0; i < input_batches * unit_count; i++)
+    {
+        output_data[i] = (int8_t)CLAMP(
+            arm_nn_requantize(buffer_b[i], multiplier_out, shift_2) + zp_out, out_activation_max, out_activation_min);
+    }
+#endif
+
+    return (ARM_CMSIS_NN_SUCCESS);
+}
+
+/**
+ * @} end of SVDF group
+ */
diff --git a/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c
new file mode 100644
index 0000000..6d73402
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c
@@ -0,0 +1,148 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_nn_softmax_common_s8.c
+ * Description: Softmax with s8 input and output of s8 or s16.
+ *
+ * $Date: 5 January 2023
+ * $Revision: V.1.1.0
+ *
+ * Target : Arm(R) M-Profile Architecture
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+#define ACCUM_BITS 12
+
+/**
+ * @ingroup groupSupport
+ */
+
+/**
+ * @defgroup supportSoftmax Softmax
+ *
+ * Support functions for Softmax
+ *
+ */
+
+/**
+ * @addtogroup supportSoftmax
+ * @{
+ */
+
+/*
+ * Softmax function with s8 input and output of s8 or s16.
+ *
+ * Refer header file for details.
+ *
+ */
+void arm_nn_softmax_common_s8(const int8_t *input,
+                              const int32_t num_rows,
+                              const int32_t row_size,
+                              const int32_t mult,
+                              const int32_t shift,
+                              const int32_t diff_min,
+                              const bool int16_output,
+                              void *output)
+{
+    const int32_t mask = (1 << shift);
+
+    int32_t col = 0;
+    int32_t row_idx;
+
+    for (row_idx = 0; row_idx < num_rows; ++row_idx)
+    {
+        // Find the maximum value in order to ensure numerical stability
+        int8_t max = *input;
+
+        for (col = 1; col < row_size; ++col)
+        {
+            max = MAX(max, input[col]);
+        }
+
+        int32_t diff = 0;
+        int32_t sum = 0;
+
+        for (col = 0; col < row_size; ++col)
+        {
+            diff = input[col] - max;
+            if (diff >= diff_min) /* contributions below diff_min are treated as zero */
+            {
+                sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS);
+            }
+        }
+
+        const int32_t headroom = CLZ(sum);
+        const int32_t shifted_scale = ONE_OVER1((sum > 0 ? sum << headroom : 0) - (1 << 31));
+        int32_t bits_over_unit;
+
+        if (int16_output)
+        {
+            int16_t *output_s16 = (int16_t *)output + row_idx * row_size;
+
+            bits_over_unit = ACCUM_BITS - headroom + 15;
+
+            for (col = 0; col < row_size; ++col)
+            {
+                diff = input[col] - max;
+
+                if (diff >= diff_min)
+                {
+                    const int32_t res =
+                        DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) +
+                        NN_Q15_MIN;
+                    output_s16[col] = (int16_t)CLAMP(res, (int32_t)NN_Q15_MAX, (int32_t)NN_Q15_MIN);
+                }
+                else
+                {
+                    output_s16[col] = NN_Q15_MIN;
+                }
+            }
+        }
+        else
+        {
+            int8_t *output_s8 = (int8_t *)output + row_idx * row_size;
+
+            bits_over_unit = ACCUM_BITS - headroom + 23;
+
+            for (col = 0; col < row_size; ++col)
+            {
+                diff = input[col] - max;
+                if (diff >= diff_min)
+                {
+                    const int32_t res =
+                        DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) +
+                        NN_Q7_MIN;
+                    output_s8[col] = (int8_t)CLAMP(res, (int32_t)NN_Q7_MAX, (int32_t)NN_Q7_MIN);
+                }
+                else
+                {
+                    output_s8[col] = NN_Q7_MIN;
+                }
+            }
+        }
+
+        input += row_size;
+    }
+}
+
+/**
+ * @} end of Doxygen group
+ */
diff --git a/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s16.c b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s16.c
new file mode 100644
index 0000000..a132e96
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s16.c
@@ -0,0 +1,122 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2022-2023 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ----------------------------------------------------------------------
+ * Project: CMSIS NN Library
+ * Title: arm_softmax_s16.c
+ * Description: S16 softmax function
+ *
+ * $Date: 5 January 2023
+ * $Revision: V.2.1.0
+ *
+ * Target : Arm(R) M-Profile Architecture
+ *
+ * -------------------------------------------------------------------- */
+
+#include "third_party/cmsis_nn/Include/arm_nnfunctions.h"
+#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h"
+
+/**
+ * @addtogroup Softmax
+ * @{
+ */
+
+arm_cmsis_nn_status arm_softmax_s16(const int16_t *input,
+                                    const int32_t num_rows,
+                                    const int32_t row_size,
+                                    const int32_t mult,
+                                    const int32_t shift,
+                                    const cmsis_nn_softmax_lut_s16 *softmax_params,
+                                    int16_t *output)
+{
+    int32_t col = 0;
+    int32_t row_idx;
+
+    if (softmax_params->exp_lut == NULL || softmax_params->one_by_one_lut == NULL)
+    {
+        return ARM_CMSIS_NN_ARG_ERROR;
+    }
+
+    for (row_idx = 0; row_idx < num_rows; ++row_idx)
+    {
+        // Find the maximum value in order to ensure numerical stability
+        int16_t max = *input;
+        for (col = 1; col < row_size; ++col)
+        {
+            max = MAX(max, input[col]);
+        }
+
+        int32_t diff = 0;
+        int32_t sum = 0;
+        int16_t *cached_exp_results = output; /* output row doubles as scratch for the exp values */
+
+        for (col = 0; col < row_size; ++col)
+        {
+            diff = input[col] - max;
+            const int32_t scaled_diff = arm_nn_requantize(diff, mult, shift);
+            const int32_t symmetric_scaled_diff = scaled_diff + NN_Q15_MAX;
+            const int16_t saturated_symmetric_scaled_diff = MIN(MAX(symmetric_scaled_diff, NN_Q15_MIN), NN_Q15_MAX);
+
+            // Lookup from exp table and cache result for next step
+            const int16_t index = (256 + (saturated_symmetric_scaled_diff >> 7));
+            const int16_t offset = saturated_symmetric_scaled_diff & 0x7f;
+            const int16_t base = softmax_params->exp_lut[index];
+            const int16_t slope = softmax_params->exp_lut[index + 1] - softmax_params->exp_lut[index];
+            const int16_t delta = (slope * offset + 64) >> 7;
+            const int16_t result = (base + delta);
+            cached_exp_results[col] = result;
+
+            sum += cached_exp_results[col];
+        }
+
+        const int32_t headroom = CLZ(sum);
+
+        // Compute the reciprocal 1/sum
+        const int32_t shifted_sum = (((sum) << (headroom - 1)) + (1 << 13)) >> 14;
+
+        // Since LUT computes 1/(1 + x), compute x = (sum - 1) => -65536
+        // Since LUT expects a symmetrical input, recenter from [UINT16_MIN, UINT16_MAX] to [INT16_MIN, INT16_MAX] =>
+        // -32768 ==> So in total -65536 -32768 => -98304
+        const int16_t symmetric_shifted_sum = shifted_sum - 98304;
+
+        // Lookup from one by one table
+        const int16_t index = (256 + (symmetric_shifted_sum >> 7));
+        const int16_t offset = symmetric_shifted_sum & 0x7f;
+        const int16_t base = softmax_params->one_by_one_lut[index];
+        const int16_t slope = softmax_params->one_by_one_lut[index + 1] - softmax_params->one_by_one_lut[index];
+        const int16_t delta = (slope * offset + 64) >> 7;
+        const int16_t one_by_one_result = (base + delta);
+
+        for (col = 0; col < row_size; ++col)
+        {
+            const int16_t right_shift = 30 - headroom;
+            int32_t result = (cached_exp_results[col] * one_by_one_result) >> right_shift;
+            result = (result + 1) >> 1; // Last shift position and insert round
+            output[col] = (int16_t)result;
+        }
+
+        output += row_size;
+        input += row_size;
+    }
+
+    return ARM_CMSIS_NN_SUCCESS;
+}
+
+/**
+ * @} end of Softmax group
+ */
diff --git a/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8.c b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8.c
new file mode 100644
index 0000000..d49e0dc
--- /dev/null
+++ b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8.c
@@ -0,0 +1,215 @@
+/*
+ * SPDX-FileCopyrightText: Copyright 2010-2023 Arm Limited and/or its affiliates
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_softmax_s8.c + * Description: S8 softmax function + * + * $Date: 5 January 2023 + * $Revision: V.2.2.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +#define ACCUM_BITS 12 + +#if defined(ARM_MATH_MVEI) && !defined(ARM_GCC_12_2_ICE) +static int32x4_t arm_exp_on_negative_values_mve_32x4(int32x4_t val) +{ + #define SHIFT_START (24) + int32_t shift = SHIFT_START; + int32x4_t mask; + + const int32x4_t val_mod_minus_quarter = + vandq_s32(val, vdupq_n_s32((1 << SHIFT_START) - 1)) - vdupq_n_s32(1 << SHIFT_START); + const int32x4_t remainder = vsubq_s32(val_mod_minus_quarter, val); + const int32x4_t x = vaddq_n_s32(val_mod_minus_quarter << 5, 1 << 28); + const int32x4_t x2 = MUL_SAT_MVE(x, x); + const int32x4_t op_1 = DIV_POW2_MVE(MUL_SAT_MVE(x2, x2), 2) + MUL_SAT_MVE(x2, x); + const int32x4_t op_2 = x + DIV_POW2_MVE(MUL_SAT_MVE(op_1, vdupq_n_s32(715827883)) + x2, 1); + int32x4_t result = vdupq_n_s32(1895147668) + MUL_SAT_MVE(vdupq_n_s32(1895147668), op_2); + + #define SELECT_IF_NON_ZERO(x) \ + { \ + mve_pred16_t p = vcmpneq_n_s32(remainder & vdupq_n_s32(1 << shift++), 0); \ + mask = vmvnq_m_s32(vdupq_n_s32(0), vdupq_n_s32(0), p); \ + result = SELECT_USING_MASK(mask, MUL_SAT_MVE(result, vdupq_n_s32(x)), result); \ + } + + SELECT_IF_NON_ZERO(1672461947) + SELECT_IF_NON_ZERO(1302514674) + SELECT_IF_NON_ZERO(790015084) + SELECT_IF_NON_ZERO(290630308) + SELECT_IF_NON_ZERO(39332535) + SELECT_IF_NON_ZERO(720401) + SELECT_IF_NON_ZERO(242) + + #undef SELECT_IF_NON_ZERO + + mve_pred16_t p = vcmpeqq_n_s32(val, 0); + mask = vmvnq_m_s32(vdupq_n_s32(0), vdupq_n_s32(0), p); + + result = SELECT_USING_MASK(mask, vdupq_n_s32(NN_Q31_MAX), result); + return 
result; +} +#endif + +/** + * @ingroup Public + */ + +/** + * @addtogroup Softmax + * @{ + */ + +void arm_softmax_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output) +{ +#if defined(ARM_MATH_MVEI) && !defined(ARM_GCC_12_2_ICE) + + #define ACT_MIN ((int8_t)NN_Q7_MIN) + #define ACT_MAX ((int8_t)NN_Q7_MAX) + + const int32_t mask = (1 << shift); + + for (int i_num_rows = 0; i_num_rows < num_rows; ++i_num_rows) + { + int8_t max = ACT_MIN; + + int32_t vec_count = (row_size + 15) / 16; + uint32_t r_count = (uint32_t)row_size; + for (int i = 0; i < vec_count; i++) + { + mve_pred16_t p = vctp8q(r_count); + const int8x16_t ip = vldrbq_z_s8(&input[i * 16], p); + max = vmaxvq_p_s8(max, ip, p); + r_count -= 16; + } + + vec_count = row_size / 4; + int32_t idx = 0; + int32_t sum = 0; + + while (vec_count) + { + int32x4_t ip = vldrbq_s32(&input[idx * 4]); + ip = vsubq_n_s32(ip, max); + mve_pred16_t p = vcmpgeq_n_s32(ip, diff_min); + if (p != 0) + { + ip = vmulq_n_s32(ip, mask); + + int32x4_t res = MUL_SAT_MVE(ip, vdupq_n_s32(mult)); + + res = arm_exp_on_negative_values_mve_32x4(res); + res = DIV_POW2_MVE(res, ACCUM_BITS); + res = vpselq_s32(res, vdupq_n_s32(0), p); + sum += vaddvq_s32(res); + } + + vec_count--; + idx++; + } + + const int32_t tail_idx = row_size & ~3; + for (int i = 0; i < (row_size & 3); i++) + { + const int32_t diff = input[tail_idx + i] - max; + if (diff >= diff_min) + { + sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); + } + } + + const int32_t headroom = CLZ((uint32_t)sum); + const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; + const int32_t shifted_scale = ONE_OVER1((sum > 0 ? 
sum << headroom : 0) - (1 << 31)); + + vec_count = row_size / 4; + idx = 0; + + while (vec_count) + { + int32x4_t ip = vldrbq_s32(&input[idx]); + ip = vsubq_n_s32(ip, max); + + mve_pred16_t p = vcmpgeq_n_s32(ip, diff_min); + + int32x4_t tmp_res; + + if (p != 0) + { + ip = vmulq_n_s32(ip, mask); + + tmp_res = MUL_SAT_MVE(ip, vdupq_n_s32(mult)); + tmp_res = arm_exp_on_negative_values_mve_32x4(tmp_res); + tmp_res = MUL_SAT_MVE(vdupq_n_s32(shifted_scale), tmp_res); + tmp_res = DIV_POW2_MVE(tmp_res, bits_over_unit); + tmp_res += vdupq_n_s32(ACT_MIN); + + tmp_res = vmaxq_s32(tmp_res, vdupq_n_s32(ACT_MIN)); + tmp_res = vminq_s32(tmp_res, vdupq_n_s32(ACT_MAX)); + tmp_res = vpselq_s32(tmp_res, vdupq_n_s32(ACT_MIN), p); + } + else + { + tmp_res = vdupq_n_s32(ACT_MIN); + } + vstrbq_s32(&output[idx], tmp_res); + vec_count--; + idx += 4; + } + + for (int i = 0; i < (row_size & 3); i++) + { + int32_t diff = input[tail_idx + i] - max; + if (diff >= diff_min) + { + const int32_t res = + DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) + + NN_Q7_MIN; + output[tail_idx + i] = (int8_t)CLAMP(res, (int32_t)ACT_MAX, (int32_t)ACT_MIN); + } + else + { + output[tail_idx + i] = ACT_MIN; + } + } + + input += row_size; + output += row_size; + } +#else + arm_nn_softmax_common_s8(input, num_rows, row_size, mult, shift, diff_min, false, (void *)output); +#endif +} + +/** + * @} end of Softmax group + */ diff --git a/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8_s16.c b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8_s16.c new file mode 100644 index 0000000..4cbf2a9 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_s8_s16.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_softmax_s8_s16.c + * Description: S8 to s16 softmax function + * + * $Date: 7 January 2022 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Softmax + * @{ + */ + +void arm_softmax_s8_s16(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int16_t *output) +{ + arm_nn_softmax_common_s8(input, num_rows, row_size, mult, shift, diff_min, true, (void *)output); +} +/** + * @} end of Softmax group + */ diff --git a/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_u8.c b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_u8.c new file mode 100644 index 0000000..a7c1bff --- /dev/null +++ b/src/third_party/cmsis_nn/Source/SoftmaxFunctions/arm_softmax_u8.c @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_softmax_u8.c + * Description: U8 softmax function + * + * $Date: 5 January 2023 + * $Revision: V.1.1.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +#define ACCUM_BITS 12 + +/** + * @ingroup Public + */ + +/** + * @addtogroup Softmax + * @{ + */ +void arm_softmax_u8(const uint8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + uint8_t *output) +{ + const int32_t mask = (1 << shift); + + int32_t col = 0; + int32_t row_idx; + + for (row_idx = 0; row_idx < num_rows; ++row_idx) + { + // Find the maximum value in order to ensure numerical stability + uint8_t max = *input; + + for (col = 1; col < row_size; ++col) + { + max = MAX(max, input[col]); + } + + int32_t diff = 0; + int32_t sum = 0; + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + if (diff >= diff_min) + { + sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); + } + } + + const int32_t headroom = CLZ((uint32_t)sum); + const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; + const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + if (diff >= diff_min) + { + 
const int32_t res = + DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit); + output[col] = (uint8_t)CLAMP(res, (int32_t)255, (int32_t)0); + } + else + { + output[col] = 0; + } + } + input += row_size; + output += row_size; + } +} +/** + * @} end of Softmax group + */ \ No newline at end of file diff --git a/src/third_party/cmsis_nn/Source/TransposeFunctions/arm_transpose_s8.c b/src/third_party/cmsis_nn/Source/TransposeFunctions/arm_transpose_s8.c new file mode 100644 index 0000000..b6e96d3 --- /dev/null +++ b/src/third_party/cmsis_nn/Source/TransposeFunctions/arm_transpose_s8.c @@ -0,0 +1,243 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_transpose_s8.c + * Description: Transpose a s8 vector + * + * $Date: 30 October 2024 + * $Revision: V.1.0.1 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "third_party/cmsis_nn/Include/arm_nnfunctions.h" +#include "third_party/cmsis_nn/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup Transpose + * @{ + */ + +static arm_cmsis_nn_status arm_transpose_s8_nhcw(const int8_t *input, + int8_t *const output, + const cmsis_nn_dims *const input_dims, + const int32_t *const in_strides, + const int32_t *const out_strides) +{ + const int32_t n = input_dims->n; + const int32_t h = input_dims->h; + const int32_t w = input_dims->w; + const int32_t c = input_dims->c; + + const int8_t *input_n = input; + int8_t *output_n = output; + + const uint16_t src_rows = w; + const uint16_t src_cols = c; + +#if defined(ARM_MATH_MVEI) + uint16x8_t vec_offsets; + uint16x8_t vec_input; + + vec_offsets = vidupq_u16((uint32_t)0, 1); + vec_offsets = vec_offsets * src_cols; +#endif + + for (int32_t i = 0; i < n; i++) + { + const int8_t *input_h = input_n; + int8_t *output_h = output_n; + + for (int32_t y = 0; y < h; y++) + { + +#if defined(ARM_MATH_MVEI) + const uint8_t *input_c = (const uint8_t *)input_h; + uint8_t *output_c = (uint8_t *)output_h; + + for (int32_t z = 0; z < src_cols; z++) + { + uint8_t const *input_w = (uint8_t const *)input_c; + uint8_t *output_w = (uint8_t *)output_c; + + int32_t block_count = src_rows; + while (block_count > 0) + { + mve_pred16_t p = vctp16q(block_count); + + vec_input = vldrbq_gather_offset_z_u16(input_w, vec_offsets, p); + vstrbq_p_u16(output_w, vec_input, p); + + input_w = input_w + src_cols * 8; + output_w += 8; + block_count -= 8; + } + + input_c++; + output_c += src_rows; + } +#else + const uint8_t 
*input_w = (const uint8_t *)input_h; + uint8_t *output_w = (uint8_t *)output_h; + + for (int32_t src_row_i = 0; src_row_i < src_rows; src_row_i++) + { + output_w = (uint8_t *)output + src_row_i; + + for (int32_t x = 0; x < src_cols; x++) + { + *output_w = *input_w++; + output_w += src_rows; + } + } +#endif + input_h += in_strides[1]; + output_h += out_strides[1]; + } + input_n += in_strides[0]; + output_n += out_strides[0]; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +static arm_cmsis_nn_status arm_transpose_s8_default(const int8_t *input, + int8_t *const output, + const cmsis_nn_dims *const input_dims, + const int32_t *const in_strides, + const int32_t *const out_strides) +{ + const int32_t n = input_dims->n; + const int32_t h = input_dims->h; + const int32_t w = input_dims->w; + const int32_t c = input_dims->c; + + for (int32_t i = 0; i < n; i++) + { + for (int32_t y = 0; y < h; y++) + { + for (int32_t x = 0; x < w; x++) + { + for (int32_t z = 0; z < c; z++) + { + const int32_t from_index = + i * in_strides[0] + y * in_strides[1] + x * in_strides[2] + z * in_strides[3]; + + const int32_t to_index = + i * out_strides[0] + y * out_strides[1] + x * out_strides[2] + z * out_strides[3]; + + output[to_index] = input[from_index]; + } + } + } + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/* + * Basic s8 transpose function. + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_transpose_s8(const int8_t *input, + int8_t *const output, + const cmsis_nn_dims *const input_dims, + const cmsis_nn_dims *const output_dims, + const cmsis_nn_transpose_params *const transpose_params) +{ + int32_t in_strides[4]; + int32_t out_strides[4] = {0}; + + const uint32_t *const perm = transpose_params->permutations; + + const int32_t n = input_dims->n; + const int32_t h = input_dims->h; + const int32_t w = input_dims->w; + const int32_t c = input_dims->c; + + in_strides[0] = h * w * c; + in_strides[1] = w * c; + in_strides[2] = c; + in_strides[3] = 1; + + if (transpose_params->num_dims == 1) + { + arm_memcpy_s8(output, input, input_dims->n); + + return ARM_CMSIS_NN_SUCCESS; + } + else if (transpose_params->num_dims == 2) + { + const cmsis_nn_dims smaller_input_dims = {1, 1, n, h}; + + return arm_transpose_s8_nhcw(input, output, &smaller_input_dims, in_strides, out_strides); + } + else if (transpose_params->num_dims == 3) + { + const cmsis_nn_dims smaller_input_dims = {1, n, h, w}; + + in_strides[0] = 0; + in_strides[1] = h * w; + in_strides[2] = w; + in_strides[3] = 1; + + if (perm[0] > 2 || perm[1] > 2 || perm[2] > 2) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + out_strides[0] = 0; + out_strides[perm[0] + 1] = output_dims->h * output_dims->w; + out_strides[perm[1] + 1] = output_dims->w; + out_strides[perm[2] + 1] = 1; + + return arm_transpose_s8_default(input, output, &smaller_input_dims, in_strides, out_strides); + } + + if (perm[0] > 3 || perm[1] > 3 || perm[2] > 3 || perm[3] > 3) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + out_strides[perm[0]] = output_dims->h * output_dims->w * output_dims->c; + out_strides[perm[1]] = output_dims->w * output_dims->c; + out_strides[perm[2]] = output_dims->c; + out_strides[perm[3]] = 1; + +#if defined(ARM_MATH_MVEI) + if (perm[0] == 0 && perm[1] == 1) + { + return arm_transpose_s8_nhcw(input, output, input_dims, in_strides, out_strides); + } +#endif + + return 
arm_transpose_s8_default(input, output, input_dims, in_strides, out_strides); +} + +/** + * @} end of Transpose group + */ diff --git a/src/third_party/ruy/LICENSE b/src/third_party/flatbuffers/LICENSE similarity index 100% rename from src/third_party/ruy/LICENSE rename to src/third_party/flatbuffers/LICENSE diff --git a/src/third_party/flatbuffers/LICENSE.txt b/src/third_party/flatbuffers/LICENSE.txt deleted file mode 100644 index d645695..0000000 --- a/src/third_party/flatbuffers/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/third_party/flatbuffers/include/flatbuffers/allocator.h b/src/third_party/flatbuffers/include/flatbuffers/allocator.h new file mode 100644 index 0000000..f72bdd7 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/allocator.h @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_ALLOCATOR_H_ +#define FLATBUFFERS_ALLOCATOR_H_ + +#include "third_party/flatbuffers/include/flatbuffers/base.h" + +namespace flatbuffers { + +// Allocator interface. This is flatbuffers-specific and meant only for +// `vector_downward` usage. +class Allocator { + public: + virtual ~Allocator() {} + + // Allocate `size` bytes of memory. + virtual uint8_t *allocate(size_t size) = 0; + + // Deallocate `size` bytes of memory at `p` allocated by this allocator. 
+ virtual void deallocate(uint8_t *p, size_t size) = 0; + + // Reallocate `new_size` bytes of memory, replacing the old region of size + // `old_size` at `p`. In contrast to a normal realloc, this grows downwards, + // and is intended specifcally for `vector_downward` use. + // `in_use_back` and `in_use_front` indicate how much of `old_size` is + // actually in use at each end, and needs to be copied. + virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, + size_t new_size, size_t in_use_back, + size_t in_use_front) { + FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows + uint8_t *new_p = allocate(new_size); + memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, + in_use_front); + deallocate(old_p, old_size); + return new_p; + } + + protected: + // Called by `reallocate_downward` to copy memory from `old_p` of `old_size` + // to `new_p` of `new_size`. Only memory of size `in_use_front` and + // `in_use_back` will be copied from the front and back of the old memory + // allocation. + void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p, + size_t new_size, size_t in_use_back, + size_t in_use_front) { + memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back, + in_use_back); + memcpy(new_p, old_p, in_use_front); + } +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_ALLOCATOR_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/array.h b/src/third_party/flatbuffers/include/flatbuffers/array.h new file mode 100644 index 0000000..f18cdca --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/array.h @@ -0,0 +1,256 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_ARRAY_H_ +#define FLATBUFFERS_ARRAY_H_ + +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/base.h" +#include "third_party/flatbuffers/include/flatbuffers/stl_emulation.h" +#include "third_party/flatbuffers/include/flatbuffers/vector.h" + +namespace flatbuffers { + +// This is used as a helper type for accessing arrays. +template class Array { + // Array can carry only POD data types (scalars or structs). + typedef typename flatbuffers::bool_constant::value> + scalar_tag; + typedef + typename flatbuffers::conditional::type + IndirectHelperType; + + public: + typedef uint16_t size_type; + typedef typename IndirectHelper::return_type return_type; + typedef VectorConstIterator const_iterator; + typedef VectorReverseIterator const_reverse_iterator; + + // If T is a LE-scalar or a struct (!scalar_tag::value). + static FLATBUFFERS_CONSTEXPR bool is_span_observable = + (scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1)) || + !scalar_tag::value; + + FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; } + + return_type Get(uoffset_t i) const { + FLATBUFFERS_ASSERT(i < size()); + return IndirectHelper::Read(Data(), i); + } + + return_type operator[](uoffset_t i) const { return Get(i); } + + // If this is a Vector of enums, T will be its storage type, not the enum + // type. This function makes it convenient to retrieve value with enum + // type E. 
+ template E GetEnum(uoffset_t i) const { + return static_cast(Get(i)); + } + + const_iterator begin() const { return const_iterator(Data(), 0); } + const_iterator end() const { return const_iterator(Data(), size()); } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + const_reverse_iterator crbegin() const { return rbegin(); } + const_reverse_iterator crend() const { return rend(); } + + // Get a mutable pointer to elements inside this array. + // This method used to mutate arrays of structs followed by a @p Mutate + // operation. For primitive types use @p Mutate directly. + // @warning Assignments and reads to/from the dereferenced pointer are not + // automatically converted to the correct endianness. + typename flatbuffers::conditional::type + GetMutablePointer(uoffset_t i) const { + FLATBUFFERS_ASSERT(i < size()); + return const_cast(&data()[i]); + } + + // Change elements if you have a non-const pointer to this object. + void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); } + + // The raw data in little endian format. Use with care. + const uint8_t *Data() const { return data_; } + + uint8_t *Data() { return data_; } + + // Similarly, but typed, much like std::vector::data + const T *data() const { return reinterpret_cast(Data()); } + T *data() { return reinterpret_cast(Data()); } + + // Copy data from a span with endian conversion. + // If this Array and the span overlap, the behavior is undefined. 
+ void CopyFromSpan(flatbuffers::span src) { + const auto p1 = reinterpret_cast(src.data()); + const auto p2 = Data(); + FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) && + !(p2 >= p1 && p2 < (p1 + length))); + (void)p1; + (void)p2; + CopyFromSpanImpl(flatbuffers::bool_constant(), src); + } + + protected: + void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) { + FLATBUFFERS_ASSERT(i < size()); + WriteScalar(data() + i, val); + } + + void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) { + *(GetMutablePointer(i)) = val; + } + + void CopyFromSpanImpl(flatbuffers::true_type, + flatbuffers::span src) { + // Use std::memcpy() instead of std::copy() to avoid performance degradation + // due to aliasing if T is char or unsigned char. + // The size is known at compile time, so memcpy would be inlined. + std::memcpy(data(), src.data(), length * sizeof(T)); + } + + // Copy data from flatbuffers::span with endian conversion. + void CopyFromSpanImpl(flatbuffers::false_type, + flatbuffers::span src) { + for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); } + } + + // This class is only used to access pre-existing data. Don't ever + // try to construct these manually. + // 'constexpr' allows us to use 'size()' at compile time. + // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on + // a constructor. +#if defined(__cpp_constexpr) + constexpr Array(); +#else + Array(); +#endif + + uint8_t data_[length * sizeof(T)]; + + private: + // This class is a pointer. Copying will therefore create an invalid object. + // Private and unimplemented copy constructor. + Array(const Array &); + Array &operator=(const Array &); +}; + +// Specialization for Array[struct] with access using Offset pointer. +// This specialization used by idl_gen_text.cpp. 
+template class OffsetT> +class Array, length> { + static_assert(flatbuffers::is_same::value, "unexpected type T"); + + public: + typedef const void *return_type; + typedef uint16_t size_type; + + const uint8_t *Data() const { return data_; } + + // Make idl_gen_text.cpp::PrintContainer happy. + return_type operator[](uoffset_t) const { + FLATBUFFERS_ASSERT(false); + return nullptr; + } + + private: + // This class is only used to access pre-existing data. + Array(); + Array(const Array &); + Array &operator=(const Array &); + + uint8_t data_[1]; +}; + +template +FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Array &arr) + FLATBUFFERS_NOEXCEPT { + static_assert( + Array::is_span_observable, + "wrong type U, only plain struct, LE-scalar, or byte types are allowed"); + return span(arr.data(), N); +} + +template +FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span( + const Array &arr) FLATBUFFERS_NOEXCEPT { + static_assert( + Array::is_span_observable, + "wrong type U, only plain struct, LE-scalar, or byte types are allowed"); + return span(arr.data(), N); +} + +template +FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span +make_bytes_span(Array &arr) FLATBUFFERS_NOEXCEPT { + static_assert(Array::is_span_observable, + "internal error, Array might hold only scalars or structs"); + return span(arr.Data(), sizeof(U) * N); +} + +template +FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span +make_bytes_span(const Array &arr) FLATBUFFERS_NOEXCEPT { + static_assert(Array::is_span_observable, + "internal error, Array might hold only scalars or structs"); + return span(arr.Data(), sizeof(U) * N); +} + +// Cast a raw T[length] to a raw flatbuffers::Array +// without endian conversion. Use with care. +// TODO: move these Cast-methods to `internal` namespace. 
+template +Array &CastToArray(T (&arr)[length]) { + return *reinterpret_cast *>(arr); +} + +template +const Array &CastToArray(const T (&arr)[length]) { + return *reinterpret_cast *>(arr); +} + +template +Array &CastToArrayOfEnum(T (&arr)[length]) { + static_assert(sizeof(E) == sizeof(T), "invalid enum type E"); + return *reinterpret_cast *>(arr); +} + +template +const Array &CastToArrayOfEnum(const T (&arr)[length]) { + static_assert(sizeof(E) == sizeof(T), "invalid enum type E"); + return *reinterpret_cast *>(arr); +} + +template +bool operator==(const Array &lhs, + const Array &rhs) noexcept { + return std::addressof(lhs) == std::addressof(rhs) || + (lhs.size() == rhs.size() && + std::memcmp(lhs.Data(), rhs.Data(), rhs.size() * sizeof(T)) == 0); +} + +} // namespace flatbuffers + +#endif // FLATBUFFERS_ARRAY_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/base.h b/src/third_party/flatbuffers/include/flatbuffers/base.h index 8e97c08..ad6c852 100644 --- a/src/third_party/flatbuffers/include/flatbuffers/base.h +++ b/src/third_party/flatbuffers/include/flatbuffers/base.h @@ -1,6 +1,16 @@ #ifndef FLATBUFFERS_BASE_H_ #define FLATBUFFERS_BASE_H_ +// For TFLM, we always want FLATBUFFERS_LOCALE_INDEPENDENT to be defined as 0. +// We could achieve this by adding -DFLATBUFFERS_LOCALE_INDEPENDENT=0 to the +// TFLM Makefile. However, for (at least) the Arduino, adding additional build +// flags during the compilation can be a bit awkward. As such, we have instead +// made a decision to change the default to be FLATBUFFERS_LOCALE_INDEPENDENT=0 +// for TFLM to make it easier for external IDE integration. +#ifndef FLATBUFFERS_LOCALE_INDEPENDENT +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 +#endif + // clang-format off // If activate should be declared and included first. 
@@ -32,8 +42,8 @@ #include #include -#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H) - #include +#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H) && defined(__AVR__) + #include #else #include #endif @@ -43,6 +53,7 @@ #include #include #include +#include #include #include @@ -50,10 +61,6 @@ #include #endif -#ifdef _STLPORT_VERSION - #define FLATBUFFERS_CPP98_STL -#endif - #ifdef __ANDROID__ #include #endif @@ -142,9 +149,9 @@ #endif #endif // !defined(FLATBUFFERS_LITTLEENDIAN) -#define FLATBUFFERS_VERSION_MAJOR 1 -#define FLATBUFFERS_VERSION_MINOR 12 -#define FLATBUFFERS_VERSION_REVISION 0 +#define FLATBUFFERS_VERSION_MAJOR 23 +#define FLATBUFFERS_VERSION_MINOR 5 +#define FLATBUFFERS_VERSION_REVISION 26 #define FLATBUFFERS_STRING_EXPAND(X) #X #define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X) namespace flatbuffers { @@ -177,10 +184,9 @@ namespace flatbuffers { #define FLATBUFFERS_CONSTEXPR_CPP11 #endif -// This macro is never used in code! #if (defined(__cplusplus) && __cplusplus >= 201402L) || \ (defined(__cpp_constexpr) && __cpp_constexpr >= 201304) - #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR + #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11 #else #define FLATBUFFERS_CONSTEXPR_CPP14 #endif @@ -198,9 +204,15 @@ namespace flatbuffers { #if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \ defined(__clang__) - #define FLATBUFFERS_DELETE_FUNC(func) func = delete; + #define FLATBUFFERS_DELETE_FUNC(func) func = delete #else - #define FLATBUFFERS_DELETE_FUNC(func) private: func; + #define FLATBUFFERS_DELETE_FUNC(func) private: func +#endif + +#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \ + (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \ + defined(__clang__) + #define FLATBUFFERS_DEFAULT_DECLARATION #endif // Check if we can use template aliases @@ -232,16 +244,26 @@ namespace flatbuffers { } #define FLATBUFFERS_HAS_STRING_VIEW 1 
// Check for absl::string_view - #elif __has_include("absl/strings/string_view.h") - #include "absl/strings/string_view.h" - namespace flatbuffers { - typedef absl::string_view string_view; - } - #define FLATBUFFERS_HAS_STRING_VIEW 1 + #elif __has_include("absl/strings/string_view.h") && \ + __has_include("absl/base/config.h") && \ + (__cplusplus >= 201411) + #include "absl/base/config.h" + #if !defined(ABSL_USES_STD_STRING_VIEW) + #include "absl/strings/string_view.h" + namespace flatbuffers { + typedef absl::string_view string_view; + } + #define FLATBUFFERS_HAS_STRING_VIEW 1 + #endif #endif #endif // __has_include #endif // !FLATBUFFERS_HAS_STRING_VIEW +#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK + // Allow heap allocations to be used + #define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1 +#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK + #ifndef FLATBUFFERS_HAS_NEW_STRTOD // Modern (C++11) strtod and strtof functions are available for use. // 1) nan/inf strings as argument of strtod; @@ -254,9 +276,12 @@ namespace flatbuffers { #endif // !FLATBUFFERS_HAS_NEW_STRTOD #ifndef FLATBUFFERS_LOCALE_INDEPENDENT - // Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}. - #if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \ - (defined(_XOPEN_VERSION) && (_XOPEN_VERSION>=700)) && (!defined(__ANDROID_API__) || (defined(__ANDROID_API__) && (__ANDROID_API__>=21)))) + // Enable locale independent functions {strtof_l, strtod_l,strtoll_l, + // strtoull_l}. + #if (defined(_MSC_VER) && _MSC_VER >= 1800) || \ + (defined(__ANDROID_API__) && __ANDROID_API__>= 21) || \ + (defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)) && \ + (!defined(__Fuchsia__) && !defined(__ANDROID_API__)) #define FLATBUFFERS_LOCALE_INDEPENDENT 1 #else #define FLATBUFFERS_LOCALE_INDEPENDENT 0 @@ -264,14 +289,14 @@ namespace flatbuffers { #endif // !FLATBUFFERS_LOCALE_INDEPENDENT // Suppress Undefined Behavior Sanitizer (recoverable only). 
Usage: -// - __supress_ubsan__("undefined") -// - __supress_ubsan__("signed-integer-overflow") +// - FLATBUFFERS_SUPPRESS_UBSAN("undefined") +// - FLATBUFFERS_SUPPRESS_UBSAN("signed-integer-overflow") #if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7)) - #define __supress_ubsan__(type) __attribute__((no_sanitize(type))) + #define FLATBUFFERS_SUPPRESS_UBSAN(type) __attribute__((no_sanitize(type))) #elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409) - #define __supress_ubsan__(type) __attribute__((no_sanitize_undefined)) + #define FLATBUFFERS_SUPPRESS_UBSAN(type) __attribute__((no_sanitize_undefined)) #else - #define __supress_ubsan__(type) + #define FLATBUFFERS_SUPPRESS_UBSAN(type) #endif // This is constexpr function used for checking compile-time constants. @@ -284,7 +309,7 @@ template FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) { #if ((__cplusplus >= 201703L) \ || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))) // All attributes unknown to an implementation are ignored without causing an error. - #define FLATBUFFERS_ATTRIBUTE(attr) [[attr]] + #define FLATBUFFERS_ATTRIBUTE(attr) attr #define FLATBUFFERS_FALLTHROUGH() [[fallthrough]] #else @@ -309,9 +334,11 @@ namespace flatbuffers { // Also, using a consistent offset type maintains compatibility of serialized // offset values between 32bit and 64bit systems. typedef uint32_t uoffset_t; +typedef uint64_t uoffset64_t; // Signed offsets for references that can go in both directions. typedef int32_t soffset_t; +typedef int64_t soffset64_t; // Offset/index used in v-tables, can be changed to uint8_t in // format forks to save a bit of space if desired. 
@@ -320,10 +347,28 @@ typedef uint16_t voffset_t; typedef uintmax_t largest_scalar_t; // In 32bits, this evaluates to 2GB - 1 -#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1) +#define FLATBUFFERS_MAX_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset_t>::max() +#define FLATBUFFERS_MAX_64_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset64_t>::max() + +// The minimum size buffer that can be a valid flatbuffer. +// Includes the offset to the root table (uoffset_t), the offset to the vtable +// of the root table (soffset_t), the size of the vtable (uint16_t), and the +// size of the referring table (uint16_t). +#define FLATBUFFERS_MIN_BUFFER_SIZE sizeof(uoffset_t) + sizeof(soffset_t) + \ + sizeof(uint16_t) + sizeof(uint16_t) // We support aligning the contents of buffers up to this size. -#define FLATBUFFERS_MAX_ALIGNMENT 16 +#ifndef FLATBUFFERS_MAX_ALIGNMENT + #define FLATBUFFERS_MAX_ALIGNMENT 32 +#endif + +/// @brief The length of a FlatBuffer file header. +static const size_t kFileIdentifierLength = 4; + +inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) { + return (min_align <= align) && (align <= (FLATBUFFERS_MAX_ALIGNMENT)) && + (align & (align - 1)) == 0; // must be power of 2 +} #if defined(_MSC_VER) #pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized @@ -387,7 +432,7 @@ template T EndianScalar(T t) { template // UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. -__supress_ubsan__("alignment") +FLATBUFFERS_SUPPRESS_UBSAN("alignment") T ReadScalar(const void *p) { return EndianScalar(*reinterpret_cast(p)); } @@ -401,13 +446,13 @@ T ReadScalar(const void *p) { template // UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. 
-__supress_ubsan__("alignment") +FLATBUFFERS_SUPPRESS_UBSAN("alignment") void WriteScalar(void *p, T t) { *reinterpret_cast(p) = EndianScalar(t); } template struct Offset; -template __supress_ubsan__("alignment") void WriteScalar(void *p, Offset t) { +template FLATBUFFERS_SUPPRESS_UBSAN("alignment") void WriteScalar(void *p, Offset t) { *reinterpret_cast(p) = EndianScalar(t.o); } @@ -418,10 +463,43 @@ template __supress_ubsan__("alignment") void WriteScalar(void *p, Of // Computes how many bytes you'd have to pad to be able to write an // "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in // memory). -__supress_ubsan__("unsigned-integer-overflow") +FLATBUFFERS_SUPPRESS_UBSAN("unsigned-integer-overflow") inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) { return ((~buf_size) + 1) & (scalar_size - 1); } +// Generic 'operator==' with conditional specialisations. +// T e - new value of a scalar field. +// T def - default of scalar (is known at compile-time). +template inline bool IsTheSameAs(T e, T def) { return e == def; } + +#if defined(FLATBUFFERS_NAN_DEFAULTS) && \ + defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0) +// Like `operator==(e, def)` with weak NaN if T=(float|double). +template inline bool IsFloatTheSameAs(T e, T def) { + return (e == def) || ((def != def) && (e != e)); +} +template<> inline bool IsTheSameAs(float e, float def) { + return IsFloatTheSameAs(e, def); +} +template<> inline bool IsTheSameAs(double e, double def) { + return IsFloatTheSameAs(e, def); +} +#endif + +// Check 'v' is out of closed range [low; high]. +// Workaround for GCC warning [-Werror=type-limits]: +// comparison is always true due to limited range of data type. +template +inline bool IsOutRange(const T &v, const T &low, const T &high) { + return (v < low) || (high < v); +} + +// Check 'v' is in closed range [low; high]. 
+template +inline bool IsInRange(const T &v, const T &low, const T &high) { + return !IsOutRange(v, low, high); +} + } // namespace flatbuffers #endif // FLATBUFFERS_BASE_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/buffer.h b/src/third_party/flatbuffers/include/flatbuffers/buffer.h new file mode 100644 index 0000000..d364864 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/buffer.h @@ -0,0 +1,199 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_BUFFER_H_ +#define FLATBUFFERS_BUFFER_H_ + +#include + +#include "third_party/flatbuffers/include/flatbuffers/base.h" + +namespace flatbuffers { + +// Wrapper for uoffset_t to allow safe template specialization. +// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset). +template struct Offset { + // The type of offset to use. + typedef uoffset_t offset_type; + + offset_type o; + Offset() : o(0) {} + Offset(const offset_type _o) : o(_o) {} + Offset<> Union() const { return o; } + bool IsNull() const { return !o; } +}; + +// Wrapper for uoffset64_t Offsets. +template struct Offset64 { + // The type of offset to use. 
+ typedef uoffset64_t offset_type; + + offset_type o; + Offset64() : o(0) {} + Offset64(const offset_type offset) : o(offset) {} + Offset64<> Union() const { return o; } + bool IsNull() const { return !o; } +}; + +// Litmus check for ensuring the Offsets are the expected size. +static_assert(sizeof(Offset<>) == 4, "Offset has wrong size"); +static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size"); + +inline void EndianCheck() { + int endiantest = 1; + // If this fails, see FLATBUFFERS_LITTLEENDIAN above. + FLATBUFFERS_ASSERT(*reinterpret_cast(&endiantest) == + FLATBUFFERS_LITTLEENDIAN); + (void)endiantest; +} + +template FLATBUFFERS_CONSTEXPR size_t AlignOf() { + // clang-format off + #ifdef _MSC_VER + return __alignof(T); + #else + #ifndef alignof + return __alignof__(T); + #else + return alignof(T); + #endif + #endif + // clang-format on +} + +// Lexicographically compare two strings (possibly containing nulls), and +// return true if the first is less than the second. +static inline bool StringLessThan(const char *a_data, uoffset_t a_size, + const char *b_data, uoffset_t b_size) { + const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size)); + return cmp == 0 ? a_size < b_size : cmp < 0; +} + +// When we read serialized data from memory, in the case of most scalars, +// we want to just read T, but in the case of Offset, we want to actually +// perform the indirection and return a pointer. +// The template specialization below does just that. +// It is wrapped in a struct since function templates can't overload on the +// return type like this. 
+// The typedef is for the convenience of callers of this function +// (avoiding the need for a trailing return decltype) +template struct IndirectHelper { + typedef T return_type; + typedef T mutable_return_type; + static const size_t element_stride = sizeof(T); + + static return_type Read(const uint8_t *p, const size_t i) { + return EndianScalar((reinterpret_cast(p))[i]); + } + static mutable_return_type Read(uint8_t *p, const size_t i) { + return reinterpret_cast( + Read(const_cast(p), i)); + } +}; + +// For vector of Offsets. +template class OffsetT> +struct IndirectHelper> { + typedef const T *return_type; + typedef T *mutable_return_type; + typedef typename OffsetT::offset_type offset_type; + static const offset_type element_stride = sizeof(offset_type); + + static return_type Read(const uint8_t *const p, const offset_type i) { + // Offsets are relative to themselves, so first update the pointer to + // point to the offset location. + const uint8_t *const offset_location = p + i * element_stride; + + // Then read the scalar value of the offset (which may be 32 or 64-bits) and + // then determine the relative location from the offset location. + return reinterpret_cast( + offset_location + ReadScalar(offset_location)); + } + static mutable_return_type Read(uint8_t *const p, const offset_type i) { + // Offsets are relative to themselves, so first update the pointer to + // point to the offset location. + uint8_t *const offset_location = p + i * element_stride; + + // Then read the scalar value of the offset (which may be 32 or 64-bits) and + // then determine the relative location from the offset location. + return reinterpret_cast( + offset_location + ReadScalar(offset_location)); + } +}; + +// For vector of structs. 
+template struct IndirectHelper { + typedef const T *return_type; + typedef T *mutable_return_type; + static const size_t element_stride = sizeof(T); + + static return_type Read(const uint8_t *const p, const size_t i) { + // Structs are stored inline, relative to the first struct pointer. + return reinterpret_cast(p + i * element_stride); + } + static mutable_return_type Read(uint8_t *const p, const size_t i) { + // Structs are stored inline, relative to the first struct pointer. + return reinterpret_cast(p + i * element_stride); + } +}; + +/// @brief Get a pointer to the file_identifier section of the buffer. +/// @return Returns a const char pointer to the start of the file_identifier +/// characters in the buffer. The returned char * has length +/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'. +/// This function is UNDEFINED for FlatBuffers whose schema does not include +/// a file_identifier (likely points at padding or the start of a the root +/// vtable). +inline const char *GetBufferIdentifier(const void *buf, + bool size_prefixed = false) { + return reinterpret_cast(buf) + + ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t)); +} + +// Helper to see if the identifier in a buffer has the expected value. +inline bool BufferHasIdentifier(const void *buf, const char *identifier, + bool size_prefixed = false) { + return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier, + flatbuffers::kFileIdentifierLength) == 0; +} + +/// @cond FLATBUFFERS_INTERNAL +// Helpers to get a typed pointer to the root object contained in the buffer. 
+template T *GetMutableRoot(void *buf) { + if (!buf) return nullptr; + EndianCheck(); + return reinterpret_cast( + reinterpret_cast(buf) + + EndianScalar(*reinterpret_cast(buf))); +} + +template +T *GetMutableSizePrefixedRoot(void *buf) { + return GetMutableRoot(reinterpret_cast(buf) + sizeof(SizeT)); +} + +template const T *GetRoot(const void *buf) { + return GetMutableRoot(const_cast(buf)); +} + +template +const T *GetSizePrefixedRoot(const void *buf) { + return GetRoot(reinterpret_cast(buf) + sizeof(SizeT)); +} + +} // namespace flatbuffers + +#endif // FLATBUFFERS_BUFFER_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/buffer_ref.h b/src/third_party/flatbuffers/include/flatbuffers/buffer_ref.h new file mode 100644 index 0000000..2ca922d --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/buffer_ref.h @@ -0,0 +1,53 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_BUFFER_REF_H_ +#define FLATBUFFERS_BUFFER_REF_H_ + +#include "third_party/flatbuffers/include/flatbuffers/base.h" +#include "third_party/flatbuffers/include/flatbuffers/verifier.h" + +namespace flatbuffers { + +// Convenient way to bundle a buffer and its length, to pass it around +// typed by its root. +// A BufferRef does not own its buffer. 
+struct BufferRefBase {}; // for std::is_base_of + +template struct BufferRef : BufferRefBase { + BufferRef() : buf(nullptr), len(0), must_free(false) {} + BufferRef(uint8_t *_buf, uoffset_t _len) + : buf(_buf), len(_len), must_free(false) {} + + ~BufferRef() { + if (must_free) free(buf); + } + + const T *GetRoot() const { return flatbuffers::GetRoot(buf); } + + bool Verify() { + Verifier verifier(buf, len); + return verifier.VerifyBuffer(nullptr); + } + + uint8_t *buf; + uoffset_t len; + bool must_free; +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_BUFFER_REF_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/code_generator.h b/src/third_party/flatbuffers/include/flatbuffers/code_generator.h new file mode 100644 index 0000000..71c3d63 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/code_generator.h @@ -0,0 +1,97 @@ +/* + * Copyright 2023 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_CODE_GENERATOR_H_ +#define FLATBUFFERS_CODE_GENERATOR_H_ + +#include + +#include "third_party/flatbuffers/include/flatbuffers/idl.h" + +namespace flatbuffers { + +struct CodeGenOptions { + std::string output_path; +}; + +// A code generator interface for producing converting flatbuffer schema into +// code. 
+class CodeGenerator { + public: + virtual ~CodeGenerator() = default; + + enum Status { + OK = 0, + ERROR = 1, + FAILED_VERIFICATION = 2, + NOT_IMPLEMENTED = 3 + }; + + std::string status_detail; + + // Generate code from the provided `parser`. + // + // DEPRECATED: prefer using the other overload of GenerateCode for bfbs. + virtual Status GenerateCode(const Parser &parser, const std::string &path, + const std::string &filename) = 0; + + // Generate code from the provided `parser` and place it in the output. + virtual Status GenerateCodeString(const Parser &parser, + const std::string &filename, + std::string &output) { + (void)parser; + (void)filename; + (void)output; + return Status::NOT_IMPLEMENTED; + } + + // Generate code from the provided `buffer` of given `length`. The buffer is a + // serialized reflection.fbs. + virtual Status GenerateCode(const uint8_t *buffer, int64_t length, + const CodeGenOptions &options) = 0; + + virtual Status GenerateMakeRule(const Parser &parser, const std::string &path, + const std::string &filename, + std::string &output) = 0; + + virtual Status GenerateGrpcCode(const Parser &parser, const std::string &path, + const std::string &filename) = 0; + + virtual Status GenerateRootFile(const Parser &parser, + const std::string &path) = 0; + + virtual bool IsSchemaOnly() const = 0; + + virtual bool SupportsBfbsGeneration() const = 0; + + virtual bool SupportsRootFileGeneration() const = 0; + + virtual IDLOptions::Language Language() const = 0; + + virtual std::string LanguageName() const = 0; + + protected: + CodeGenerator() = default; + + private: + // Copying is not supported. 
+ CodeGenerator(const CodeGenerator &) = delete; + CodeGenerator &operator=(const CodeGenerator &) = delete; +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_CODE_GENERATOR_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/code_generators.h b/src/third_party/flatbuffers/include/flatbuffers/code_generators.h new file mode 100644 index 0000000..5e216b6 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/code_generators.h @@ -0,0 +1,238 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_CODE_GENERATORS_H_ +#define FLATBUFFERS_CODE_GENERATORS_H_ + +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/idl.h" + +namespace flatbuffers { + +// Utility class to assist in generating code through use of text templates. +// +// Example code: +// CodeWriter code("\t"); +// code.SetValue("NAME", "Foo"); +// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }"; +// code.SetValue("NAME", "Bar"); +// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }"; +// std::cout << code.ToString() << std::endl; +// +// Output: +// void Foo() { printf("%s", "Foo"); } +// void Bar() { printf("%s", "Bar"); } +class CodeWriter { + public: + CodeWriter(std::string pad = std::string()) + : pad_(pad), cur_ident_lvl_(0), ignore_ident_(false) {} + + // Clears the current "written" code. 
+ void Clear() { + stream_.str(""); + stream_.clear(); + } + + // Associates a key with a value. All subsequent calls to operator+=, where + // the specified key is contained in {{ and }} delimiters will be replaced by + // the given value. + void SetValue(const std::string &key, const std::string &value) { + value_map_[key] = value; + } + + std::string GetValue(const std::string &key) const { + const auto it = value_map_.find(key); + return it == value_map_.end() ? "" : it->second; + } + + // Appends the given text to the generated code as well as a newline + // character. Any text within {{ and }} delimiters is replaced by values + // previously stored in the CodeWriter by calling SetValue above. The newline + // will be suppressed if the text ends with the \\ character. + void operator+=(std::string text); + + // Returns the current contents of the CodeWriter as a std::string. + std::string ToString() const { return stream_.str(); } + + // Increase ident level for writing code + void IncrementIdentLevel() { cur_ident_lvl_++; } + // Decrease ident level for writing code + void DecrementIdentLevel() { + if (cur_ident_lvl_) cur_ident_lvl_--; + } + + void SetPadding(const std::string &padding) { pad_ = padding; } + + private: + std::map value_map_; + std::stringstream stream_; + std::string pad_; + int cur_ident_lvl_; + bool ignore_ident_; + + // Add ident padding (tab or space) based on ident level + void AppendIdent(std::stringstream &stream); +}; + +class BaseGenerator { + public: + virtual bool generate() = 0; + + static std::string NamespaceDir(const Parser &parser, const std::string &path, + const Namespace &ns, + const bool dasherize = false); + + std::string GeneratedFileName(const std::string &path, + const std::string &file_name, + const IDLOptions &options) const; + + protected: + BaseGenerator(const Parser &parser, const std::string &path, + const std::string &file_name, std::string qualifying_start, + std::string qualifying_separator, std::string 
default_extension) + : parser_(parser), + path_(path), + file_name_(file_name), + qualifying_start_(qualifying_start), + qualifying_separator_(qualifying_separator), + default_extension_(default_extension) {} + virtual ~BaseGenerator() {} + + // No copy/assign. + BaseGenerator &operator=(const BaseGenerator &); + BaseGenerator(const BaseGenerator &); + + std::string NamespaceDir(const Namespace &ns, + const bool dasherize = false) const; + + static const char *FlatBuffersGeneratedWarning(); + + static std::string FullNamespace(const char *separator, const Namespace &ns); + + static std::string LastNamespacePart(const Namespace &ns); + + // tracks the current namespace for early exit in WrapInNameSpace + // c++, java and csharp returns a different namespace from + // the following default (no early exit, always fully qualify), + // which works for js and php + virtual const Namespace *CurrentNameSpace() const { return nullptr; } + + // Ensure that a type is prefixed with its namespace even within + // its own namespace to avoid conflict between generated method + // names and similarly named classes or structs + std::string WrapInNameSpace(const Namespace *ns, + const std::string &name) const; + + std::string WrapInNameSpace(const Definition &def, + const std::string &suffix = "") const; + + std::string GetNameSpace(const Definition &def) const; + + const Parser &parser_; + const std::string &path_; + const std::string &file_name_; + const std::string qualifying_start_; + const std::string qualifying_separator_; + const std::string default_extension_; +}; + +struct CommentConfig { + const char *first_line; + const char *content_line_prefix; + const char *last_line; +}; + +extern void GenComment(const std::vector &dc, + std::string *code_ptr, const CommentConfig *config, + const char *prefix = ""); + +class FloatConstantGenerator { + public: + virtual ~FloatConstantGenerator() {} + std::string GenFloatConstant(const FieldDef &field) const; + + private: + virtual 
std::string Value(double v, const std::string &src) const = 0; + virtual std::string Inf(double v) const = 0; + virtual std::string NaN(double v) const = 0; + + virtual std::string Value(float v, const std::string &src) const = 0; + virtual std::string Inf(float v) const = 0; + virtual std::string NaN(float v) const = 0; + + template + std::string GenFloatConstantImpl(const FieldDef &field) const; +}; + +class SimpleFloatConstantGenerator : public FloatConstantGenerator { + public: + SimpleFloatConstantGenerator(const char *nan_number, + const char *pos_inf_number, + const char *neg_inf_number); + + private: + std::string Value(double v, + const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(double v) const FLATBUFFERS_OVERRIDE; + std::string NaN(double v) const FLATBUFFERS_OVERRIDE; + + std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(float v) const FLATBUFFERS_OVERRIDE; + std::string NaN(float v) const FLATBUFFERS_OVERRIDE; + + const std::string nan_number_; + const std::string pos_inf_number_; + const std::string neg_inf_number_; +}; + +// C++, C#, Java like generator. 
+class TypedFloatConstantGenerator : public FloatConstantGenerator { + public: + TypedFloatConstantGenerator(const char *double_prefix, + const char *single_prefix, const char *nan_number, + const char *pos_inf_number, + const char *neg_inf_number = ""); + + private: + std::string Value(double v, + const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(double v) const FLATBUFFERS_OVERRIDE; + + std::string NaN(double v) const FLATBUFFERS_OVERRIDE; + + std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(float v) const FLATBUFFERS_OVERRIDE; + std::string NaN(float v) const FLATBUFFERS_OVERRIDE; + + std::string MakeNaN(const std::string &prefix) const; + std::string MakeInf(bool neg, const std::string &prefix) const; + + const std::string double_prefix_; + const std::string single_prefix_; + const std::string nan_number_; + const std::string pos_inf_number_; + const std::string neg_inf_number_; +}; + +std::string JavaCSharpMakeRule(const bool java, const Parser &parser, + const std::string &path, + const std::string &file_name); + +} // namespace flatbuffers + +#endif // FLATBUFFERS_CODE_GENERATORS_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/default_allocator.h b/src/third_party/flatbuffers/include/flatbuffers/default_allocator.h new file mode 100644 index 0000000..3c5aa83 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/default_allocator.h @@ -0,0 +1,58 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_DEFAULT_ALLOCATOR_H_ +#define FLATBUFFERS_DEFAULT_ALLOCATOR_H_ + +#include "third_party/flatbuffers/include/flatbuffers/allocator.h" +#include "third_party/flatbuffers/include/flatbuffers/base.h" + +namespace flatbuffers { + +// DefaultAllocator uses new/delete to allocate memory regions +class DefaultAllocator : public Allocator { + public: + uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE { + return new uint8_t[size]; + } + + void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; } + + static void dealloc(void *p, size_t) { delete[] static_cast(p); } +}; + +// These functions allow for a null allocator to mean use the default allocator, +// as used by DetachedBuffer and vector_downward below. +// This is to avoid having a statically or dynamically allocated default +// allocator, or having to move it between the classes that may own it. +inline uint8_t *Allocate(Allocator *allocator, size_t size) { + return allocator->allocate(size); +} + +inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) { + allocator->deallocate(p, size); +} + +inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p, + size_t old_size, size_t new_size, + size_t in_use_back, size_t in_use_front) { + return allocator->reallocate_downward(old_p, old_size, new_size, in_use_back, + in_use_front); +} + +} // namespace flatbuffers + +#endif // FLATBUFFERS_DEFAULT_ALLOCATOR_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/detached_buffer.h b/src/third_party/flatbuffers/include/flatbuffers/detached_buffer.h new file mode 100644 index 0000000..6d1885b --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/detached_buffer.h @@ -0,0 +1,114 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_DETACHED_BUFFER_H_ +#define FLATBUFFERS_DETACHED_BUFFER_H_ + +#include "third_party/flatbuffers/include/flatbuffers/allocator.h" +#include "third_party/flatbuffers/include/flatbuffers/base.h" +#include "third_party/flatbuffers/include/flatbuffers/default_allocator.h" + +namespace flatbuffers { + +// DetachedBuffer is a finished flatbuffer memory region, detached from its +// builder. The original memory region and allocator are also stored so that +// the DetachedBuffer can manage the memory lifetime. 
+class DetachedBuffer { + public: + DetachedBuffer() + : allocator_(nullptr), + own_allocator_(false), + buf_(nullptr), + reserved_(0), + cur_(nullptr), + size_(0) {} + + DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf, + size_t reserved, uint8_t *cur, size_t sz) + : allocator_(allocator), + own_allocator_(own_allocator), + buf_(buf), + reserved_(reserved), + cur_(cur), + size_(sz) {} + + DetachedBuffer(DetachedBuffer &&other) noexcept + : allocator_(other.allocator_), + own_allocator_(other.own_allocator_), + buf_(other.buf_), + reserved_(other.reserved_), + cur_(other.cur_), + size_(other.size_) { + other.reset(); + } + + DetachedBuffer &operator=(DetachedBuffer &&other) noexcept { + if (this == &other) return *this; + + destroy(); + + allocator_ = other.allocator_; + own_allocator_ = other.own_allocator_; + buf_ = other.buf_; + reserved_ = other.reserved_; + cur_ = other.cur_; + size_ = other.size_; + + other.reset(); + + return *this; + } + + ~DetachedBuffer() { destroy(); } + + const uint8_t *data() const { return cur_; } + + uint8_t *data() { return cur_; } + + size_t size() const { return size_; } + + // These may change access mode, leave these at end of public section + FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other)); + FLATBUFFERS_DELETE_FUNC( + DetachedBuffer &operator=(const DetachedBuffer &other)); + + protected: + Allocator *allocator_; + bool own_allocator_; + uint8_t *buf_; + size_t reserved_; + uint8_t *cur_; + size_t size_; + + inline void destroy() { + if (buf_) Deallocate(allocator_, buf_, reserved_); + if (own_allocator_ && allocator_) { delete allocator_; } + reset(); + } + + inline void reset() { + allocator_ = nullptr; + own_allocator_ = false; + buf_ = nullptr; + reserved_ = 0; + cur_ = nullptr; + size_ = 0; + } +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_DETACHED_BUFFER_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/file_manager.h 
b/src/third_party/flatbuffers/include/flatbuffers/file_manager.h new file mode 100644 index 0000000..c1997f1 --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/file_manager.h @@ -0,0 +1,48 @@ +/* + * Copyright 2023 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_FILE_MANAGER_H_ +#define FLATBUFFERS_FILE_MANAGER_H_ + +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/util.h" + +namespace flatbuffers { + +// A File interface to write data to file by default or +// save only file names +class FileManager { + public: + FileManager() = default; + virtual ~FileManager() = default; + + virtual bool SaveFile(const std::string &absolute_file_name, + const std::string &content) = 0; + + virtual bool LoadFile(const std::string &absolute_file_name, + std::string *buf) = 0; + + private: + // Copying is not supported. + FileManager(const FileManager &) = delete; + FileManager &operator=(const FileManager &) = delete; +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_FILE_MANAGER_H_ diff --git a/src/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h b/src/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h new file mode 100644 index 0000000..edb44dd --- /dev/null +++ b/src/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h @@ -0,0 +1,1465 @@ +/* + * Copyright 2021 Google Inc. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_FLATBUFFER_BUILDER_H_ +#define FLATBUFFERS_FLATBUFFER_BUILDER_H_ + +#include +#include +#include +#include +#include + +#include "third_party/flatbuffers/include/flatbuffers/allocator.h" +#include "third_party/flatbuffers/include/flatbuffers/array.h" +#include "third_party/flatbuffers/include/flatbuffers/base.h" +#include "third_party/flatbuffers/include/flatbuffers/buffer.h" +#include "third_party/flatbuffers/include/flatbuffers/buffer_ref.h" +#include "third_party/flatbuffers/include/flatbuffers/default_allocator.h" +#include "third_party/flatbuffers/include/flatbuffers/detached_buffer.h" +#include "third_party/flatbuffers/include/flatbuffers/stl_emulation.h" +#include "third_party/flatbuffers/include/flatbuffers/string.h" +#include "third_party/flatbuffers/include/flatbuffers/struct.h" +#include "third_party/flatbuffers/include/flatbuffers/table.h" +#include "third_party/flatbuffers/include/flatbuffers/vector.h" +#include "third_party/flatbuffers/include/flatbuffers/vector_downward.h" +#include "third_party/flatbuffers/include/flatbuffers/verifier.h" + +namespace flatbuffers { + +// Converts a Field ID to a virtual table offset. +inline voffset_t FieldIndexToOffset(voffset_t field_id) { + // Should correspond to what EndTable() below builds up. + const voffset_t fixed_fields = + 2 * sizeof(voffset_t); // Vtable size and Object Size. 
+ return fixed_fields + field_id * sizeof(voffset_t); +} + +template> +const T *data(const std::vector &v) { + // Eventually the returned pointer gets passed down to memcpy, so + // we need it to be non-null to avoid undefined behavior. + static uint8_t t; + return v.empty() ? reinterpret_cast(&t) : &v.front(); +} +template> +T *data(std::vector &v) { + // Eventually the returned pointer gets passed down to memcpy, so + // we need it to be non-null to avoid undefined behavior. + static uint8_t t; + return v.empty() ? reinterpret_cast(&t) : &v.front(); +} + +/// @addtogroup flatbuffers_cpp_api +/// @{ +/// @class FlatBufferBuilder +/// @brief Helper class to hold data needed in creation of a FlatBuffer. +/// To serialize data, you typically call one of the `Create*()` functions in +/// the generated code, which in turn call a sequence of `StartTable`/ +/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/ +/// `CreateVector` functions. Do this is depth-first order to build up a tree to +/// the root. `Finish()` wraps up the buffer ready for transport. +template class FlatBufferBuilderImpl { + public: + // This switches the size type of the builder, based on if its 64-bit aware + // (uoffset64_t) or not (uoffset_t). + typedef + typename std::conditional::type SizeT; + + /// @brief Default constructor for FlatBufferBuilder. + /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults + /// to `1024`. + /// @param[in] allocator An `Allocator` to use. If null will use + /// `DefaultAllocator`. + /// @param[in] own_allocator Whether the builder/vector should own the + /// allocator. Defaults to / `false`. + /// @param[in] buffer_minalign Force the buffer to be aligned to the given + /// minimum alignment upon reallocation. Only needed if you intend to store + /// types with custom alignment AND you wish to read the buffer in-place + /// directly after creation. 
+ explicit FlatBufferBuilderImpl( + size_t initial_size = 1024, Allocator *allocator = nullptr, + bool own_allocator = false, + size_t buffer_minalign = AlignOf()) + : buf_(initial_size, allocator, own_allocator, buffer_minalign, + static_cast(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE + : FLATBUFFERS_MAX_BUFFER_SIZE)), + num_field_loc(0), + max_voffset_(0), + length_of_64_bit_region_(0), + nested(false), + finished(false), + minalign_(1), + force_defaults_(false), + dedup_vtables_(true), + string_pool(nullptr) { + EndianCheck(); + } + + /// @brief Move constructor for FlatBufferBuilder. + FlatBufferBuilderImpl(FlatBufferBuilderImpl &&other) noexcept + : buf_(1024, nullptr, false, AlignOf(), + static_cast(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE + : FLATBUFFERS_MAX_BUFFER_SIZE)), + num_field_loc(0), + max_voffset_(0), + length_of_64_bit_region_(0), + nested(false), + finished(false), + minalign_(1), + force_defaults_(false), + dedup_vtables_(true), + string_pool(nullptr) { + EndianCheck(); + // Default construct and swap idiom. + // Lack of delegating constructors in vs2010 makes it more verbose than + // needed. + Swap(other); + } + + /// @brief Move assignment operator for FlatBufferBuilder. 
+ FlatBufferBuilderImpl &operator=(FlatBufferBuilderImpl &&other) noexcept { + // Move construct a temporary and swap idiom + FlatBufferBuilderImpl temp(std::move(other)); + Swap(temp); + return *this; + } + + void Swap(FlatBufferBuilderImpl &other) { + using std::swap; + buf_.swap(other.buf_); + swap(num_field_loc, other.num_field_loc); + swap(max_voffset_, other.max_voffset_); + swap(length_of_64_bit_region_, other.length_of_64_bit_region_); + swap(nested, other.nested); + swap(finished, other.finished); + swap(minalign_, other.minalign_); + swap(force_defaults_, other.force_defaults_); + swap(dedup_vtables_, other.dedup_vtables_); + swap(string_pool, other.string_pool); + } + + ~FlatBufferBuilderImpl() { + if (string_pool) delete string_pool; + } + + void Reset() { + Clear(); // clear builder state + buf_.reset(); // deallocate buffer + } + + /// @brief Reset all the state in this FlatBufferBuilder so it can be reused + /// to construct another buffer. + void Clear() { + ClearOffsets(); + buf_.clear(); + nested = false; + finished = false; + minalign_ = 1; + length_of_64_bit_region_ = 0; + if (string_pool) string_pool->clear(); + } + + /// @brief The current size of the serialized buffer, counting from the end. + /// @return Returns an `SizeT` with the current size of the buffer. + SizeT GetSize() const { return buf_.size(); } + + /// @brief The current size of the serialized buffer relative to the end of + /// the 32-bit region. + /// @return Returns an `uoffset_t` with the current size of the buffer. + template + // Only enable this method for the 64-bit builder, as only that builder is + // concerned with the 32/64-bit boundary, and should be the one to bare any + // run time costs. 
+ typename std::enable_if::type GetSizeRelative32BitRegion() + const { + //[32-bit region][64-bit region] + // [XXXXXXXXXXXXXXXXXXX] GetSize() + // [YYYYYYYYYYYYY] length_of_64_bit_region_ + // [ZZZZ] return size + return static_cast(GetSize() - length_of_64_bit_region_); + } + + template + // Only enable this method for the 32-bit builder. + typename std::enable_if::type GetSizeRelative32BitRegion() + const { + return static_cast(GetSize()); + } + + /// @brief Get the serialized buffer (after you call `Finish()`). + /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the + /// buffer. + uint8_t *GetBufferPointer() const { + Finished(); + return buf_.data(); + } + + /// @brief Get the serialized buffer (after you call `Finish()`) as a span. + /// @return Returns a constructed flatbuffers::span that is a view over the + /// FlatBuffer data inside the buffer. + flatbuffers::span GetBufferSpan() const { + Finished(); + return flatbuffers::span(buf_.data(), buf_.size()); + } + + /// @brief Get a pointer to an unfinished buffer. + /// @return Returns a `uint8_t` pointer to the unfinished buffer. + uint8_t *GetCurrentBufferPointer() const { return buf_.data(); } + + /// @brief Get the released pointer to the serialized buffer. + /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards! + /// @return A `FlatBuffer` that owns the buffer and its allocator and + /// behaves similar to a `unique_ptr` with a deleter. + FLATBUFFERS_ATTRIBUTE([[deprecated("use Release() instead")]]) + DetachedBuffer ReleaseBufferPointer() { + Finished(); + return buf_.release(); + } + + /// @brief Get the released DetachedBuffer. + /// @return A `DetachedBuffer` that owns the buffer and its allocator. + DetachedBuffer Release() { + Finished(); + return buf_.release(); + } + + /// @brief Get the released pointer to the serialized buffer. + /// @param size The size of the memory block containing + /// the serialized `FlatBuffer`. 
+ /// @param offset The offset from the released pointer where the finished + /// `FlatBuffer` starts. + /// @return A raw pointer to the start of the memory block containing + /// the serialized `FlatBuffer`. + /// @remark If the allocator is owned, it gets deleted when the destructor is + /// called.. + uint8_t *ReleaseRaw(size_t &size, size_t &offset) { + Finished(); + return buf_.release_raw(size, offset); + } + + /// @brief get the minimum alignment this buffer needs to be accessed + /// properly. This is only known once all elements have been written (after + /// you call Finish()). You can use this information if you need to embed + /// a FlatBuffer in some other buffer, such that you can later read it + /// without first having to copy it into its own buffer. + size_t GetBufferMinAlignment() const { + Finished(); + return minalign_; + } + + /// @cond FLATBUFFERS_INTERNAL + void Finished() const { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call + // FlatBufferBuilder::Finish with your root table. + // If you really need to access an unfinished buffer, call + // GetCurrentBufferPointer instead. + FLATBUFFERS_ASSERT(finished); + } + /// @endcond + + /// @brief In order to save space, fields that are set to their default value + /// don't get serialized into the buffer. + /// @param[in] fd When set to `true`, always serializes default values that + /// are set. Optional fields which are not set explicitly, will still not be + /// serialized. + void ForceDefaults(bool fd) { force_defaults_ = fd; } + + /// @brief By default vtables are deduped in order to save space. + /// @param[in] dedup When set to `true`, dedup vtables. 
+ void DedupVtables(bool dedup) { dedup_vtables_ = dedup; } + + /// @cond FLATBUFFERS_INTERNAL + void Pad(size_t num_bytes) { buf_.fill(num_bytes); } + + void TrackMinAlign(size_t elem_size) { + if (elem_size > minalign_) minalign_ = elem_size; + } + + void Align(size_t elem_size) { + TrackMinAlign(elem_size); + buf_.fill(PaddingBytes(buf_.size(), elem_size)); + } + + void PushFlatBuffer(const uint8_t *bytes, size_t size) { + PushBytes(bytes, size); + finished = true; + } + + void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); } + + void PopBytes(size_t amount) { buf_.pop(amount); } + + template void AssertScalarT() { + // The code assumes power of 2 sizes and endian-swap-ability. + static_assert(flatbuffers::is_scalar::value, "T must be a scalar type"); + } + + // Write a single aligned scalar to the buffer + template + ReturnT PushElement(T element) { + AssertScalarT(); + Align(sizeof(T)); + buf_.push_small(EndianScalar(element)); + return CalculateOffset(); + } + + template class OffsetT = Offset> + uoffset_t PushElement(OffsetT off) { + // Special case for offsets: see ReferTo below. + return PushElement(ReferTo(off.o)); + } + + // When writing fields, we track where they are, so we can create correct + // vtables later. + void TrackField(voffset_t field, uoffset_t off) { + FieldLoc fl = { off, field }; + buf_.scratch_push_small(fl); + num_field_loc++; + if (field > max_voffset_) { max_voffset_ = field; } + } + + // Like PushElement, but additionally tracks the field this represents. + template void AddElement(voffset_t field, T e, T def) { + // We don't serialize values equal to the default. + if (IsTheSameAs(e, def) && !force_defaults_) return; + TrackField(field, PushElement(e)); + } + + template void AddElement(voffset_t field, T e) { + TrackField(field, PushElement(e)); + } + + template void AddOffset(voffset_t field, Offset off) { + if (off.IsNull()) return; // Don't store. 
+ AddElement(field, ReferTo(off.o), static_cast(0)); + } + + template void AddOffset(voffset_t field, Offset64 off) { + if (off.IsNull()) return; // Don't store. + AddElement(field, ReferTo(off.o), static_cast(0)); + } + + template void AddStruct(voffset_t field, const T *structptr) { + if (!structptr) return; // Default, don't store. + Align(AlignOf()); + buf_.push_small(*structptr); + TrackField(field, CalculateOffset()); + } + + void AddStructOffset(voffset_t field, uoffset_t off) { + TrackField(field, off); + } + + // Offsets initially are relative to the end of the buffer (downwards). + // This function converts them to be relative to the current location + // in the buffer (when stored here), pointing upwards. + uoffset_t ReferTo(uoffset_t off) { + // Align to ensure GetSizeRelative32BitRegion() below is correct. + Align(sizeof(uoffset_t)); + // 32-bit offsets are relative to the tail of the 32-bit region of the + // buffer. For most cases (without 64-bit entities) this is equivalent to + // size of the whole buffer (e.g. GetSize()) + return ReferTo(off, GetSizeRelative32BitRegion()); + } + + uoffset64_t ReferTo(uoffset64_t off) { + // Align to ensure GetSize() below is correct. + Align(sizeof(uoffset64_t)); + // 64-bit offsets are relative to tail of the whole buffer + return ReferTo(off, GetSize()); + } + + template T ReferTo(const T off, const T2 size) { + FLATBUFFERS_ASSERT(off && off <= size); + return size - off + static_cast(sizeof(T)); + } + + template T ReferTo(const T off, const T size) { + FLATBUFFERS_ASSERT(off && off <= size); + return size - off + static_cast(sizeof(T)); + } + + void NotNested() { + // If you hit this, you're trying to construct a Table/Vector/String + // during the construction of its parent table (between the MyTableBuilder + // and table.Finish(). + // Move the creation of these sub-objects to above the MyTableBuilder to + // not get this assert. 
+ // Ignoring this assert may appear to work in simple cases, but the reason + // it is here is that storing objects in-line may cause vtable offsets + // to not fit anymore. It also leads to vtable duplication. + FLATBUFFERS_ASSERT(!nested); + // If you hit this, fields were added outside the scope of a table. + FLATBUFFERS_ASSERT(!num_field_loc); + } + + // From generated code (or from the parser), we call StartTable/EndTable + // with a sequence of AddElement calls in between. + uoffset_t StartTable() { + NotNested(); + nested = true; + return GetSizeRelative32BitRegion(); + } + + // This finishes one serialized object by generating the vtable if it's a + // table, comparing it against existing vtables, and writing the + // resulting vtable offset. + uoffset_t EndTable(uoffset_t start) { + // If you get this assert, a corresponding StartTable wasn't called. + FLATBUFFERS_ASSERT(nested); + // Write the vtable offset, which is the start of any Table. + // We fill its value later. + // This is relative to the end of the 32-bit region. + const uoffset_t vtable_offset_loc = + static_cast(PushElement(0)); + // Write a vtable, which consists entirely of voffset_t elements. + // It starts with the number of offsets, followed by a type id, followed + // by the offsets themselves. In reverse: + // Include space for the last offset and ensure empty tables have a + // minimum size. + max_voffset_ = + (std::max)(static_cast(max_voffset_ + sizeof(voffset_t)), + FieldIndexToOffset(0)); + buf_.fill_big(max_voffset_); + const uoffset_t table_object_size = vtable_offset_loc - start; + // Vtable use 16bit offsets. 
+ FLATBUFFERS_ASSERT(table_object_size < 0x10000); + WriteScalar(buf_.data() + sizeof(voffset_t), + static_cast(table_object_size)); + WriteScalar(buf_.data(), max_voffset_); + // Write the offsets into the table + for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); + it < buf_.scratch_end(); it += sizeof(FieldLoc)) { + auto field_location = reinterpret_cast(it); + const voffset_t pos = + static_cast(vtable_offset_loc - field_location->off); + // If this asserts, it means you've set a field twice. + FLATBUFFERS_ASSERT( + !ReadScalar(buf_.data() + field_location->id)); + WriteScalar(buf_.data() + field_location->id, pos); + } + ClearOffsets(); + auto vt1 = reinterpret_cast(buf_.data()); + auto vt1_size = ReadScalar(vt1); + auto vt_use = GetSizeRelative32BitRegion(); + // See if we already have generated a vtable with this exact same + // layout before. If so, make it point to the old one, remove this one. + if (dedup_vtables_) { + for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); + it += sizeof(uoffset_t)) { + auto vt_offset_ptr = reinterpret_cast(it); + auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); + auto vt2_size = ReadScalar(vt2); + if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue; + vt_use = *vt_offset_ptr; + buf_.pop(GetSizeRelative32BitRegion() - vtable_offset_loc); + break; + } + } + // If this is a new vtable, remember it. + if (vt_use == GetSizeRelative32BitRegion()) { + buf_.scratch_push_small(vt_use); + } + // Fill the vtable offset we created above. + // The offset points from the beginning of the object to where the vtable is + // stored. + // Offsets default direction is downward in memory for future format + // flexibility (storing all vtables at the start of the file). 
+ WriteScalar(buf_.data_at(vtable_offset_loc + length_of_64_bit_region_), + static_cast(vt_use) - + static_cast(vtable_offset_loc)); + nested = false; + return vtable_offset_loc; + } + + FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]]) + uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) { + return EndTable(start); + } + + // This checks a required field has been set in a given table that has + // just been constructed. + template void Required(Offset table, voffset_t field) { + auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); + bool ok = table_ptr->GetOptionalFieldOffset(field) != 0; + // If this fails, the caller will show what field needs to be set. + FLATBUFFERS_ASSERT(ok); + (void)ok; + } + + uoffset_t StartStruct(size_t alignment) { + Align(alignment); + return GetSizeRelative32BitRegion(); + } + + uoffset_t EndStruct() { return GetSizeRelative32BitRegion(); } + + void ClearOffsets() { + buf_.scratch_pop(num_field_loc * sizeof(FieldLoc)); + num_field_loc = 0; + max_voffset_ = 0; + } + + // Aligns such that when "len" bytes are written, an object can be written + // after it (forward in the buffer) with "alignment" without padding. + void PreAlign(size_t len, size_t alignment) { + if (len == 0) return; + TrackMinAlign(alignment); + buf_.fill(PaddingBytes(GetSize() + len, alignment)); + } + + // Aligns such than when "len" bytes are written, an object of type `AlignT` + // can be written after it (forward in the buffer) without padding. + template void PreAlign(size_t len) { + AssertScalarT(); + PreAlign(len, AlignOf()); + } + /// @endcond + + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const char pointer to the data to be stored as a string. + /// @param[in] len The number of bytes that should be stored from `str`. + /// @return Returns the offset in the buffer where the string starts. 
+ template class OffsetT = Offset> + OffsetT CreateString(const char *str, size_t len) { + CreateStringImpl(str, len); + return OffsetT( + CalculateOffset::offset_type>()); + } + + /// @brief Store a string in the buffer, which is null-terminated. + /// @param[in] str A const char pointer to a C-string to add to the buffer. + /// @return Returns the offset in the buffer where the string starts. + template class OffsetT = Offset> + OffsetT CreateString(const char *str) { + return CreateString(str, strlen(str)); + } + + /// @brief Store a string in the buffer, which is null-terminated. + /// @param[in] str A char pointer to a C-string to add to the buffer. + /// @return Returns the offset in the buffer where the string starts. + template class OffsetT = Offset> + OffsetT CreateString(char *str) { + return CreateString(str, strlen(str)); + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const reference to a std::string to store in the buffer. + /// @return Returns the offset in the buffer where the string starts. + template class OffsetT = Offset> + OffsetT CreateString(const std::string &str) { + return CreateString(str.c_str(), str.length()); + } + + // clang-format off + #ifdef FLATBUFFERS_HAS_STRING_VIEW + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const string_view to copy in to the buffer. + /// @return Returns the offset in the buffer where the string starts. + template